Commit 22471de0 by 包珍珍 Committed by Enrico Pozzobon

knot

parent 8e94fb3c
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
.macro PUSH_CONFLICT
push r16
push r17
push r18
push r19
push r23
push r24
push r25
push r26
push r27
push r28
push r29
push r30
push r31
.endm
.macro POP_CONFLICT
pop r31
pop r30
pop r29
pop r28
pop r27
pop r26
pop r25
pop r24
pop r23
pop r19
pop r18
pop r17
pop r16
.endm
.macro PUSH_ALL
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
push r28
push r29
.endm
.macro POP_ALL
pop r29
pop r28
pop r17
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
clr r1 ; restore the zero register: avr-gcc assumes r1 is always 0
.endm
\ No newline at end of file
#ifndef __CONFIG_H__
#define __CONFIG_H__
#define CRYPTO_AEAD
//#define CRYPTO_HASH
#define MAX_MESSAGE_LENGTH 128
#define STATE_INBITS 256
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBITS 128
/* For CRYPTO_HASH */
#define CRYPTO_BITS 256
#define STATE_INBYTES ((STATE_INBITS + 7) / 8)
#define ROW_INBITS ((STATE_INBITS + 3) / 4)
#define ROW_INBYTES ((ROW_INBITS + 7) / 8)
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBYTES ((CRYPTO_KEYBITS + 7) / 8)
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES CRYPTO_KEYBYTES
#define CRYPTO_ABYTES CRYPTO_KEYBYTES
#define CRYPTO_NOOVERLAP 1
#define MAX_ASSOCIATED_DATA_LENGTH 32
#define MAX_CIPHER_LENGTH (MAX_MESSAGE_LENGTH + CRYPTO_ABYTES)
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
/* For CRYPTO_HASH */
#define CRYPTO_BYTES ((CRYPTO_BITS + 7) / 8)
#define DOMAIN_BITS 0x80
#define PAD_BITS 0x01
#define S384_R192_BITS 0x80
#if (STATE_INBITS==256)
#define C1 1
#define C2 8
#define C3 25
#elif (STATE_INBITS==384)
#define C1 1
#define C2 8
#define C3 55
#elif (STATE_INBITS==512)
#define C1 1
#define C2 16
#define C3 25
#else
#error "Not specified state size"
#endif
#ifdef CRYPTO_AEAD
/* For CRYPTO_AEAD */
#define KEY_INBITS (CRYPTO_KEYBYTES * 8)
#define KEY_INBYTES (CRYPTO_KEYBYTES)
#define NONCE_INBITS (CRYPTO_NPUBBYTES * 8)
#define NONCE_INBYTES (CRYPTO_NPUBBYTES)
#define TAG_INBITS (CRYPTO_ABYTES * 8)
#define TAG_INBYTES (CRYPTO_ABYTES)
#if (KEY_INBITS==128) && (STATE_INBITS==256)
#define RATE_INBITS 64
#define NR_0 52
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==128) && (STATE_INBITS==384)
#define RATE_INBITS 192
#define NR_0 76
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==192) && (STATE_INBITS==384)
#define RATE_INBITS 96
#define NR_0 76
#define NR_i 40
#define NR_f 44
#elif (KEY_INBITS==256) && (STATE_INBITS==512)
#define RATE_INBITS 128
#define NR_0 100
#define NR_i 52
#define NR_f 56
#else
#error "Not specified key size and state size"
#endif
#define RATE_INBYTES ((RATE_INBITS + 7) / 8)
#define SQUEEZE_RATE_INBYTES TAG_INBYTES
#endif
#ifdef CRYPTO_HASH
/* For CRYPTO_HASH */
#define HASH_DIGEST_INBITS (CRYPTO_BYTES * 8)
#if (HASH_DIGEST_INBITS==256) && (STATE_INBITS==256)
#define HASH_RATE_INBITS 32
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 68
#elif (HASH_DIGEST_INBITS==256) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 128
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 80
#elif (HASH_DIGEST_INBITS==384) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 48
#define HASH_SQUEEZE_RATE_INBITS 192
#define NR_h 104
#elif (HASH_DIGEST_INBITS==512) && (STATE_INBITS==512)
#define HASH_RATE_INBITS 64
#define HASH_SQUEEZE_RATE_INBITS 256
#define NR_h 140
#else
#error "Not specified hash digest size and state size"
#endif
#define HASH_RATE_INBYTES ((HASH_RATE_INBITS + 7) / 8)
#define HASH_SQUEEZE_RATE_INBYTES ((HASH_SQUEEZE_RATE_INBITS + 7) / 8)
#endif
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
#endif
\ No newline at end of file
#ifdef __cplusplus
extern "C" {
#endif
int crypto_aead_encrypt(
unsigned char *c,unsigned long long *clen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
);
int crypto_aead_decrypt(
unsigned char *m,unsigned long long *outputmlen,
unsigned char *nsec,
const unsigned char *c,unsigned long long clen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
);
#ifdef __cplusplus
}
#endif
#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
extern void crypto_aead_encrypt_asm(
unsigned char *c,
const unsigned char *m,
unsigned char mlen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern int crypto_aead_decrypt_asm(
unsigned char *m,
const unsigned char *c,
unsigned char clen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern void crypto_hash_asm(
unsigned char *out,
const unsigned char *in,
unsigned char inlen
);
int crypto_aead_encrypt(
unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the cipher implementation goes here,
... generating a ciphertext c[0],c[1],...,c[*clen-1]
... from a plaintext m[0],m[1],...,m[mlen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce npub[0],npub[1],..
... and secret key k[0],k[1],...
... the implementation shall not use nsec
...
... return 0;
*/
(void)nsec;
crypto_aead_encrypt_asm(c, m, mlen, ad, adlen, npub, k);
*clen = mlen + TAG_INBYTES;
return 0;
}
int crypto_aead_decrypt(
unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the AEAD implementation goes here,
... generating a plaintext m[0],m[1],...,m[*mlen-1]
... and secret message number nsec[0],nsec[1],...
... from a ciphertext c[0],c[1],...,c[clen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce number npub[0],npub[1],...
... and secret key k[0],k[1],...
...
... return 0;
*/
unsigned long long mlen_;
unsigned char tag_is_match;
(void)nsec;
if (clen < CRYPTO_ABYTES) {
return -1;
}
mlen_ = clen - CRYPTO_ABYTES;
tag_is_match = crypto_aead_decrypt_asm(m, c, mlen_, ad, adlen, npub, k);
if (tag_is_match != 0)
{
memset(m, 0, (size_t)mlen_);
return -1;
}
*mlen = mlen_;
return 0;
}
\ No newline at end of file
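For orientation, the SUPERCOP-style entry points above can be exercised with a small host-side round-trip test. The sketch below is illustrative only (hypothetical test code, with sizes hard-coded to the 16-byte key/nonce/tag of this variant) and is not part of the submission:

#include <string.h>

/* Prototypes as declared above. */
int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec, const unsigned char *npub,
                        const unsigned char *k);
int crypto_aead_decrypt(unsigned char *m, unsigned long long *mlen,
                        unsigned char *nsec,
                        const unsigned char *c, unsigned long long clen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *npub, const unsigned char *k);

/* Hypothetical round-trip check: encrypt, decrypt, compare. Returns 0 on success. */
int aead_roundtrip_example(void)
{
    unsigned char key[16] = {0}, nonce[16] = {0};
    unsigned char msg[8]  = {'K', 'N', 'O', 'T', '-', 'A', 'E', 'D'};
    unsigned char ad[4]   = {0, 1, 2, 3};
    unsigned char ct[sizeof(msg) + 16], pt[sizeof(msg)];
    unsigned long long ctlen = 0, ptlen = 0;

    crypto_aead_encrypt(ct, &ctlen, msg, sizeof(msg), ad, sizeof(ad), NULL, nonce, key);
    if (crypto_aead_decrypt(pt, &ptlen, NULL, ct, ctlen, ad, sizeof(ad), nonce, key) != 0)
        return -1;                                      /* tag mismatch */
    return (ptlen == sizeof(msg) && memcmp(pt, msg, sizeof(msg)) == 0) ? 0 : -1;
}

Note that the assembly wrappers take the lengths as single bytes, so plaintexts are limited to MAX_MESSAGE_LENGTH (128) bytes and associated data to MAX_ASSOCIATED_DATA_LENGTH (32) bytes.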
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
#define x10 r0
#define x11 r1
#define x12 r2
#define x13 r3
#define x14 r4
#define x15 r5
#define x16 r6
#define x17 r7
; an intentional arrangement of registers to facilitate movw
#define x20 r8
#define x21 r10
#define x22 r12
#define x23 r14
#define x24 r9
#define x25 r11
#define x26 r13
#define x27 r15
; an intentional arrangement of registers to facilitate movw
#define x30 r16
#define x35 r18
#define x32 r20
#define x37 r22
#define x34 r17
#define x31 r19
#define x36 r21
#define x33 r23
#define t0j r24
#define t1j r25
#define x0j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
mov t0j, \i1
com \i0
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
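For cross-checking, the Sbox macro above corresponds to the following word-level C transcription. It is an illustrative sketch (operating on whole 64-bit rows instead of single bytes) and not part of the submission:

#include <stdint.h>

/* Bit-sliced KNOT S-box, same operation sequence as the AVR Sbox macro:
   a, b, c, d are rows 0..3; every bit column goes through the 4-bit S-box. */
static inline void knot_sbox64(uint64_t *a, uint64_t *b, uint64_t *c, uint64_t *d)
{
    uint64_t t = *b;
    *a = ~*a;
    *b &= *a;
    *b ^= *c;
    *c |= t;
    *a ^= *d;
    *c ^= *a;
    t  ^= *d;
    *a &= *b;
    *d ^= *b;
    *a ^= t;
    t  &= *c;
    *b ^= t;
}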
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
ld x10, Y+
ld x11, Y+
ld x12, Y+
ld x13, Y+
ld x14, Y+
ld x15, Y+
ld x16, Y+
ld x17, Y+
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
sbrc AEDH, 2 ; AEDH[2] = 0 for AEAD and AEDH[2] = 1 for HASH
rjmp For_Hash
For_AEAD:
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
rjmp round_loop_start
For_Hash:
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#elif defined(CRYPTO_AEAD)
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
#else
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#endif
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
; SubColumns
Sbox x0j, x10, x20, x30
st Y+, x0j
ld x0j, Y
Sbox x0j, x11, x21, x31
st Y+, x0j
ld x0j, Y
Sbox x0j, x12, x22, x32
st Y+, x0j
ld x0j, Y
Sbox x0j, x13, x23, x33
st Y+, x0j
ld x0j, Y
Sbox x0j, x14, x24, x34
st Y+, x0j
ld x0j, Y
Sbox x0j, x15, x25, x35
st Y+, x0j
ld x0j, Y
Sbox x0j, x16, x26, x36
st Y+, x0j
ld x0j, Y
Sbox x0j, x17, x27, x37
st Y, x0j
; ShiftRows
; Row 1 <<< 1
mov t0j, x17
rol t0j
rol x10
rol x11
rol x12
rol x13
rol x14
rol x15
rol x16
rol x17
; Row 2 <<< 8
; 7 6 5 4 3 2 1 0 => 6 5 4 3 2 1 0 7
;mov t0j, x27
;mov x27, x26
;mov x26, x25
;mov x25, x24
;mov x24, x23
;mov x23, x22
;mov x22, x21
;mov x21, x20
;mov x20, t0j
; an intentional arrangement of registers to facilitate movw
movw t0j, x23 ; t1j:t0j <= x27:x23
movw x23, x22 ; x27:x23 <= x26:x22
movw x22, x21 ; x26:x22 <= x25:x21
movw x21, x20 ; x25:x21 <= x24:x20
mov x20, t1j ; x20 <= x27
mov x24, t0j ; x24 <= x23
; Row 3 <<< 1 (with the <<< 24 below, net <<< 25)
mov t0j, x37
rol t0j
rol x30
rol x31
rol x32
rol x33
rol x34
rol x35
rol x36
rol x37
; <<< 24
; 7 6 5 4 3 2 1 0 => 4 3 2 1 0 7 6 5
;mov t0j, x30
;mov x30, x35
;mov x35, x32
;mov x32, x37
;mov x37, x34
;mov x34, x31
;mov x31, x36
;mov x36, x33
;mov x33, t0j
; an intentional arrangement of registers to facilitate movw
;x30 r16
;x35 r18
;x32 r20
;x37 r22
;x34 r17
;x31 r19
;x36 r21
;x33 r23
movw t0j, x30 ; t1j:t0j <= x34:x30
movw x30, x35 ; x34:x30 <= x31:x35
movw x35, x32 ; x31:x35 <= x36:x32
movw x32, x37 ; x36:x32 <= x33:x37
mov x37, t1j ; x37 <= x34
mov x33, t0j ; x33 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
st Y+, x10
st Y+, x11
st Y+, x12
st Y+, x13
st Y+, x14
st Y+, x15
st Y+, x16
st Y+, x17
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
POP_CONFLICT
ret
.section .text
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#elif defined(CRYPTO_AEAD)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
#else
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#endif
\ No newline at end of file
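Reading aid: the Permutation routine above interleaves AddRC, SubColumns and ShiftRows at byte granularity. A word-level C model of one round of the 256-bit permutation is sketched below; it is illustrative only, assumes each row is held as one little-endian 64-bit word, and uses the rotation offsets 1, 8 and 25 fixed by C1/C2/C3 in config.h:

#include <stdint.h>

static inline uint64_t rotl64(uint64_t x, unsigned n) { return (x << n) | (x >> (64u - n)); }

/* One round on four 64-bit rows; rc is the round constant read from
   RC_LFSR6 (AEAD) or RC_LFSR7 (hash). */
static void knot256_round(uint64_t x[4], uint8_t rc)
{
    uint64_t a = x[0] ^ rc, b = x[1], c = x[2], d = x[3], t;

    /* SubColumns: bit-sliced S-box, same sequence as the Sbox macro */
    t = b;  a = ~a;  b &= a;  b ^= c;  c |= t;
    a ^= d; c ^= a;  t ^= d;  a &= b;  d ^= b;
    a ^= t; t &= c;  b ^= t;

    /* ShiftRows: row 0 unrotated, rows 1..3 rotated left by 1, 8 and 25 */
    x[0] = a;
    x[1] = rotl64(b, 1);
    x[2] = rotl64(c, 8);
    x[3] = rotl64(d, 25);
}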
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
; an intentional arrangement of registers to facilitate movw
#define x20 r0
#define x21 r2
#define x22 r4
#define x23 r6
#define x24 r8
#define x25 r10
#define x26 r1
#define x27 r3
#define x28 r5
#define x29 r7
#define x2a r9
#define x2b r11
; an intentional arrangement of registers to facilitate movw
#define x30 r22
#define x35 r20
#define x3a r18
#define x33 r16
#define x38 r14
#define x31 r12
#define x36 r23
#define x3b r21
#define x34 r19
#define x39 r17
#define x32 r15
#define x37 r13
#define t0j r24
#define t1j r25
#define x0j r25
#define x1j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
ldi t0j, 0xFF
eor \i0, t0j
mov t0j, \i1
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
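; OneColumn: apply the S-box to one byte column (rows 0..3 in \i0..\i3), store the
; new row-0 byte, and shift the new row-1 byte left through carry (part of Row 1 <<< 1)
; before writing it back in place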
.macro OneColumn i0, i1, i2, i3
ld \i0, Y
ldd \i1, Y + ROW_INBYTES
Sbox \i0, \i1, \i2, \i3
st Y+, \i0
rol \i1 ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, \i1
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x28, Y+
ld x29, Y+
ld x2a, Y+
ld x2b, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
ld x38, Y+
ld x39, Y+
ld x3a, Y+
ld x3b, Y+
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
ldd x1j, Y + ROW_INBYTES
Sbox x0j, x1j, x20, x30
st Y+, x0j
lsl x1j ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, x1j
OneColumn x0j, x1j, x21, x31
OneColumn x0j, x1j, x22, x32
OneColumn x0j, x1j, x23, x33
OneColumn x0j, x1j, x24, x34
OneColumn x0j, x1j, x25, x35
OneColumn x0j, x1j, x26, x36
OneColumn x0j, x1j, x27, x37
OneColumn x0j, x1j, x28, x38
OneColumn x0j, x1j, x29, x39
OneColumn x0j, x1j, x2a, x3a
OneColumn x0j, x1j, x2b, x3b
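; fold the wrap-around carry from the Row 1 <<< 1 rotation into bit 0 of row 1, byte 0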
ld x1j, Y
eor t0j, t0j
adc x1j, t0j
st Y, x1j
; b a 9 8 7 6 5 4 3 2 1 0
; -- -- -- -- -- -- -- -- -- -- -- x- 0
; -- -- -- -- -- -- -- -- -- -- -- x' 0
; -- -- -- -- -- -- -- -- -- -- x- -- 1
; -- -- -- -- x' -- -- -- -- -- -- -- 7
; 4 3 2 1 0 b a 9 8 7 6 5
; ShiftRows -- the last two rows
; <<< 8
; b a 9 8 7 6 5 4 3 2 1 0 => a 9 8 7 6 5 4 3 2 1 0 b
movw t0j, x25 ; t1j:t0j <= x2b:x25
movw x25, x24 ; x2b:x25 <= x2a:x24
movw x24, x23 ; x2a:x24 <= x29:x23
movw x23, x22 ; x29:x23 <= x28:x22
movw x22, x21 ; x28:x22 <= x27:x21
movw x21, x20 ; x27:x21 <= x26:x20
mov x26, t0j ; x26 <= x25
mov x20, t1j ; x20 <= x2b
; Row 3 >>> 1 (with the <<< 56 byte rotation below, net <<< 55)
mov t0j, x3b
ror t0j
ror x3a
ror x39
ror x38
ror x37
ror x36
ror x35
ror x34
ror x33
ror x32
ror x31
ror x30
ror x3b
; <<< 56
; b a 9 8 7 6 5 4 3 2 1 0 => 4 3 2 1 0 b a 9 8 7 6 5
; mov x3j, x30
; mov x30, x35
; mov x35, x3a
; mov x3a, x33
; mov x33, x38
; mov x38, x31
; mov x31, x36
; mov x36, x3b
; mov x3b, x34
; mov x34, x39
; mov x39, x32
; mov x32, x37
; mov x37, x3j
; an intentional arrangement of registers to facilitate movw
; x30 r22
; x35 r20
; x3a r18
; x33 r16
; x38 r14
; x31 r12
; x36 r23
; x3b r21
; x34 r19
; x39 r17
; x32 r15
; x37 r13
movw t0j, x30 ; t1j:t0j <= x36:x30
movw x30, x35 ; x36:x30 <= x3b:x35
movw x35, x3a ; x3b:x35 <= x34:x3a
movw x3a, x33 ; x34:x3a <= x39:x33
movw x33, x38 ; x39:x33 <= x32:x38
movw x38, x31 ; x32:x38 <= x37:x31
mov x31, t1j ; x31 <= x36
mov x37, t0j ; x37 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x28
st Y+, x29
st Y+, x2a
st Y+, x2b
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
st Y+, x38
st Y+, x39
st Y+, x3a
st Y+, x3b
POP_CONFLICT
ret
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
;
; ============================================
; R E G I S T E R D E F I N I T I O N S
; ============================================
;
#define mclen r16
#define radlen r17
#define tcnt r17
#define tmp0 r20
#define tmp1 r21
#define cnt0 r22
#define rn r23
#define rate r24
; AEDH = 0b000: for authenticating AD
; AEDH = 0b001: for encryption
; AEDH = 0b011: for decryption
; AEDH = 0b100: for hash
#define AEDH r25
#define rcnt r26
#if (STATE_INBITS==256)
#include "knot256.h"
#elif (STATE_INBITS==384)
#include "knot384.h"
#elif (STATE_INBITS==512)
#include "knot512.h"
#else
#error "Not specified key size and state size"
#endif
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
.macro PUSH_CONFLICT
push r16
push r17
push r18
push r19
push r23
push r24
push r25
push r26
push r27
push r28
push r29
push r30
push r31
.endm
.macro POP_CONFLICT
pop r31
pop r30
pop r29
pop r28
pop r27
pop r26
pop r25
pop r24
pop r23
pop r19
pop r18
pop r17
pop r16
.endm
.macro PUSH_ALL
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
push r28
push r29
.endm
.macro POP_ALL
pop r29
pop r28
pop r17
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
clr r1
.endm
\ No newline at end of file
#ifndef __CONFIG_H__
#define __CONFIG_H__
#define CRYPTO_AEAD
//#define CRYPTO_HASH
#define MAX_MESSAGE_LENGTH 128
#define STATE_INBITS 384
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBITS 128
/* For CRYPTO_HASH */
#define CRYPTO_BITS 256
#define STATE_INBYTES ((STATE_INBITS + 7) / 8)
#define ROW_INBITS ((STATE_INBITS + 3) / 4)
#define ROW_INBYTES ((ROW_INBITS + 7) / 8)
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBYTES ((CRYPTO_KEYBITS + 7) / 8)
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES CRYPTO_KEYBYTES
#define CRYPTO_ABYTES CRYPTO_KEYBYTES
#define CRYPTO_NOOVERLAP 1
#define MAX_ASSOCIATED_DATA_LENGTH 32
#define MAX_CIPHER_LENGTH (MAX_MESSAGE_LENGTH + CRYPTO_ABYTES)
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
/* For CRYPTO_HASH */
#define CRYPTO_BYTES ((CRYPTO_BITS + 7) / 8)
#define DOMAIN_BITS 0x80
#define PAD_BITS 0x01
#define S384_R192_BITS 0x80
#if (STATE_INBITS==256)
#define C1 1
#define C2 8
#define C3 25
#elif (STATE_INBITS==384)
#define C1 1
#define C2 8
#define C3 55
#elif (STATE_INBITS==512)
#define C1 1
#define C2 16
#define C3 25
#else
#error "Not specified state size"
#endif
#ifdef CRYPTO_AEAD
/* For CRYPTO_AEAD */
#define KEY_INBITS (CRYPTO_KEYBYTES * 8)
#define KEY_INBYTES (CRYPTO_KEYBYTES)
#define NONCE_INBITS (CRYPTO_NPUBBYTES * 8)
#define NONCE_INBYTES (CRYPTO_NPUBBYTES)
#define TAG_INBITS (CRYPTO_ABYTES * 8)
#define TAG_INBYTES (CRYPTO_ABYTES)
#if (KEY_INBITS==128) && (STATE_INBITS==256)
#define RATE_INBITS 64
#define NR_0 52
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==128) && (STATE_INBITS==384)
#define RATE_INBITS 192
#define NR_0 76
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==192) && (STATE_INBITS==384)
#define RATE_INBITS 96
#define NR_0 76
#define NR_i 40
#define NR_f 44
#elif (KEY_INBITS==256) && (STATE_INBITS==512)
#define RATE_INBITS 128
#define NR_0 100
#define NR_i 52
#define NR_f 56
#else
#error "Not specified key size and state size"
#endif
#define RATE_INBYTES ((RATE_INBITS + 7) / 8)
#define SQUEEZE_RATE_INBYTES TAG_INBYTES
#endif
#ifdef CRYPTO_HASH
/* For CRYPTO_HASH */
#define HASH_DIGEST_INBITS (CRYPTO_BYTES * 8)
#if (HASH_DIGEST_INBITS==256) && (STATE_INBITS==256)
#define HASH_RATE_INBITS 32
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 68
#elif (HASH_DIGEST_INBITS==256) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 128
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 80
#elif (HASH_DIGEST_INBITS==384) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 48
#define HASH_SQUEEZE_RATE_INBITS 192
#define NR_h 104
#elif (HASH_DIGEST_INBITS==512) && (STATE_INBITS==512)
#define HASH_RATE_INBITS 64
#define HASH_SQUEEZE_RATE_INBITS 256
#define NR_h 140
#else
#error "Not specified hash digest size and state size"
#endif
#define HASH_RATE_INBYTES ((HASH_RATE_INBITS + 7) / 8)
#define HASH_SQUEEZE_RATE_INBYTES ((HASH_SQUEEZE_RATE_INBITS + 7) / 8)
#endif
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
#endif
\ No newline at end of file
#ifdef __cplusplus
extern "C" {
#endif
int crypto_aead_encrypt(
unsigned char *c,unsigned long long *clen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
);
int crypto_aead_decrypt(
unsigned char *m,unsigned long long *outputmlen,
unsigned char *nsec,
const unsigned char *c,unsigned long long clen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
);
#ifdef __cplusplus
}
#endif
#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
extern void crypto_aead_encrypt_asm(
unsigned char *c,
const unsigned char *m,
unsigned char mlen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern int crypto_aead_decrypt_asm(
unsigned char *m,
const unsigned char *c,
unsigned char clen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern void crypto_hash_asm(
unsigned char *out,
const unsigned char *in,
unsigned char inlen
);
int crypto_aead_encrypt(
unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the cipher implementation goes here,
... generating a ciphertext c[0],c[1],...,c[*clen-1]
... from a plaintext m[0],m[1],...,m[mlen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce npub[0],npub[1],..
... and secret key k[0],k[1],...
... the implementation shall not use nsec
...
... return 0;
*/
(void)nsec;
crypto_aead_encrypt_asm(c, m, mlen, ad, adlen, npub, k);
*clen = mlen + TAG_INBYTES;
return 0;
}
int crypto_aead_decrypt(
unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the AEAD implementation goes here,
... generating a plaintext m[0],m[1],...,m[*mlen-1]
... and secret message number nsec[0],nsec[1],...
... from a ciphertext c[0],c[1],...,c[clen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce number npub[0],npub[1],...
... and secret key k[0],k[1],...
...
... return 0;
*/
unsigned long long mlen_;
unsigned char tag_is_match;
(void)nsec;
if (clen < CRYPTO_ABYTES) {
return -1;
}
mlen_ = clen - CRYPTO_ABYTES;
tag_is_match = crypto_aead_decrypt_asm(m, c, mlen_, ad, adlen, npub, k);
if (tag_is_match != 0)
{
memset(m, 0, (size_t)mlen_);
return -1;
}
*mlen = mlen_;
return 0;
}
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
#define x10 r0
#define x11 r1
#define x12 r2
#define x13 r3
#define x14 r4
#define x15 r5
#define x16 r6
#define x17 r7
; an intentional arrangement of registers to facilitate movw
#define x20 r8
#define x21 r10
#define x22 r12
#define x23 r14
#define x24 r9
#define x25 r11
#define x26 r13
#define x27 r15
; an intentional arrangement of registers to facilitate movw
#define x30 r16
#define x35 r18
#define x32 r20
#define x37 r22
#define x34 r17
#define x31 r19
#define x36 r21
#define x33 r23
#define t0j r24
#define t1j r25
#define x0j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
mov t0j, \i1
com \i0
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
ld x10, Y+
ld x11, Y+
ld x12, Y+
ld x13, Y+
ld x14, Y+
ld x15, Y+
ld x16, Y+
ld x17, Y+
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
sbrc AEDH, 2 ; AEDH[2] = 0 for AEAD and AEDH[2] = 1 for HASH
rjmp For_Hash
For_AEAD:
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
rjmp round_loop_start
For_Hash:
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#elif defined(CRYPTO_AEAD)
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
#else
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#endif
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
; SubColumns
Sbox x0j, x10, x20, x30
st Y+, x0j
ld x0j, Y
Sbox x0j, x11, x21, x31
st Y+, x0j
ld x0j, Y
Sbox x0j, x12, x22, x32
st Y+, x0j
ld x0j, Y
Sbox x0j, x13, x23, x33
st Y+, x0j
ld x0j, Y
Sbox x0j, x14, x24, x34
st Y+, x0j
ld x0j, Y
Sbox x0j, x15, x25, x35
st Y+, x0j
ld x0j, Y
Sbox x0j, x16, x26, x36
st Y+, x0j
ld x0j, Y
Sbox x0j, x17, x27, x37
st Y, x0j
; ShiftRows
; <<< 1
mov t0j, x17
rol t0j
rol x10
rol x11
rol x12
rol x13
rol x14
rol x15
rol x16
rol x17
; <<< 8
; 7 6 5 4 3 2 1 0 => 6 5 4 3 2 1 0 7
;mov t0j, x27
;mov x27, x26
;mov x26, x25
;mov x25, x24
;mov x24, x23
;mov x23, x22
;mov x22, x21
;mov x21, x20
;mov x20, t0j
; an intentional arrangement of registers to facilitate movw
movw t0j, x23 ; t1j:t0j <= x27:x23
movw x23, x22 ; x27:x23 <= x26:x22
movw x22, x21 ; x26:x22 <= x25:x21
movw x21, x20 ; x25:x21 <= x24:x20
mov x20, t1j ; x20 <= t1j
mov x24, t0j ; x24 <= t0j
; <<< 1
mov t0j, x37
rol t0j
rol x30
rol x31
rol x32
rol x33
rol x34
rol x35
rol x36
rol x37
; <<< 24
; 7 6 5 4 3 2 1 0 => 4 3 2 1 0 7 6 5
;mov t0j, x30
;mov x30, x35
;mov x35, x32
;mov x32, x37
;mov x37, x34
;mov x34, x31
;mov x31, x36
;mov x36, x33
;mov x33, t0j
; an intentional arrangement of registers to facilitate movw
;x30 r16
;x35 r18
;x32 r20
;x37 r22
;x34 r17
;x31 r19
;x36 r21
;x33 r23
movw t0j, x30 ; t1j:t0j <= x34:x30
movw x30, x35 ; x34:x30 <= x31:x35
movw x35, x32 ; x31:x35 <= x36:x32
movw x32, x37 ; x36:x32 <= x33:x37
mov x37, t1j ; x37 <= x34
mov x33, t0j ; x33 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
st Y+, x10
st Y+, x11
st Y+, x12
st Y+, x13
st Y+, x14
st Y+, x15
st Y+, x16
st Y+, x17
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
POP_CONFLICT
ret
.section .text
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#elif defined(CRYPTO_AEAD)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
#else
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#endif
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
; an intentional arrangement of registers to facilitate movw
#define x20 r0
#define x21 r2
#define x22 r4
#define x23 r6
#define x24 r8
#define x25 r10
#define x26 r1
#define x27 r3
#define x28 r5
#define x29 r7
#define x2a r9
#define x2b r11
; an intentional arrangement of registers to facilitate movw
#define x30 r22
#define x35 r20
#define x3a r18
#define x33 r16
#define x38 r14
#define x31 r12
#define x36 r23
#define x3b r21
#define x34 r19
#define x39 r17
#define x32 r15
#define x37 r13
#define t0j r24
#define t1j r25
#define x0j r25
#define x1j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
ldi t0j, 0xFF
eor \i0, t0j
mov t0j, \i1
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
.macro OneColumn i0, i1, i2, i3
ld \i0, Y
ldd \i1, Y + ROW_INBYTES
Sbox \i0, \i1, \i2, \i3
st Y+, \i0
rol \i1 ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, \i1
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x28, Y+
ld x29, Y+
ld x2a, Y+
ld x2b, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
ld x38, Y+
ld x39, Y+
ld x3a, Y+
ld x3b, Y+
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
ldd x1j, Y + ROW_INBYTES
Sbox x0j, x1j, x20, x30
st Y+, x0j
lsl x1j ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, x1j
OneColumn x0j, x1j, x21, x31
OneColumn x0j, x1j, x22, x32
OneColumn x0j, x1j, x23, x33
OneColumn x0j, x1j, x24, x34
OneColumn x0j, x1j, x25, x35
OneColumn x0j, x1j, x26, x36
OneColumn x0j, x1j, x27, x37
OneColumn x0j, x1j, x28, x38
OneColumn x0j, x1j, x29, x39
OneColumn x0j, x1j, x2a, x3a
OneColumn x0j, x1j, x2b, x3b
ld x1j, Y
eor t0j, t0j
adc x1j, t0j
st Y, x1j
; b a 9 8 7 6 5 4 3 2 1 0
; -- -- -- -- -- -- -- -- -- -- -- x- 0
; -- -- -- -- -- -- -- -- -- -- -- x' 0
; -- -- -- -- -- -- -- -- -- -- x- -- 1
; -- -- -- -- x' -- -- -- -- -- -- -- 7
; 4 3 2 1 0 b a 9 8 7 6 5
; ShiftRows -- the last two rows
; <<< 8
; b a 9 8 7 6 5 4 3 2 1 0 => a 9 8 7 6 5 4 3 2 1 0 b
movw t0j, x25 ; t1j:t0j <= x2b:x25
movw x25, x24 ; x2b:x25 <= x2a:x24
movw x24, x23 ; x2a:x24 <= x29:x23
movw x23, x22 ; x29:x23 <= x28:x22
movw x22, x21 ; x28:x22 <= x27:x21
movw x21, x20 ; x27:x21 <= x26:x20
mov x26, t0j ; x26 <= x25
mov x20, t1j ; x20 <= x2b
; >>> 1
mov t0j, x3b
ror t0j
ror x3a
ror x39
ror x38
ror x37
ror x36
ror x35
ror x34
ror x33
ror x32
ror x31
ror x30
ror x3b
; <<< 56
; b a 9 8 7 6 5 4 3 2 1 0 => 4 3 2 1 0 b a 9 8 7 6 5
; mov x3j, x30
; mov x30, x35
; mov x35, x3a
; mov x3a, x33
; mov x33, x38
; mov x38, x31
; mov x31, x36
; mov x36, x3b
; mov x3b, x34
; mov x34, x39
; mov x39, x32
; mov x32, x37
; mov x37, x3j
; an intentional arrangement of registers to facilitate movw
; x30 r22
; x35 r20
; x3a r18
; x33 r16
; x38 r14
; x31 r12
; x36 r23
; x3b r21
; x34 r19
; x39 r17
; x32 r15
; x37 r13
movw t0j, x30 ; t1j:t0j <= x36:x30
movw x30, x35 ; x36:x30 <= x3b:x35
movw x35, x3a ; x3b:x35 <= x34:x3a
movw x3a, x33 ; x34:x3a <= x39:x33
movw x33, x38 ; x39:x33 <= x32:x38
movw x38, x31 ; x32:x38 <= x37:x31
mov x31, t1j ; x31 <= x36
mov x37, t0j ; x37 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x28
st Y+, x29
st Y+, x2a
st Y+, x2b
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
st Y+, x38
st Y+, x39
st Y+, x3a
st Y+, x3b
POP_CONFLICT
ret
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
;
; ============================================
; R E G I S T E R D E F I N I T I O N S
; ============================================
;
#define mclen r16
#define radlen r17
#define tcnt r17
#define tmp0 r20
#define tmp1 r21
#define cnt0 r22
#define rn r23
#define rate r24
; AEDH = 0b000: for authenticating AD
; AEDH = 0b001: for encryption
; AEDH = 0b011: for decryption
; AEDH = 0b100: for hash
#define AEDH r25
#define rcnt r26
#if (STATE_INBITS==256)
#include "knot256.h"
#elif (STATE_INBITS==384)
#include "knot384.h"
#elif (STATE_INBITS==512)
#include "knot512.h"
#else
#error "Not specified key size and state size"
#endif
#define CRYPTO_KEYBYTES 24
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 24
#define CRYPTO_ABYTES 24
#define CRYPTO_NOOVERLAP 1
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
.macro PUSH_CONFLICT
push r16
push r17
push r18
push r19
push r23
push r24
push r25
push r26
push r27
push r28
push r29
push r30
push r31
.endm
.macro POP_CONFLICT
pop r31
pop r30
pop r29
pop r28
pop r27
pop r26
pop r25
pop r24
pop r23
pop r19
pop r18
pop r17
pop r16
.endm
.macro PUSH_ALL
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
push r28
push r29
.endm
.macro POP_ALL
pop r29
pop r28
pop r17
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
clr r1
.endm
\ No newline at end of file
#ifndef __CONFIG_H__
#define __CONFIG_H__
#define CRYPTO_AEAD
//#define CRYPTO_HASH
#define MAX_MESSAGE_LENGTH 128
#define STATE_INBITS 384
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBITS 192
/* For CRYPTO_HASH */
#define CRYPTO_BITS 384
#define STATE_INBYTES ((STATE_INBITS + 7) / 8)
#define ROW_INBITS ((STATE_INBITS + 3) / 4)
#define ROW_INBYTES ((ROW_INBITS + 7) / 8)
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBYTES ((CRYPTO_KEYBITS + 7) / 8)
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES CRYPTO_KEYBYTES
#define CRYPTO_ABYTES CRYPTO_KEYBYTES
#define CRYPTO_NOOVERLAP 1
#define MAX_ASSOCIATED_DATA_LENGTH 32
#define MAX_CIPHER_LENGTH (MAX_MESSAGE_LENGTH + CRYPTO_ABYTES)
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
/* For CRYPTO_HASH */
#define CRYPTO_BYTES ((CRYPTO_BITS + 7) / 8)
#define DOMAIN_BITS 0x80
#define PAD_BITS 0x01
#define S384_R192_BITS 0x80
#if (STATE_INBITS==256)
#define C1 1
#define C2 8
#define C3 25
#elif (STATE_INBITS==384)
#define C1 1
#define C2 8
#define C3 55
#elif (STATE_INBITS==512)
#define C1 1
#define C2 16
#define C3 25
#else
#error "Not specified state size"
#endif
#ifdef CRYPTO_AEAD
/* For CRYPTO_AEAD */
#define KEY_INBITS (CRYPTO_KEYBYTES * 8)
#define KEY_INBYTES (CRYPTO_KEYBYTES)
#define NONCE_INBITS (CRYPTO_NPUBBYTES * 8)
#define NONCE_INBYTES (CRYPTO_NPUBBYTES)
#define TAG_INBITS (CRYPTO_ABYTES * 8)
#define TAG_INBYTES (CRYPTO_ABYTES)
#if (KEY_INBITS==128) && (STATE_INBITS==256)
#define RATE_INBITS 64
#define NR_0 52
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==128) && (STATE_INBITS==384)
#define RATE_INBITS 192
#define NR_0 76
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==192) && (STATE_INBITS==384)
#define RATE_INBITS 96
#define NR_0 76
#define NR_i 40
#define NR_f 44
#elif (KEY_INBITS==256) && (STATE_INBITS==512)
#define RATE_INBITS 128
#define NR_0 100
#define NR_i 52
#define NR_f 56
#else
#error "Not specified key size and state size"
#endif
#define RATE_INBYTES ((RATE_INBITS + 7) / 8)
#define SQUEEZE_RATE_INBYTES TAG_INBYTES
#endif
#ifdef CRYPTO_HASH
/* For CRYPTO_HASH */
#define HASH_DIGEST_INBITS (CRYPTO_BYTES * 8)
#if (HASH_DIGEST_INBITS==256) && (STATE_INBITS==256)
#define HASH_RATE_INBITS 32
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 68
#elif (HASH_DIGEST_INBITS==256) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 128
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 80
#elif (HASH_DIGEST_INBITS==384) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 48
#define HASH_SQUEEZE_RATE_INBITS 192
#define NR_h 104
#elif (HASH_DIGEST_INBITS==512) && (STATE_INBITS==512)
#define HASH_RATE_INBITS 64
#define HASH_SQUEEZE_RATE_INBITS 256
#define NR_h 140
#else
#error "Not specified hash digest size and state size"
#endif
#define HASH_RATE_INBYTES ((HASH_RATE_INBITS + 7) / 8)
#define HASH_SQUEEZE_RATE_INBYTES ((HASH_SQUEEZE_RATE_INBITS + 7) / 8)
#endif
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
#endif
\ No newline at end of file
#ifdef __cplusplus
extern "C" {
#endif
int crypto_aead_encrypt(
unsigned char *c,unsigned long long *clen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
);
int crypto_aead_decrypt(
unsigned char *m,unsigned long long *outputmlen,
unsigned char *nsec,
const unsigned char *c,unsigned long long clen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
);
#ifdef __cplusplus
}
#endif
#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
extern void crypto_aead_encrypt_asm(
unsigned char *c,
const unsigned char *m,
unsigned char mlen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern int crypto_aead_decrypt_asm(
unsigned char *m,
const unsigned char *c,
unsigned char clen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern void crypto_hash_asm(
unsigned char *out,
const unsigned char *in,
unsigned char inlen
);
int crypto_aead_encrypt(
unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the cipher implementation goes here,
... generating a ciphertext c[0],c[1],...,c[*clen-1]
... from a plaintext m[0],m[1],...,m[mlen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce npub[0],npub[1],..
... and secret key k[0],k[1],...
... the implementation shall not use nsec
...
... return 0;
*/
(void)nsec;
crypto_aead_encrypt_asm(c, m, mlen, ad, adlen, npub, k);
*clen = mlen + TAG_INBYTES;
return 0;
}
int crypto_aead_decrypt(
unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the AEAD implementation goes here,
... generating a plaintext m[0],m[1],...,m[*mlen-1]
... and secret message number nsec[0],nsec[1],...
... from a ciphertext c[0],c[1],...,c[clen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce number npub[0],npub[1],...
... and secret key k[0],k[1],...
...
... return 0;
*/
unsigned long long mlen_;
unsigned char tag_is_match;
(void)nsec;
if (clen < CRYPTO_ABYTES) {
return -1;
}
mlen_ = clen - CRYPTO_ABYTES;
tag_is_match = crypto_aead_decrypt_asm(m, c, mlen_, ad, adlen, npub, k);
if (tag_is_match != 0)
{
memset(m, 0, (size_t)mlen_);
return -1;
}
*mlen = mlen_;
return 0;
}
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
#define x10 r0
#define x11 r1
#define x12 r2
#define x13 r3
#define x14 r4
#define x15 r5
#define x16 r6
#define x17 r7
; an intentional arrangement of registers to facilitate movw
#define x20 r8
#define x21 r10
#define x22 r12
#define x23 r14
#define x24 r9
#define x25 r11
#define x26 r13
#define x27 r15
; an intentional arrangement of registers to facilitate movw
#define x30 r16
#define x35 r18
#define x32 r20
#define x37 r22
#define x34 r17
#define x31 r19
#define x36 r21
#define x33 r23
#define t0j r24
#define t1j r25
#define x0j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
mov t0j, \i1
com \i0
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
ld x10, Y+
ld x11, Y+
ld x12, Y+
ld x13, Y+
ld x14, Y+
ld x15, Y+
ld x16, Y+
ld x17, Y+
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
sbrc AEDH, 2 ; AEDH[2] = 0 for AEAD and AEDH[2] = 1 for HASH
rjmp For_Hash
For_AEAD:
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
rjmp round_loop_start
For_Hash:
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#elif defined(CRYPTO_AEAD)
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
#else
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#endif
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
; SubColumns
Sbox x0j, x10, x20, x30
st Y+, x0j
ld x0j, Y
Sbox x0j, x11, x21, x31
st Y+, x0j
ld x0j, Y
Sbox x0j, x12, x22, x32
st Y+, x0j
ld x0j, Y
Sbox x0j, x13, x23, x33
st Y+, x0j
ld x0j, Y
Sbox x0j, x14, x24, x34
st Y+, x0j
ld x0j, Y
Sbox x0j, x15, x25, x35
st Y+, x0j
ld x0j, Y
Sbox x0j, x16, x26, x36
st Y+, x0j
ld x0j, Y
Sbox x0j, x17, x27, x37
st Y, x0j
; ShiftRows
; <<< 1
mov t0j, x17
rol t0j
rol x10
rol x11
rol x12
rol x13
rol x14
rol x15
rol x16
rol x17
; <<< 8
; 7 6 5 4 3 2 1 0 => 6 5 4 3 2 1 0 7
;mov t0j, x27
;mov x27, x26
;mov x26, x25
;mov x25, x24
;mov x24, x23
;mov x23, x22
;mov x22, x21
;mov x21, x20
;mov x20, t0j
; an intentional arrangement of registers to facilitate movw
movw t0j, x23 ; t1j:t0j <= x27:x23
movw x23, x22 ; x27:x23 <= x26:x22
movw x22, x21 ; x26:x22 <= x25:x21
movw x21, x20 ; x25:x21 <= x24:x20
mov x20, t1j ; x20 <= t1j
mov x24, t0j ; x24 <= t0j
; <<< 1
mov t0j, x37
rol t0j
rol x30
rol x31
rol x32
rol x33
rol x34
rol x35
rol x36
rol x37
; <<< 24
; 7 6 5 4 3 2 1 0 => 4 3 2 1 0 7 6 5
;mov t0j, x30
;mov x30, x35
;mov x35, x32
;mov x32, x37
;mov x37, x34
;mov x34, x31
;mov x31, x36
;mov x36, x33
;mov x33, t0j
; an intentional arrangement of registers to facilitate movw
;x30 r16
;x35 r18
;x32 r20
;x37 r22
;x34 r17
;x31 r19
;x36 r21
;x33 r23
movw t0j, x30 ; t1j:t0j <= x34:x30
movw x30, x35 ; x34:x30 <= x31:x35
movw x35, x32 ; x31:x35 <= x36:x32
movw x32, x37 ; x36:x32 <= x33:x37
mov x37, t1j ; x37 <= x34
mov x33, t0j ; x33 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
st Y+, x10
st Y+, x11
st Y+, x12
st Y+, x13
st Y+, x14
st Y+, x15
st Y+, x16
st Y+, x17
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
POP_CONFLICT
ret
.section .text
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#elif defined(CRYPTO_AEAD)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
#else
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#endif
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
; an intentional arrangement of registers to facilitate movw
#define x20 r0
#define x21 r2
#define x22 r4
#define x23 r6
#define x24 r8
#define x25 r10
#define x26 r1
#define x27 r3
#define x28 r5
#define x29 r7
#define x2a r9
#define x2b r11
; an intentional arrangement of registers to facilitate movw
#define x30 r22
#define x35 r20
#define x3a r18
#define x33 r16
#define x38 r14
#define x31 r12
#define x36 r23
#define x3b r21
#define x34 r19
#define x39 r17
#define x32 r15
#define x37 r13
#define t0j r24
#define t1j r25
#define x0j r25
#define x1j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
ldi t0j, 0xFF
eor \i0, t0j
mov t0j, \i1
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
.macro OneColumn i0, i1, i2, i3
ld \i0, Y
ldd \i1, Y + ROW_INBYTES
Sbox \i0, \i1, \i2, \i3
st Y+, \i0
rol \i1 ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, \i1
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x28, Y+
ld x29, Y+
ld x2a, Y+
ld x2b, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
ld x38, Y+
ld x39, Y+
ld x3a, Y+
ld x3b, Y+
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
ldd x1j, Y + ROW_INBYTES
Sbox x0j, x1j, x20, x30
st Y+, x0j
lsl x1j ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, x1j
OneColumn x0j, x1j, x21, x31
OneColumn x0j, x1j, x22, x32
OneColumn x0j, x1j, x23, x33
OneColumn x0j, x1j, x24, x34
OneColumn x0j, x1j, x25, x35
OneColumn x0j, x1j, x26, x36
OneColumn x0j, x1j, x27, x37
OneColumn x0j, x1j, x28, x38
OneColumn x0j, x1j, x29, x39
OneColumn x0j, x1j, x2a, x3a
OneColumn x0j, x1j, x2b, x3b
ld x1j, Y
eor t0j, t0j
adc x1j, t0j
st Y, x1j
; b a 9 8 7 6 5 4 3 2 1 0
; -- -- -- -- -- -- -- -- -- -- -- x- 0
; -- -- -- -- -- -- -- -- -- -- -- x' 0
; -- -- -- -- -- -- -- -- -- -- x- -- 1
; -- -- -- -- x' -- -- -- -- -- -- -- 7
; 4 3 2 1 0 b a 9 8 7 6 5
; ShiftRows -- the last two rows
; <<< 8
; b a 9 8 7 6 5 4 3 2 1 0 => a 9 8 7 6 5 4 3 2 1 0 b
movw t0j, x25 ; t1j:t0j <= x2b:x25
movw x25, x24 ; x2b:x25 <= x2a:x24
movw x24, x23 ; x2a:x24 <= x29:x23
movw x23, x22 ; x29:x23 <= x28:x22
movw x22, x21 ; x28:x22 <= x27:x21
movw x21, x20 ; x27:x21 <= x26:x20
mov x26, t0j ; x26 <= x25
mov x20, t1j ; x20 <= x2b
; >>> 1
mov t0j, x3b
ror t0j
ror x3a
ror x39
ror x38
ror x37
ror x36
ror x35
ror x34
ror x33
ror x32
ror x31
ror x30
ror x3b
; <<< 56
; b a 9 8 7 6 5 4 3 2 1 0 => 4 3 2 1 0 b a 9 8 7 6 5
; mov x3j, x30
; mov x30, x35
; mov x35, x3a
; mov x3a, x33
; mov x33, x38
; mov x38, x31
; mov x31, x36
; mov x36, x3b
; mov x3b, x34
; mov x34, x39
; mov x39, x32
; mov x32, x37
; mov x37, x3j
; an intentional arrangement of registers to facilitate movw
; x30 r22
; x35 r20
; x3a r18
; x33 r16
; x38 r14
; x31 r12
; x36 r23
; x3b r21
; x34 r19
; x39 r17
; x32 r15
; x37 r13
movw t0j, x30 ; t1j:t0j <= x36:x30
movw x30, x35 ; x36:x30 <= x3b:x35
movw x35, x3a ; x3b:x35 <= x34:x3a
movw x3a, x33 ; x34:x3a <= x39:x33
movw x33, x38 ; x39:x33 <= x32:x38
movw x38, x31 ; x32:x38 <= x37:x31
mov x31, t1j ; x31 <= x36
mov x37, t0j ; x37 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x28
st Y+, x29
st Y+, x2a
st Y+, x2b
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
st Y+, x38
st Y+, x39
st Y+, x3a
st Y+, x3b
POP_CONFLICT
ret
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
\ No newline at end of file
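/*
 * Hedged C sketch (not part of the submission): how the assembly above
 * realises the row-3 rotation of the 384-bit-state permutation.  Row 3 is
 * a 12-byte (96-bit) little-endian row kept in registers; a left rotation
 * by 55 bits is carried out as a right rotation by 1 bit (the ror chain)
 * followed by a left rotation by 56 bits, i.e. 7 whole bytes (the movw
 * shuffle).  Names and layout here are illustrative assumptions.
 */
#include <stdint.h>
#include <string.h>

#define ROW_BYTES 12   /* ROW_INBYTES for STATE_INBITS == 384 */

/* right-rotate a 12-byte little-endian row by one bit */
static void row_rotr1(uint8_t r[ROW_BYTES])
{
    uint8_t wrap = (uint8_t)(r[0] & 1u);          /* bit that wraps to the top byte */
    for (int i = 0; i < ROW_BYTES - 1; i++)
        r[i] = (uint8_t)((r[i] >> 1) | (r[i + 1] << 7));
    r[ROW_BYTES - 1] = (uint8_t)((r[ROW_BYTES - 1] >> 1) | (wrap << 7));
}

/* left-rotate a 12-byte row by a whole number of bytes */
static void row_rotl_bytes(uint8_t r[ROW_BYTES], int nbytes)
{
    uint8_t tmp[ROW_BYTES];
    for (int i = 0; i < ROW_BYTES; i++)
        tmp[(i + nbytes) % ROW_BYTES] = r[i];
    memcpy(r, tmp, ROW_BYTES);
}

/* row <<< 55  ==  (row >>> 1) then (row <<< 56), as in the code above */
static void row_rotl55(uint8_t r[ROW_BYTES])
{
    row_rotr1(r);
    row_rotl_bytes(r, 7);
}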
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
;
; ============================================
; R E G I S T E R D E F I N I T I O N S
; ============================================
;
#define mclen r16
#define radlen r17
#define tcnt r17
#define tmp0 r20
#define tmp1 r21
#define cnt0 r22
#define rn r23
#define rate r24
; AEDH = 0b000: for authenticating AD
; AEDH = 0b001: for encryption
; AEDH = 0b011: for decryption
; AEDH = 0b100: for hash
#define AEDH r25
#define rcnt r26
#if (STATE_INBITS==256)
#include "knot256.h"
#elif (STATE_INBITS==384)
#include "knot384.h"
#elif (STATE_INBITS==512)
#include "knot512.h"
#else
#error "Not specified key size and state size"
#endif
#define CRYPTO_KEYBYTES 32
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 32
#define CRYPTO_ABYTES 32
#define CRYPTO_NOOVERLAP 1
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
.macro PUSH_CONFLICT
push r16
push r17
push r18
push r19
push r23
push r24
push r25
push r26
push r27
push r28
push r29
push r30
push r31
.endm
.macro POP_CONFLICT
pop r31
pop r30
pop r29
pop r28
pop r27
pop r26
pop r25
pop r24
pop r23
pop r19
pop r18
pop r17
pop r16
.endm
.macro PUSH_ALL
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
push r28
push r29
.endm
.macro POP_ALL
pop r29
pop r28
pop r17
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
clr r1
.endm
\ No newline at end of file
#ifndef __CONFIG_H__
#define __CONFIG_H__
#define CRYPTO_AEAD
//#define CRYPTO_HASH
#define MAX_MESSAGE_LENGTH 128
#define STATE_INBITS 512
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBITS 256
/* For CRYPTO_HASH */
#define CRYPTO_BITS 512
#define STATE_INBYTES ((STATE_INBITS + 7) / 8)
#define ROW_INBITS ((STATE_INBITS + 3) / 4)
#define ROW_INBYTES ((ROW_INBITS + 7) / 8)
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBYTES ((CRYPTO_KEYBITS + 7) / 8)
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES CRYPTO_KEYBYTES
#define CRYPTO_ABYTES CRYPTO_KEYBYTES
#define CRYPTO_NOOVERLAP 1
#define MAX_ASSOCIATED_DATA_LENGTH 32
#define MAX_CIPHER_LENGTH (MAX_MESSAGE_LENGTH + CRYPTO_ABYTES)
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
/* For CRYPTO_HASH */
#define CRYPTO_BYTES ((CRYPTO_BITS + 7) / 8)
#define DOMAIN_BITS 0x80
#define PAD_BITS 0x01
#define S384_R192_BITS 0x80
#if (STATE_INBITS==256)
#define C1 1
#define C2 8
#define C3 25
#elif (STATE_INBITS==384)
#define C1 1
#define C2 8
#define C3 55
#elif (STATE_INBITS==512)
#define C1 1
#define C2 16
#define C3 25
#else
#error "Not specified state size"
#endif
#ifdef CRYPTO_AEAD
/* For CRYPTO_AEAD */
#define KEY_INBITS (CRYPTO_KEYBYTES * 8)
#define KEY_INBYTES (CRYPTO_KEYBYTES)
#define NONCE_INBITS (CRYPTO_NPUBBYTES * 8)
#define NONCE_INBYTES (CRYPTO_NPUBBYTES)
#define TAG_INBITS (CRYPTO_ABYTES * 8)
#define TAG_INBYTES (CRYPTO_ABYTES)
#if (KEY_INBITS==128) && (STATE_INBITS==256)
#define RATE_INBITS 64
#define NR_0 52
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==128) && (STATE_INBITS==384)
#define RATE_INBITS 192
#define NR_0 76
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==192) && (STATE_INBITS==384)
#define RATE_INBITS 96
#define NR_0 76
#define NR_i 40
#define NR_f 44
#elif (KEY_INBITS==256) && (STATE_INBITS==512)
#define RATE_INBITS 128
#define NR_0 100
#define NR_i 52
#define NR_f 56
#else
#error "Not specified key size and state size"
#endif
#define RATE_INBYTES ((RATE_INBITS + 7) / 8)
#define SQUEEZE_RATE_INBYTES TAG_INBYTES
#endif
#ifdef CRYPTO_HASH
/* For CRYPTO_HASH */
#define HASH_DIGEST_INBITS (CRYPTO_BYTES * 8)
#if (HASH_DIGEST_INBITS==256) && (STATE_INBITS==256)
#define HASH_RATE_INBITS 32
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 68
#elif (HASH_DIGEST_INBITS==256) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 128
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 80
#elif (HASH_DIGEST_INBITS==384) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 48
#define HASH_SQUEEZE_RATE_INBITS 192
#define NR_h 104
#elif (HASH_DIGEST_INBITS==512) && (STATE_INBITS==512)
#define HASH_RATE_INBITS 64
#define HASH_SQUEEZE_RATE_INBITS 256
#define NR_h 140
#else
#error "Not specified hash digest size and state size"
#endif
#define HASH_RATE_INBYTES ((HASH_RATE_INBITS + 7) / 8)
#define HASH_SQUEEZE_RATE_INBYTES ((HASH_SQUEEZE_RATE_INBITS + 7) / 8)
#endif
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
#endif
\ No newline at end of file
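/*
 * Hedged sketch (not part of the submission): a few compile-time checks
 * that the parameter derivation in the config.h above is self-consistent
 * for the selected 256-bit-key, 512-bit-state AEAD member
 * (STATE_INBITS 512, CRYPTO_KEYBITS 256).  The checks themselves are
 * illustrative assumptions, not project code.
 */
#include "config.h"

_Static_assert(KEY_INBITS == CRYPTO_KEYBITS,
               "derived key size must match the configured key size");
_Static_assert(RATE_INBITS < STATE_INBITS,
               "the rate must leave a non-empty capacity");
_Static_assert(MAX_CIPHER_LENGTH == MAX_MESSAGE_LENGTH + CRYPTO_ABYTES,
               "ciphertext bound is message bound plus tag");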
#ifdef __cplusplus
extern "C" {
#endif
int crypto_aead_encrypt(
unsigned char *c,unsigned long long *clen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
);
int crypto_aead_decrypt(
unsigned char *m,unsigned long long *outputmlen,
unsigned char *nsec,
const unsigned char *c,unsigned long long clen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
);
#ifdef __cplusplus
}
#endif
#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
extern void crypto_aead_encrypt_asm(
unsigned char *c,
const unsigned char *m,
unsigned char mlen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern int crypto_aead_decrypt_asm(
unsigned char *m,
const unsigned char *c,
unsigned char clen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern void crypto_hash_asm(
unsigned char *out,
const unsigned char *in,
unsigned char inlen
);
int crypto_aead_encrypt(
unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the cipher implementation goes here,
... generating a ciphertext c[0],c[1],...,c[*clen-1]
... from a plaintext m[0],m[1],...,m[mlen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce npub[0],npub[1],..
... and secret key k[0],k[1],...
... the implementation shall not use nsec
...
... return 0;
*/
(void)nsec;
crypto_aead_encrypt_asm(c, m, mlen, ad, adlen, npub, k);
*clen = mlen + TAG_INBYTES;
return 0;
}
int crypto_aead_decrypt(
unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the AEAD implementation goes here,
... generating a plaintext m[0],m[1],...,m[*mlen-1]
... and secret message number nsec[0],nsec[1],...
... from a ciphertext c[0],c[1],...,c[clen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce number npub[0],npub[1],...
... and secret key k[0],k[1],...
...
... return 0;
*/
unsigned long long mlen_;
unsigned char tag_is_match;
(void)nsec;
if (clen < CRYPTO_ABYTES) {
return -1;
}
mlen_ = clen - CRYPTO_ABYTES;
tag_is_match = crypto_aead_decrypt_asm(m, c, mlen_, ad, adlen, npub, k);
if (tag_is_match != 0)
{
memset(m, 0, (size_t)mlen_);
return -1;
}
*mlen = mlen_;
return 0;
}
\ No newline at end of file
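/*
 * Hedged usage sketch (not part of the submission): a minimal encrypt/
 * decrypt round trip through the wrappers above.  It relies only on the
 * prototypes shown earlier and the constants from api.h, and assumes the
 * message fits MAX_MESSAGE_LENGTH, since the assembly entry points take
 * 8-bit length arguments.  An all-zero key and nonce are used purely as
 * demo values.
 */
#include <string.h>
#include "api.h"

int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec, const unsigned char *npub,
                        const unsigned char *k);
int crypto_aead_decrypt(unsigned char *m, unsigned long long *mlen,
                        unsigned char *nsec,
                        const unsigned char *c, unsigned long long clen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *npub, const unsigned char *k);

static int roundtrip_once(void)
{
    unsigned char k[CRYPTO_KEYBYTES] = {0};      /* demo key   */
    unsigned char npub[CRYPTO_NPUBBYTES] = {0};  /* demo nonce */
    unsigned char m[8] = { 'K', 'N', 'O', 'T', 0, 1, 2, 3 };
    unsigned char c[sizeof m + CRYPTO_ABYTES];
    unsigned char m2[sizeof m];
    unsigned long long clen = 0, m2len = 0;

    if (crypto_aead_encrypt(c, &clen, m, sizeof m, NULL, 0, NULL, npub, k) != 0)
        return -1;
    if (crypto_aead_decrypt(m2, &m2len, NULL, c, clen, NULL, 0, npub, k) != 0)
        return -1;                               /* tag did not verify */
    return memcmp(m, m2, sizeof m) == 0 ? 0 : -1;
}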
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
#define x10 r0
#define x11 r1
#define x12 r2
#define x13 r3
#define x14 r4
#define x15 r5
#define x16 r6
#define x17 r7
; an intentional arrangement of registers to facilitate movw
#define x20 r8
#define x21 r10
#define x22 r12
#define x23 r14
#define x24 r9
#define x25 r11
#define x26 r13
#define x27 r15
; an intentional arrangement of registers to facilitate movw
#define x30 r16
#define x35 r18
#define x32 r20
#define x37 r22
#define x34 r17
#define x31 r19
#define x36 r21
#define x33 r23
#define t0j r24
#define t1j r25
#define x0j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
mov t0j, \i1
com \i0
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
ld x10, Y+
ld x11, Y+
ld x12, Y+
ld x13, Y+
ld x14, Y+
ld x15, Y+
ld x16, Y+
ld x17, Y+
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
sbrc AEDH, 2 ; AEDH[2] = 0 for AEAD and AEDH[2] = 1 for HASH
rjmp For_Hash
For_AEAD:
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
rjmp round_loop_start
For_Hash:
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#elif defined(CRYPTO_AEAD)
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
#else
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#endif
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
; SubColumns
Sbox x0j, x10, x20, x30
st Y+, x0j
ld x0j, Y
Sbox x0j, x11, x21, x31
st Y+, x0j
ld x0j, Y
Sbox x0j, x12, x22, x32
st Y+, x0j
ld x0j, Y
Sbox x0j, x13, x23, x33
st Y+, x0j
ld x0j, Y
Sbox x0j, x14, x24, x34
st Y+, x0j
ld x0j, Y
Sbox x0j, x15, x25, x35
st Y+, x0j
ld x0j, Y
Sbox x0j, x16, x26, x36
st Y+, x0j
ld x0j, Y
Sbox x0j, x17, x27, x37
st Y, x0j
; ShiftRows
; <<< 1
mov t0j, x17
rol t0j
rol x10
rol x11
rol x12
rol x13
rol x14
rol x15
rol x16
rol x17
; <<< 8
; 7 6 5 4 3 2 1 0 => 6 5 4 3 2 1 0 7
;mov t0j, x27
;mov x27, x26
;mov x26, x25
;mov x25, x24
;mov x24, x23
;mov x23, x22
;mov x22, x21
;mov x21, x20
;mov x20, t0j
; an intentional arrangement of registers to facilitate movw
movw t0j, x23 ; t1j:t0j <= x27:x23
movw x23, x22 ; x27:x23 <= x26:x22
movw x22, x21 ; x26:x22 <= x25:x21
movw x21, x20 ; x25:x21 <= x24:x20
mov x20, t1j ; x20 <= x27
mov x24, t0j ; x24 <= x23
; <<< 1
mov t0j, x37
rol t0j
rol x30
rol x31
rol x32
rol x33
rol x34
rol x35
rol x36
rol x37
; <<< 24
; 7 6 5 4 3 2 1 0 => 4 3 2 1 0 7 6 5
;mov t0j, x30
;mov x30, x35
;mov x35, x32
;mov x32, x37
;mov x37, x34
;mov x34, x31
;mov x31, x36
;mov x36, x33
;mov x33, t0j
; an intentional arrangement of registers to facilitate movw
;x30 r16
;x35 r18
;x32 r20
;x37 r22
;x34 r17
;x31 r19
;x36 r21
;x33 r23
movw t0j, x30 ; t1j:t0j <= x34:x30
movw x30, x35 ; x34:x30 <= x31:x35
movw x35, x32 ; x31:x35 <= x36:x32
movw x32, x37 ; x36:x32 <= x33:x37
mov x37, t1j ; x37 <= x34
mov x33, t0j ; x33 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
st Y+, x10
st Y+, x11
st Y+, x12
st Y+, x13
st Y+, x14
st Y+, x15
st Y+, x16
st Y+, x17
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
POP_CONFLICT
ret
.section .text
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#elif defined(CRYPTO_AEAD)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
#else
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#endif
\ No newline at end of file
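/*
 * Hedged C reference sketch (not part of the submission) of one round of
 * the 256-bit KNOT permutation as computed by the assembly above: the
 * bit-sliced S-box transcribed from the Sbox macro, followed by the row
 * rotations C1 = 1, C2 = 8 and C3 = 25 from config.h.  Rows are modelled
 * as little-endian uint64_t words (the assembly instead keeps row 0 in
 * SRAM and works byte by byte); the round constant is XORed into the
 * least significant byte of row 0, matching AddRC above.
 */
#include <stdint.h>

static uint64_t rotl64(uint64_t x, unsigned n)
{
    return (x << n) | (x >> (64u - n));
}

static void knot256_round(uint64_t s[4], uint8_t rc)
{
    uint64_t a = s[0] ^ rc, b = s[1], c = s[2], d = s[3], t;

    /* SubColumns: transcription of the Sbox macro (i0=a, i1=b, i2=c, i3=d) */
    t = b;        /* mov t0j, i1 */
    a = ~a;       /* com i0      */
    b &= a;       /* and i1, i0  */
    b ^= c;       /* eor i1, i2  */
    c |= t;       /* or  i2, t0j */
    a ^= d;       /* eor i0, i3  */
    c ^= a;       /* eor i2, i0  */
    t ^= d;       /* eor t0j, i3 */
    a &= b;       /* and i0, i1  */
    d ^= b;       /* eor i3, i1  */
    a ^= t;       /* eor i0, t0j */
    t &= c;       /* and t0j, i2 */
    b ^= t;       /* eor i1, t0j */

    /* ShiftRows */
    s[0] = a;
    s[1] = rotl64(b, 1);    /* C1 */
    s[2] = rotl64(c, 8);    /* C2 */
    s[3] = rotl64(d, 25);   /* C3 */
}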
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
; an intentional arrangement of registers to facilitate movw
#define x20 r0
#define x21 r2
#define x22 r4
#define x23 r6
#define x24 r8
#define x25 r10
#define x26 r1
#define x27 r3
#define x28 r5
#define x29 r7
#define x2a r9
#define x2b r11
; an intentional arrangement of registers to facilitate movw
#define x30 r22
#define x35 r20
#define x3a r18
#define x33 r16
#define x38 r14
#define x31 r12
#define x36 r23
#define x3b r21
#define x34 r19
#define x39 r17
#define x32 r15
#define x37 r13
#define t0j r24
#define t1j r25
#define x0j r25
#define x1j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
ldi t0j, 0xFF
eor \i0, t0j
mov t0j, \i1
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
.macro OneColumn i0, i1, i2, i3
ld \i0, Y
ldd \i1, Y + ROW_INBYTES
Sbox \i0, \i1, \i2, \i3
st Y+, \i0
rol \i1 ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, \i1
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x28, Y+
ld x29, Y+
ld x2a, Y+
ld x2b, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
ld x38, Y+
ld x39, Y+
ld x3a, Y+
ld x3b, Y+
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
ldd x1j, Y + ROW_INBYTES
Sbox x0j, x1j, x20, x30
st Y+, x0j
lsl x1j ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, x1j
OneColumn x0j, x1j, x21, x31
OneColumn x0j, x1j, x22, x32
OneColumn x0j, x1j, x23, x33
OneColumn x0j, x1j, x24, x34
OneColumn x0j, x1j, x25, x35
OneColumn x0j, x1j, x26, x36
OneColumn x0j, x1j, x27, x37
OneColumn x0j, x1j, x28, x38
OneColumn x0j, x1j, x29, x39
OneColumn x0j, x1j, x2a, x3a
OneColumn x0j, x1j, x2b, x3b
ld x1j, Y
eor t0j, t0j
adc x1j, t0j
st Y, x1j
; b a 9 8 7 6 5 4 3 2 1 0
; -- -- -- -- -- -- -- -- -- -- -- x- 0
; -- -- -- -- -- -- -- -- -- -- -- x' 0
; -- -- -- -- -- -- -- -- -- -- x- -- 1
; -- -- -- -- x' -- -- -- -- -- -- -- 7
; 4 3 2 1 0 b a 9 8 7 6 5
; ShiftRows -- the last two rows
; <<< 8
; b a 9 8 7 6 5 4 3 2 1 0 => a 9 8 7 6 5 4 3 2 1 0 b
movw t0j, x25 ; t1j:t0j <= x2b:x25
movw x25, x24 ; x2b:x25 <= x2a:x24
movw x24, x23 ; x2a:x24 <= x29:x23
movw x23, x22 ; x29:x23 <= x28:x22
movw x22, x21 ; x28:x22 <= x27:x21
movw x21, x20 ; x27:x21 <= x26:x20
mov x26, t0j ; x26 <= x25
mov x20, t1j ; x20 <= x2b
; >>> 1
mov t0j, x3b
ror t0j
ror x3a
ror x39
ror x38
ror x37
ror x36
ror x35
ror x34
ror x33
ror x32
ror x31
ror x30
ror x3b
; <<< 56
; b a 9 8 7 6 5 4 3 2 1 0 => 4 3 2 1 0 b a 9 8 7 6 5
; mov x3j, x30
; mov x30, x35
; mov x35, x3a
; mov x3a, x33
; mov x33, x38
; mov x38, x31
; mov x31, x36
; mov x36, x3b
; mov x3b, x34
; mov x34, x39
; mov x39, x32
; mov x32, x37
; mov x37, x3j
; an intentional arrangement of registers to facilitate movw
; x30 r22
; x35 r20
; x3a r18
; x33 r16
; x38 r14
; x31 r12
; x36 r23
; x3b r21
; x34 r19
; x39 r17
; x32 r15
; x37 r13
movw t0j, x30 ; t1j:t0j <= x36:x30
movw x30, x35 ; x36:x30 <= x3b:x35
movw x35, x3a ; x3b:x35 <= x34:x3a
movw x3a, x33 ; x34:x3a <= x39:x33
movw x33, x38 ; x39:x33 <= x32:x38
movw x38, x31 ; x32:x38 <= x37:x31
mov x31, t1j ; x31 <= x36
mov x37, t0j ; x37 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x28
st Y+, x29
st Y+, x2a
st Y+, x2b
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
st Y+, x38
st Y+, x39
st Y+, x3a
st Y+, x3b
POP_CONFLICT
ret
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
;
; ============================================
; R E G I S T E R D E F I N I T I O N S
; ============================================
;
#define mclen r16
#define radlen r17
#define tcnt r17
#define tmp0 r20
#define tmp1 r21
#define cnt0 r22
#define rn r23
#define rate r24
; AEDH = 0b000: for authenticating AD
; AEDH = 0b001: for encryption
; AEDH = 0b011: for decryption
; AEDH = 0b100: for hash
#define AEDH r25
#define rcnt r26
#if (STATE_INBITS==256)
#include "knot256.h"
#elif (STATE_INBITS==384)
#include "knot384.h"
#elif (STATE_INBITS==512)
#include "knot512.h"
#else
#error "Not specified key size and state size"
#endif
#define CRYPTO_BYTES 32
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
.macro PUSH_CONFLICT
push r16
push r17
push r18
push r19
push r23
push r24
push r25
push r26
push r27
push r28
push r29
push r30
push r31
.endm
.macro POP_CONFLICT
pop r31
pop r30
pop r29
pop r28
pop r27
pop r26
pop r25
pop r24
pop r23
pop r19
pop r18
pop r17
pop r16
.endm
.macro PUSH_ALL
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
push r28
push r29
.endm
.macro POP_ALL
pop r29
pop r28
pop r17
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
clr r1
.endm
\ No newline at end of file
#ifndef __CONFIG_H__
#define __CONFIG_H__
//#define CRYPTO_AEAD
#define CRYPTO_HASH
#define MAX_MESSAGE_LENGTH 128
#define STATE_INBITS 256
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBITS 128
/* For CRYPTO_HASH */
#define CRYPTO_BITS 256
#define STATE_INBYTES ((STATE_INBITS + 7) / 8)
#define ROW_INBITS ((STATE_INBITS + 3) / 4)
#define ROW_INBYTES ((ROW_INBITS + 7) / 8)
/* For CRYPTO_AEAD */
#define CRYPTO_KEYBYTES ((CRYPTO_KEYBITS + 7) / 8)
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES CRYPTO_KEYBYTES
#define CRYPTO_ABYTES CRYPTO_KEYBYTES
#define CRYPTO_NOOVERLAP 1
#define MAX_ASSOCIATED_DATA_LENGTH 32
#define MAX_CIPHER_LENGTH (MAX_MESSAGE_LENGTH + CRYPTO_ABYTES)
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
/* For CRYPTO_HASH */
#define CRYPTO_BYTES ((CRYPTO_BITS + 7) / 8)
#define DOMAIN_BITS 0x80
#define PAD_BITS 0x01
#define S384_R192_BITS 0x80
#if (STATE_INBITS==256)
#define C1 1
#define C2 8
#define C3 25
#elif (STATE_INBITS==384)
#define C1 1
#define C2 8
#define C3 55
#elif (STATE_INBITS==512)
#define C1 1
#define C2 16
#define C3 25
#else
#error "Not specified state size"
#endif
#ifdef CRYPTO_AEAD
/* For CRYPTO_AEAD */
#define KEY_INBITS (CRYPTO_KEYBYTES * 8)
#define KEY_INBYTES (CRYPTO_KEYBYTES)
#define NONCE_INBITS (CRYPTO_NPUBBYTES * 8)
#define NONCE_INBYTES (CRYPTO_NPUBBYTES)
#define TAG_INBITS (CRYPTO_ABYTES * 8)
#define TAG_INBYTES (CRYPTO_ABYTES)
#if (KEY_INBITS==128) && (STATE_INBITS==256)
#define RATE_INBITS 64
#define NR_0 52
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==128) && (STATE_INBITS==384)
#define RATE_INBITS 192
#define NR_0 76
#define NR_i 28
#define NR_f 32
#elif (KEY_INBITS==192) && (STATE_INBITS==384)
#define RATE_INBITS 96
#define NR_0 76
#define NR_i 40
#define NR_f 44
#elif (KEY_INBITS==256) && (STATE_INBITS==512)
#define RATE_INBITS 128
#define NR_0 100
#define NR_i 52
#define NR_f 56
#else
#error "Not specified key size and state size"
#endif
#define RATE_INBYTES ((RATE_INBITS + 7) / 8)
#define SQUEEZE_RATE_INBYTES TAG_INBYTES
#endif
#ifdef CRYPTO_HASH
/* For CRYPTO_HASH */
#define HASH_DIGEST_INBITS (CRYPTO_BYTES * 8)
#if (HASH_DIGEST_INBITS==256) && (STATE_INBITS==256)
#define HASH_RATE_INBITS 32
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 68
#elif (HASH_DIGEST_INBITS==256) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 128
#define HASH_SQUEEZE_RATE_INBITS 128
#define NR_h 80
#elif (HASH_DIGEST_INBITS==384) && (STATE_INBITS==384)
#define HASH_RATE_INBITS 48
#define HASH_SQUEEZE_RATE_INBITS 192
#define NR_h 104
#elif (HASH_DIGEST_INBITS==512) && (STATE_INBITS==512)
#define HASH_RATE_INBITS 64
#define HASH_SQUEEZE_RATE_INBITS 256
#define NR_h 140
#else
#error "Not specified hash digest size and state size"
#endif
#define HASH_RATE_INBYTES ((HASH_RATE_INBITS + 7) / 8)
#define HASH_SQUEEZE_RATE_INBYTES ((HASH_SQUEEZE_RATE_INBITS + 7) / 8)
#endif
#define TAG_MATCH 0
#define TAG_UNMATCH -1
#define OTHER_FAILURES -2
#endif
\ No newline at end of file
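/*
 * Hedged sketch (not part of the submission): compile-time checks for the
 * hash configuration selected above (CRYPTO_HASH, STATE_INBITS 256,
 * CRYPTO_BITS 256).  The digest should be produced in whole squeeze-rate
 * blocks and the absorb rate should leave a non-empty capacity; the
 * checks themselves are illustrative assumptions.
 */
#include "config.h"

_Static_assert(HASH_DIGEST_INBITS == CRYPTO_BITS,
               "digest size follows CRYPTO_BITS");
_Static_assert(HASH_DIGEST_INBITS % HASH_SQUEEZE_RATE_INBITS == 0,
               "digest is a whole number of squeeze blocks");
_Static_assert(HASH_RATE_INBITS < STATE_INBITS,
               "absorb rate must leave a non-empty capacity");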
#ifdef __cplusplus
extern "C" {
#endif
int crypto_hash(
unsigned char *out,
const unsigned char *in,
unsigned long long inlen
);
#ifdef __cplusplus
}
#endif
\ No newline at end of file
#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
extern void crypto_aead_encrypt_asm(
unsigned char *c,
const unsigned char *m,
unsigned char mlen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern int crypto_aead_decrypt_asm(
unsigned char *m,
const unsigned char *c,
unsigned char clen,
const unsigned char *ad,
unsigned char adlen,
const unsigned char *npub,
const unsigned char *k
);
extern void crypto_hash_asm(
unsigned char *out,
const unsigned char *in,
unsigned char inlen
);
int crypto_aead_encrypt(
unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the cipher implementation goes here,
... generating a ciphertext c[0],c[1],...,c[*clen-1]
... from a plaintext m[0],m[1],...,m[mlen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce npub[0],npub[1],..
... and secret key k[0],k[1],...
... the implementation shall not use nsec
...
... return 0;
*/
(void)nsec;
crypto_aead_encrypt_asm(c, m, mlen, ad, adlen, npub, k);
*clen = mlen + TAG_INBYTES;
return 0;
}
int crypto_aead_decrypt(
unsigned char *m, unsigned long long *mlen,
unsigned char *nsec,
const unsigned char *c, unsigned long long clen,
const unsigned char *ad, unsigned long long adlen,
const unsigned char *npub,
const unsigned char *k
)
{
/*
...
... the code for the AEAD implementation goes here,
... generating a plaintext m[0],m[1],...,m[*mlen-1]
... and secret message number nsec[0],nsec[1],...
... from a ciphertext c[0],c[1],...,c[clen-1]
... and associated data ad[0],ad[1],...,ad[adlen-1]
... and nonce number npub[0],npub[1],...
... and secret key k[0],k[1],...
...
... return 0;
*/
unsigned long long mlen_;
unsigned char tag_is_match;
(void)nsec;
if (clen < CRYPTO_ABYTES) {
return -1;
}
mlen_ = clen - CRYPTO_ABYTES;
tag_is_match = crypto_aead_decrypt_asm(m, c, mlen_, ad, adlen, npub, k);
if (tag_is_match != 0)
{
memset(m, 0, (size_t)mlen_);
return -1;
}
*mlen = mlen_;
return 0;
}
\ No newline at end of file
#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <stdlib.h>
#include <string.h>
#include "api.h"
#include "crypto_hash.h"
extern void crypto_hash_asm(
unsigned char *out,
const unsigned char *in,
unsigned char inlen
);
int crypto_hash(
unsigned char *out,
const unsigned char *in,
unsigned long long inlen
)
{
/*
...
... the code for the hash function implementation goes here
... generating a hash value out[0],out[1],...,out[CRYPTO_BYTES-1]
... from a message in[0],in[1],...,in[inlen-1]
...
... return 0;
*/
crypto_hash_asm(out, in, inlen);
return 0;
}
\ No newline at end of file
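/*
 * Hedged usage sketch (not part of the submission): hashing a short
 * message through the wrapper above.  CRYPTO_BYTES comes from api.h and
 * the prototype from crypto_hash.h; note that the assembly entry point
 * takes an 8-bit length, so this sketch assumes inputs no longer than
 * MAX_MESSAGE_LENGTH.
 */
#include "api.h"
#include "crypto_hash.h"

static void hash_demo(unsigned char digest[CRYPTO_BYTES])
{
    const unsigned char msg[4] = { 'K', 'N', 'O', 'T' };
    crypto_hash(digest, msg, sizeof msg);   /* 32-byte digest (CRYPTO_BYTES) */
}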
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
#define x10 r0
#define x11 r1
#define x12 r2
#define x13 r3
#define x14 r4
#define x15 r5
#define x16 r6
#define x17 r7
; an intentional arrangement of registers to facilitate movw
#define x20 r8
#define x21 r10
#define x22 r12
#define x23 r14
#define x24 r9
#define x25 r11
#define x26 r13
#define x27 r15
; an intentional arrangement of registers to facilitate movw
#define x30 r16
#define x35 r18
#define x32 r20
#define x37 r22
#define x34 r17
#define x31 r19
#define x36 r21
#define x33 r23
#define t0j r24
#define t1j r25
#define x0j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
mov t0j, \i1
com \i0
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
ld x10, Y+
ld x11, Y+
ld x12, Y+
ld x13, Y+
ld x14, Y+
ld x15, Y+
ld x16, Y+
ld x17, Y+
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
sbrc AEDH, 2 ; AEDH[2] = 0 for AEAD and AEDH[2] = 1 for HASH
rjmp For_Hash
For_AEAD:
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
rjmp round_loop_start
For_Hash:
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#elif defined(CRYPTO_AEAD)
ldi ZL, lo8(RC_LFSR6)
ldi ZH, hi8(RC_LFSR6)
#else
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
#endif
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
; SubColumns
Sbox x0j, x10, x20, x30
st Y+, x0j
ld x0j, Y
Sbox x0j, x11, x21, x31
st Y+, x0j
ld x0j, Y
Sbox x0j, x12, x22, x32
st Y+, x0j
ld x0j, Y
Sbox x0j, x13, x23, x33
st Y+, x0j
ld x0j, Y
Sbox x0j, x14, x24, x34
st Y+, x0j
ld x0j, Y
Sbox x0j, x15, x25, x35
st Y+, x0j
ld x0j, Y
Sbox x0j, x16, x26, x36
st Y+, x0j
ld x0j, Y
Sbox x0j, x17, x27, x37
st Y, x0j
; ShiftRows
; <<< 1
mov t0j, x17
rol t0j
rol x10
rol x11
rol x12
rol x13
rol x14
rol x15
rol x16
rol x17
; <<< 8
; 7 6 5 4 3 2 1 0 => 6 5 4 3 2 1 0 7
;mov t0j, x27
;mov x27, x26
;mov x26, x25
;mov x25, x24
;mov x24, x23
;mov x23, x22
;mov x22, x21
;mov x21, x20
;mov x20, t0j
; an intentional arrangement of registers to facilitate movw
movw t0j, x23 ; t1j:t0j <= x27:x23
movw x23, x22 ; x27:x23 <= x26:x22
movw x22, x21 ; x26:x22 <= x25:x21
movw x21, x20 ; x25:x21 <= x24:x20
mov x20, t1j ; x20 <= x27
mov x24, t0j ; x24 <= x23
; <<< 1
mov t0j, x37
rol t0j
rol x30
rol x31
rol x32
rol x33
rol x34
rol x35
rol x36
rol x37
; <<< 24
; 7 6 5 4 3 2 1 0 => 4 3 2 1 0 7 6 5
;mov t0j, x30
;mov x30, x35
;mov x35, x32
;mov x32, x37
;mov x37, x34
;mov x34, x31
;mov x31, x36
;mov x36, x33
;mov x33, t0j
; an intentional arrangement of registers to facilitate movw
;x30 r16
;x35 r18
;x32 r20
;x37 r22
;x34 r17
;x31 r19
;x36 r21
;x33 r23
movw t0j, x30 ; t1j:t0j <= x34:x30
movw x30, x35 ; x34:x30 <= x31:x35
movw x35, x32 ; x31:x35 <= x36:x32
movw x32, x37 ; x36:x32 <= x33:x37
mov x37, t1j ; x37 <= x34
mov x33, t0j ; x33 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + ROW_INBYTES)
st Y+, x10
st Y+, x11
st Y+, x12
st Y+, x13
st Y+, x14
st Y+, x15
st Y+, x16
st Y+, x17
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
POP_CONFLICT
ret
.section .text
#if defined(CRYPTO_AEAD) && defined(CRYPTO_HASH)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#elif defined(CRYPTO_AEAD)
RC_LFSR6:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x21, 0x03, 0x06
.byte 0x0c, 0x18, 0x31, 0x22, 0x05, 0x0a, 0x14, 0x29
.byte 0x13, 0x27, 0x0f, 0x1e, 0x3d, 0x3a, 0x34, 0x28
.byte 0x11, 0x23, 0x07, 0x0e, 0x1c, 0x39, 0x32, 0x24
.byte 0x09, 0x12, 0x25, 0x0b, 0x16, 0x2d, 0x1b, 0x37
.byte 0x2e, 0x1d, 0x3b, 0x36, 0x2c, 0x19, 0x33, 0x26
.byte 0x0d, 0x1a, 0x35, 0x2a, 0x15, 0x2b, 0x17, 0x2f
.byte 0x1f, 0x3f, 0x3e, 0x3c, 0x38, 0x30, 0x20, 0x00
#else
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
#endif
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
; an intentional arrangement of registers to facilitate movw
#define x20 r0
#define x21 r2
#define x22 r4
#define x23 r6
#define x24 r8
#define x25 r10
#define x26 r1
#define x27 r3
#define x28 r5
#define x29 r7
#define x2a r9
#define x2b r11
; an intentional arrangement of registers to facilitate movw
#define x30 r22
#define x35 r20
#define x3a r18
#define x33 r16
#define x38 r14
#define x31 r12
#define x36 r23
#define x3b r21
#define x34 r19
#define x39 r17
#define x32 r15
#define x37 r13
#define t0j r24
#define t1j r25
#define x0j r25
#define x1j r27
#include "assist.h"
.macro Sbox i0, i1, i2, i3
ldi t0j, 0xFF
eor \i0, t0j
mov t0j, \i1
and \i1, \i0
eor \i1, \i2
or \i2, t0j
eor \i0, \i3
eor \i2, \i0
eor t0j, \i3
and \i0, \i1
eor \i3, \i1
eor \i0, t0j
and t0j, \i2
eor \i1, t0j
.endm
.macro OneColumn i0, i1, i2, i3
ld \i0, Y
ldd \i1, Y + ROW_INBYTES
Sbox \i0, \i1, \i2, \i3
st Y+, \i0
rol \i1 ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, \i1
.endm
Permutation:
PUSH_CONFLICT
mov rcnt, rn
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
ld x20, Y+
ld x21, Y+
ld x22, Y+
ld x23, Y+
ld x24, Y+
ld x25, Y+
ld x26, Y+
ld x27, Y+
ld x28, Y+
ld x29, Y+
ld x2a, Y+
ld x2b, Y+
ld x30, Y+
ld x31, Y+
ld x32, Y+
ld x33, Y+
ld x34, Y+
ld x35, Y+
ld x36, Y+
ld x37, Y+
ld x38, Y+
ld x39, Y+
ld x3a, Y+
ld x3b, Y+
ldi ZL, lo8(RC_LFSR7)
ldi ZH, hi8(RC_LFSR7)
round_loop_start:
; AddRC
lpm t0j, Z+
ldi YH, hi8(SRAM_STATE)
ldi YL, lo8(SRAM_STATE)
ld x0j, Y
eor x0j, t0j
ldd x1j, Y + ROW_INBYTES
Sbox x0j, x1j, x20, x30
st Y+, x0j
lsl x1j ; ShiftRows -- Row 1 <<< 1
std Y + ROW_INBYTES -1, x1j
OneColumn x0j, x1j, x21, x31
OneColumn x0j, x1j, x22, x32
OneColumn x0j, x1j, x23, x33
OneColumn x0j, x1j, x24, x34
OneColumn x0j, x1j, x25, x35
OneColumn x0j, x1j, x26, x36
OneColumn x0j, x1j, x27, x37
OneColumn x0j, x1j, x28, x38
OneColumn x0j, x1j, x29, x39
OneColumn x0j, x1j, x2a, x3a
OneColumn x0j, x1j, x2b, x3b
ld x1j, Y
eor t0j, t0j
adc x1j, t0j
st Y, x1j
; b a 9 8 7 6 5 4 3 2 1 0
; -- -- -- -- -- -- -- -- -- -- -- x- 0
; -- -- -- -- -- -- -- -- -- -- -- x' 0
; -- -- -- -- -- -- -- -- -- -- x- -- 1
; -- -- -- -- x' -- -- -- -- -- -- -- 7
; 4 3 2 1 0 b a 9 8 7 6 5
; ShiftRows -- the last two rows
; <<< 8
; b a 9 8 7 6 5 4 3 2 1 0 => a 9 8 7 6 5 4 3 2 1 0 b
movw t0j, x25 ; t1j:t0j <= x2b:x25
movw x25, x24 ; x2b:x25 <= x2a:x24
movw x24, x23 ; x2a:x24 <= x29:x23
movw x23, x22 ; x29:x23 <= x28:x22
movw x22, x21 ; x28:x22 <= x27:x21
movw x21, x20 ; x27:x21 <= x26:x20
mov x26, t0j ; x26 <= x25
mov x20, t1j ; x20 <= x2b
; >>> 1
mov t0j, x3b
ror t0j
ror x3a
ror x39
ror x38
ror x37
ror x36
ror x35
ror x34
ror x33
ror x32
ror x31
ror x30
ror x3b
; <<< 56
; b a 9 8 7 6 5 4 3 2 1 0 => 4 3 2 1 0 b a 9 8 7 6 5
; mov x3j, x30
; mov x30, x35
; mov x35, x3a
; mov x3a, x33
; mov x33, x38
; mov x38, x31
; mov x31, x36
; mov x36, x3b
; mov x3b, x34
; mov x34, x39
; mov x39, x32
; mov x32, x37
; mov x37, x3j
; an intentional arrangement of registers to facilitate movw
; x30 r22
; x35 r20
; x3a r18
; x33 r16
; x38 r14
; x31 r12
; x36 r23
; x3b r21
; x34 r19
; x39 r17
; x32 r15
; x37 r13
movw t0j, x30 ; t1j:t0j <= x36:x30
movw x30, x35 ; x36:x30 <= x3b:x35
movw x35, x3a ; x3b:x35 <= x34:x3a
movw x3a, x33 ; x34:x3a <= x39:x33
movw x33, x38 ; x39:x33 <= x32:x38
movw x38, x31 ; x32:x38 <= x37:x31
mov x31, t1j ; x31 <= x36
mov x37, t0j ; x37 <= x30
dec rcnt
breq round_loop_end
jmp round_loop_start
round_loop_end:
ldi YH, hi8(SRAM_STATE + 2 * ROW_INBYTES)
ldi YL, lo8(SRAM_STATE + 2 * ROW_INBYTES)
st Y+, x20
st Y+, x21
st Y+, x22
st Y+, x23
st Y+, x24
st Y+, x25
st Y+, x26
st Y+, x27
st Y+, x28
st Y+, x29
st Y+, x2a
st Y+, x2b
st Y+, x30
st Y+, x31
st Y+, x32
st Y+, x33
st Y+, x34
st Y+, x35
st Y+, x36
st Y+, x37
st Y+, x38
st Y+, x39
st Y+, x3a
st Y+, x3b
POP_CONFLICT
ret
RC_LFSR7:
.byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x41, 0x03
.byte 0x06, 0x0c, 0x18, 0x30, 0x61, 0x42, 0x05, 0x0a
.byte 0x14, 0x28, 0x51, 0x23, 0x47, 0x0f, 0x1e, 0x3c
.byte 0x79, 0x72, 0x64, 0x48, 0x11, 0x22, 0x45, 0x0b
.byte 0x16, 0x2c, 0x59, 0x33, 0x67, 0x4e, 0x1d, 0x3a
.byte 0x75, 0x6a, 0x54, 0x29, 0x53, 0x27, 0x4f, 0x1f
.byte 0x3e, 0x7d, 0x7a, 0x74, 0x68, 0x50, 0x21, 0x43
.byte 0x07, 0x0e, 0x1c, 0x38, 0x71, 0x62, 0x44, 0x09
.byte 0x12, 0x24, 0x49, 0x13, 0x26, 0x4d, 0x1b, 0x36
.byte 0x6d, 0x5a, 0x35, 0x6b, 0x56, 0x2d, 0x5b, 0x37
.byte 0x6f, 0x5e, 0x3d, 0x7b, 0x76, 0x6c, 0x58, 0x31
.byte 0x63, 0x46, 0x0d, 0x1a, 0x34, 0x69, 0x52, 0x25
.byte 0x4b, 0x17, 0x2e, 0x5d, 0x3b, 0x77, 0x6e, 0x5c
.byte 0x39, 0x73, 0x66, 0x4c, 0x19, 0x32, 0x65, 0x4a
.byte 0x15, 0x2a, 0x55, 0x2b, 0x57, 0x2f, 0x5f, 0x3f
.byte 0x7f, 0x7e, 0x7c, 0x78, 0x70, 0x60, 0x40, 0x00
\ No newline at end of file
;
; **********************************************
; * KNOT: a family of bit-slice lightweight *
; * authenticated encryption algorithms *
; * and hash functions *
; * *
; * Assembly implementation for 8-bit AVR CPU *
; * Version 1.1 2020 by KNOT Team *
; **********************************************
;
;
; ============================================
; R E G I S T E R D E F I N I T I O N S
; ============================================
;
#define mclen r16
#define radlen r17
#define tcnt r17
#define tmp0 r20
#define tmp1 r21
#define cnt0 r22
#define rn r23
#define rate r24
; AEDH = 0b000: for authenticating AD
; AEDH = 0b001: for encryption
; AEDH = 0b011: for decryption
; AEDH = 0b100: for hash
#define AEDH r25
#define rcnt r26
#if (STATE_INBITS==256)
#include "knot256.h"
#elif (STATE_INBITS==384)
#include "knot384.h"
#elif (STATE_INBITS==512)
#include "knot512.h"
#else
#error "Not specified key size and state size"
#endif
#define CRYPTO_BYTES 32
\ No newline at end of file