/**
 * DryGascon128 'v7m_fpu_x implementation'
 * Sebastien Riou, May 27th 2020
 *
 * Implementation optimized for ARM-Cortex-M7/M4/M3 (Size and Speed)
 * Include protection against timing attack on X look up operations
 * Note that implementation 'v7m_fpu' is faster and safe on all Cortex-M7
 * as of May 2020.
 *
 * NOTE(review): this file provides three entry points:
 *   drygascon128_g_v7m_fpu_x(state, rounds)     - G permutation, accumulates into R
 *   drygascon128_f_v7m_fpu_x(state, in, ds, r)  - F: mixes a 128-bit input block
 *                                                 (+2-bit domain separator) into C
 *                                                 via constant-time X lookups, then
 *                                                 falls through into G's main loop
 *   drygascon128_g0_v7m_fpu_x(state)            - single G round, no accumulation
 *
 * The state layout (byte offsets below) is: C (5 x 64-bit interleaved words),
 * R (2 x 64-bit), X (2 x 64-bit). 64-bit words are bit-interleaved: the "L"
 * 32-bit half holds the even bits, the "H" half the odd bits, which turns each
 * 64-bit rotation of the GASCON linear layer into two 32-bit rotations.
 */
#if defined(__DRYGASCON_ARM_SELECTOR_H__)
	.cpu cortex-m7
	.syntax unified
	.code 16
	.thumb_func
	.align 1
	.global drygascon128_g_v7m_fpu_x
	.global drygascon128_f_v7m_fpu_x
	.global drygascon128_g0_v7m_fpu_x

// Byte offsets of the state fields (C || R || X), 8 bytes per 64-bit word.
	.equ C0, 0
	.equ C1, C0+8
	.equ C2, C0+16
	.equ C3, C0+24
	.equ C4, C0+32
	.equ R0, 48
	.equ R1, R0+8
	.equ X0, 64
	.equ X1, X0+8
// Low (even-bit) and high (odd-bit) 32-bit halves of each interleaved word.
	.equ X0L, X0
	.equ X1L, X1
	.equ C0L, C0
	.equ C1L, C1
	.equ C2L, C2
	.equ C3L, C3
	.equ C4L, C4
	.equ R0L, R0
	.equ R1L, R1
	.equ X0H, X0+4
	.equ X1H, X1+4
	.equ C0H, C0+4
	.equ C1H, C1+4
	.equ C2H, C2+4
	.equ C3H, C3+4
	.equ C4H, C4+4
	.equ R0H, R0+4
	.equ R1H, R1+4
// The four 32-bit words of the rate R, kept in FPU regs S10..S13 during G.
	.equ R32_0, R0L
	.equ R32_1, R0H
	.equ R32_2, R1L
	.equ R32_3, R1H

//-----------------------------------------------------------------------------
// void drygascon128_g_v7m_fpu_x(state, rounds)
// In:   r0 = state pointer (c,r,x), r1 = number of rounds (assumed > 0)
// Uses: r0-r9 hold C during the loop; r10/r11/r12/r14 are scratch;
//       S10..S13 accumulate R (zeroed at entry, stored back at exit).
// All callee-saved core registers are pushed/popped; FPU S10-S13 are
// clobbered (caller-saved per AAPCS).
//-----------------------------------------------------------------------------
	.type drygascon128_g_v7m_fpu_x, %function
drygascon128_g_v7m_fpu_x:
	//r0: state: c,r,x
	//r1: rounds
	push {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
	//stack vars:
	// 8 round
	// 4 rounds (base address for lookups)
	// 0 state address
	//r=0  (VSUB x,x,x zeroes each accumulator word without a literal load)
	VSUB.F32 S10, S10, S10
	VSUB.F32 S11, S11, S11
	VSUB.F32 S12, S12, S12
	VSUB.F32 S13, S13, S13
	//round=r10=rounds-1;
	subs r11,r1,#1
	//base = round_cst+12-rounds, so that base[round] indexes the right
	//constant when 'round' counts down from rounds-1 to 0
	adr r10, round_cst
	adds r10,r10,#12
	subs r10,r10,r1
	push {r0,r10,r11}
	//Load C into r0-r9 (r0 is repurposed from 'state' to C0L here)
	adds r14,r0,#C0
	LDMIA.W r14,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9}
	//loop entry
	//assume r11>0 at entry
drygascon128_g_v7m_fpu_x_main_loop:
	//r0~r9: c
	//r10: base for round constants
	//r11: round, counting from rounds-1 to 0
	//r11 = ((0xf - r11) << 4) | r11;  (precomputed in the round_cst table)
	ldrb r11,[r10,r11]
	//state:
	//r0 to r9: c
	//r11: constant to add as round constant
	//FPU:
	//s11 to s14: r
	// addition of round constant
	//C2L ^= round constant;
	eors r4,r4,r11
	// substitution layer, lower half (5-bit S-box on the even-bit planes;
	// bitsliced across C0L..C4L = r0,r2,r4,r6,r8)
	eors r0,r0,r8
	eors r8,r8,r6
	eors r4,r4,r2
	mvns r10,r0
	mvns r11,r6
	mvns r12,r8
	ands r10,r10,r2
	ands r11,r11,r8
	eors r8,r8,r10
	ands r12,r12,r0
	mvns r10,r4
	ands r10,r10,r6
	eors r6,r6,r12
	mvns r12,r2
	ands r12,r12,r4
	eors r4,r4,r11
	eors r6,r6,r4
	mvns r4,r4
	eors r0,r0,r12
	eors r2,r2,r10
	eors r2,r2,r0
	eors r0,r0,r8
	// substitution layer, upper half (same S-box on the odd-bit planes
	// C0H..C4H = r1,r3,r5,r7,r9)
	eors r1,r1,r9
	eors r9,r9,r7
	eors r5,r5,r3
	mvns r10,r1
	mvns r11,r7
	mvns r12,r9
	ands r10,r10,r3
	ands r11,r11,r9
	eors r9,r9,r10
	ands r12,r12,r1
	mvns r10,r5
	ands r10,r10,r7
	eors r7,r7,r12
	mvns r12,r3
	ands r12,r12,r5
	eors r5,r5,r11
	eors r7,r7,r5
	mvns r5,r5
	eors r1,r1,r12
	eors r3,r3,r10
	eors r3,r3,r1
	eors r1,r1,r9
	// linear diffusion layer
	// Each 64-bit rotr on an interleaved word = one 32-bit rotr on each half
	// (odd rotate amounts also swap the halves). In this variant the words
	// are processed in the order c4, c0, c2, c1, c3 and the R accumulator in
	// S10..S13 is updated on the fly (vmov to/from r12/r14 overlaps with the
	// integer pipeline).
	//c4 ^= gascon_rotr64_interleaved(c4, 40) ^ gascon_rotr64_interleaved(c4, 7);
	//c4 high part
	rors r11,r9,#(20)
	eors r9,r11,r9
	rors r10,r8,#(4)
	eors r9,r10,r9
	//c4 low part
	rors r11,r11,#((32-20+3)%32)
	eors r11,r11,r8
	rors r10,r8,#(20)
	eors r8,r10,r11
	vmov r14,S11
	//c0 ^= gascon_rotr64_interleaved(c0, 28) ^ gascon_rotr64_interleaved(c0, 19);
	//c0 high part
	rors r11,r1,#(14)
	eors r1,r11,r1
	rors r10,r0,#(10)
	eors r1,r10,r1
	//r14 is R32_1: accumulate C0H into R
	eors r14,r14,r1
	vmov r12,S10
	//c0 low part
	rors r11,r11,#((32-14+9)%32)
	eors r11,r11,r0
	rors r10,r0,#(14)
	eors r0,r10,r11
	//r12 is R32_0: accumulate C0L into R
	eors r12,r12,r0
	//c2 ^= gascon_rotr64_interleaved(c2, 6) ^ gascon_rotr64_interleaved(c2, 1);
	//c2 high part
	rors r11,r5,#(3)
	eors r5,r11,r5
	rors r10,r4,#(1)
	eors r5,r10,r5
	//r12 is R32_0: accumulate C2H into R
	eors r12,r12,r5
	vmov S10,r12
	vmov r12,S13
	//c2 low part
	rors r11,r11,#((32-3+0)%32)
	eors r11,r11,r4
	rors r10,r4,#(3)
	eors r4,r10,r11
	//r12 is R32_3: accumulate C2L into R
	eors r12,r12,r4
	//c1 ^= gascon_rotr64_interleaved(c1, 38) ^ gascon_rotr64_interleaved(c1, 61);
	//c1 high part
	rors r11,r3,#(19)
	eors r3,r11,r3
	rors r10,r2,#(31)
	eors r3,r10,r3
	//r12 is R32_3: accumulate C1H into R
	eors r12,r12,r3
	vmov S13,r12
	vmov r12,S12
	//c1 low part
	rors r11,r11,#((32-19+30)%32)
	eors r11,r11,r2
	rors r10,r2,#(19)
	eors r2,r10,r11
	//r12 is R32_2: accumulate C1L into R
	eors r12,r12,r2
	//c3 ^= gascon_rotr64_interleaved(c3, 10) ^ gascon_rotr64_interleaved(c3, 17);
	//c3 high part
	rors r11,r7,#(5)
	eors r7,r11,r7
	rors r10,r6,#(9)
	eors r7,r10,r7
	//r12 is R32_2: accumulate C3H into R
	eors r12,r12,r7
	vmov S12,r12
	//c3 low part
	rors r11,r11,#((32-5+8)%32)
	eors r11,r11,r6
	rors r10,r6,#(5)
	eors r6,r10,r11
	//r14 is R32_1: accumulate C3L into R
	eors r14,r14,r6
	vmov S11,r14
	//state:
	//r0 to r9: c
	//r10,r11,r12 destroyed
	// loop control: reload constant base and down-counter from the stack
	ldr r10,[sp,#4]
	ldr r11,[sp,#8]
	subs r11,#1
	bmi drygascon128_g_v7m_fpu_x_exit
	str r11,[sp,#8]
	b drygascon128_g_v7m_fpu_x_main_loop
drygascon128_g_v7m_fpu_x_exit:
	//update C
	ldr r14,[sp,#0]
	STMIA.W r14,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9}
	//update R from the FPU accumulators
	ldr r11,[sp,#0]
	adds r11,r11,#R0
	VSTMIA.F32 r11, {S10,S11,S12,S13}
	add sp,sp,#12
	pop {r4, r5, r6, r7, r8, r9, r10, r11, r12, pc}
	.size drygascon128_g_v7m_fpu_x, .-drygascon128_g_v7m_fpu_x

//-----------------------------------------------------------------------------
// void drygascon128_f_v7m_fpu_x(state, input, ds, rounds)
// In:   r0 = state, r1 = 16-byte input block, r2 = 2-bit domain separator,
//       r3 = rounds for the trailing G.
// Mixes the 130-bit value (ds || input) into C, 10 bits per iteration, with a
// core round between injections, then tail-jumps into G's main loop (so the
// stack frame built here is rearranged to match G's before the jump).
// X lookups are done with tst + VSELEQ on the FPU (branch-free, constant
// time). Clobbers S0-S6, D0-D2, S10-S13 in addition to G's clobbers.
//-----------------------------------------------------------------------------
	.align 2
	.type drygascon128_f_v7m_fpu_x, %function
drygascon128_f_v7m_fpu_x:
	//r0:state
	//r1:input
	//r2:ds
	//r3:rounds
	push {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
	//stack frame:
	//0: pointer on input
	//4: DS value
	//8 :pointer on state
	//12 : rounds for g
	//16 :mix round / g round
	movs r10,#0 //init of input bit counter
	push {r0,r3,r10} //make the same stack frame as drygascon128_g_cm7
	push {r1,r2}
	//r=0
	VSUB.F32 S10, S10, S10
	VSUB.F32 S11, S11, S11
	VSUB.F32 S12, S12, S12
	VSUB.F32 S13, S13, S13
	//Load C
	adds r11,r0,#C0
	LDMIA.W r11,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9}
	//Load X: X0 -> D0 (s0,s1), X1 -> D1 (s2,s3), selected by input bits below
	adds r11,#X0
	VLDMIA.F32 r11, {s0,s1,s2,s3}
drygascon128_f_v7m_fpu_x_mix128_main_loop:
	//r10 is input bit counter
	ldr r11,[sp,#0]
	//r11 is pointer on input
	// bit counter -> (base byte, shift) for each 10-bit chunk:
	//r10 r12 shift
	//  0   0   0
	// 10   1   2
	// 20   2   4
	// 30   3   6
	// 40   5   0
	// 50   6   2
	// 60   7   4
	// 70   8   6
	// 80  10   0
	// 90  11   2
	//100  12   4
	//110  13   6
	//120  15   0
	//130  16   2 --> we do that operation for 2 last bits in a special last loop
	cmp r10,#120
	bne drygascon128_f_v7m_fpu_x_mix128_main_loop.regular
	//we execute this only during the pen-ultimate operation
	//we add the 2 lsb from DS to r14 (above input bit 127)
	ldrb r14,[r11,#15]
	ldr r10,[sp,#4]
	lsl r10,r10,#8
	eors r14,r14,r10
	b drygascon128_f_v7m_fpu_x_mix128_main_loop.core
drygascon128_f_v7m_fpu_x_mix128_main_loop.regular:
	//r12 is base byte: byte offset to read from input buffer
	lsr r12,r10,#3 //divide by 8 to get base byte
	//r10 becomes shift
	lsl r14,r12,#3
	sub r10,r10,r14
	ldr r14,[r11,r12] //M7 supports unalign access with ldr
	lsr r14,r14,r10
drygascon128_f_v7m_fpu_x_mix128_main_loop.core:
	// r14 holds (at least) 10 input bits. Each bit pair selects one 32-bit
	// word out of X (4 words): odd bit picks X0/X1 via VSELEQ.F64, even bit
	// picks the low/high word via VSELEQ.F32 -- no data-dependent branch or
	// load, so the selection is timing-attack resistant. The selected word
	// is xored into one even-plane C word per pair.
	tst r14,#2
	VSELEQ.F64 D2, D0, D1
	tst r14,#1
	VSELEQ.F32 S6, S4, S5
	VMOV r11,S6
	eors r0,r0,r11
	tst r14,#8
	VSELEQ.F64 D2, D0, D1
	tst r14,#4
	VSELEQ.F32 S6, S4, S5
	VMOV r11,S6
	eors r2,r2,r11
	tst r14,#32
	VSELEQ.F64 D2, D0, D1
	tst r14,#16
	VSELEQ.F32 S6, S4, S5
	VMOV r11,S6
	eors r4,r4,r11
	tst r14,#128
	VSELEQ.F64 D2, D0, D1
	tst r14,#64
	VSELEQ.F32 S6, S4, S5
	VMOV r11,S6
	eors r6,r6,r11
	tst r14,#512
	VSELEQ.F64 D2, D0, D1
	tst r14,#256
	VSELEQ.F32 S6, S4, S5
	VMOV r11,S6
	eors r8,r8,r11
	// advance the bit counter; 140 = all 130 bits consumed (13 full chunks
	// of 10 plus the final 2-bit DS chunk)
	ldr r10,[sp,#16]
	adds r10,#10
	cmp r10,#140
	beq drygascon128_f_v7m_fpu_x_mix128_exit
drygascon128_f_v7m_fpu_x_mix128_coreround:
	str r10,[sp,#16]
	// core round between injections, fixed round constant 0xf0
	movs r11,#0xf0
	//state:
	//r0 to r9: c
	//r11: constant to add as round constant
	// addition of round constant
	//C2L ^= round constant;
	eors r4,r4,r11
	// substitution layer, lower half
	eors r0,r0,r8
	eors r8,r8,r6
	eors r4,r4,r2
	mvns r10,r0
	mvns r11,r6
	mvns r12,r8
	ands r10,r10,r2
	ands r11,r11,r8
	eors r8,r8,r10
	ands r12,r12,r0
	mvns r10,r4
	ands r10,r10,r6
	eors r6,r6,r12
	mvns r12,r2
	ands r12,r12,r4
	eors r4,r4,r11
	eors r6,r6,r4
	mvns r4,r4
	eors r0,r0,r12
	eors r2,r2,r10
	eors r2,r2,r0
	eors r0,r0,r8
	// substitution layer, upper half
	eors r1,r1,r9
	eors r9,r9,r7
	eors r5,r5,r3
	mvns r10,r1
	mvns r11,r7
	mvns r12,r9
	ands r10,r10,r3
	ands r11,r11,r9
	eors r9,r9,r10
	ands r12,r12,r1
	mvns r10,r5
	ands r10,r10,r7
	eors r7,r7,r12
	mvns r12,r3
	ands r12,r12,r5
	eors r5,r5,r11
	eors r7,r7,r5
	mvns r5,r5
	eors r1,r1,r12
	eors r3,r3,r10
	eors r3,r3,r1
	eors r1,r1,r9
	// linear diffusion layer (no R accumulation here; word order c4,c0,c1,c2,c3)
	//c4 ^= gascon_rotr64_interleaved(c4, 40) ^ gascon_rotr64_interleaved(c4, 7);
	//c4 high part
	rors r11,r9,#(20)
	eors r9,r11,r9
	rors r10,r8,#(4)
	eors r9,r10,r9
	//c4 low part
	rors r11,r11,#((32-20+3)%32)
	eors r11,r11,r8
	rors r10,r8,#(20)
	eors r8,r10,r11
	//c0 ^= gascon_rotr64_interleaved(c0, 28) ^ gascon_rotr64_interleaved(c0, 19);
	//c0 high part
	rors r11,r1,#(14)
	eors r1,r11,r1
	rors r10,r0,#(10)
	eors r1,r10,r1
	//c0 low part
	rors r11,r11,#((32-14+9)%32)
	eors r11,r11,r0
	rors r10,r0,#(14)
	eors r0,r10,r11
	//c1 ^= gascon_rotr64_interleaved(c1, 38) ^ gascon_rotr64_interleaved(c1, 61);
	//c1 high part
	rors r11,r3,#(19)
	eors r3,r11,r3
	rors r10,r2,#(31)
	eors r3,r10,r3
	//c1 low part
	rors r11,r11,#((32-19+30)%32)
	eors r11,r11,r2
	rors r10,r2,#(19)
	eors r2,r10,r11
	//c2 ^= gascon_rotr64_interleaved(c2, 6) ^ gascon_rotr64_interleaved(c2, 1);
	//c2 high part
	rors r11,r5,#(3)
	eors r5,r11,r5
	rors r10,r4,#(1)
	eors r5,r10,r5
	//c2 low part
	rors r11,r11,#((32-3+0)%32)
	eors r11,r11,r4
	rors r10,r4,#(3)
	eors r4,r10,r11
	//c3 ^= gascon_rotr64_interleaved(c3, 10) ^ gascon_rotr64_interleaved(c3, 17);
	//c3 high part
	rors r11,r7,#(5)
	eors r7,r11,r7
	rors r10,r6,#(9)
	eors r7,r10,r7
	//c3 low part
	rors r11,r11,#((32-5+8)%32)
	eors r11,r11,r6
	rors r10,r6,#(5)
	eors r6,r10,r11
	//state:
	//r0 to r9: c
	//r10,r11,r12 destroyed
	ldr r10,[sp,#16]
	cmp r10,#130
	bne drygascon128_f_v7m_fpu_x_mix128_main_loop
	//prepare the last loop: load DS 2 msb directly into r14 (skip the
	//byte-fetch path, there are only 2 bits left)
	ldr r14,[sp,4]
	lsr r14,r14,#2
	b drygascon128_f_v7m_fpu_x_mix128_main_loop.core
drygascon128_f_v7m_fpu_x_mix128_exit:
	// rebuild G's stack frame (state, constant base, round counter) from
	// this function's frame, then tail-jump into G's main loop
	ldr r14,[sp,#12]
	//round=r10=rounds-1;
	subs r11,r14,#1
	//base = round_cst+12-rounds
	adr r10, round_cst
	adds r10,r10,#12
	subs r10,r10,r14
	str r10,[sp,#12]
	str r11,[sp,#16]
	add sp,sp,#8
	b drygascon128_g_v7m_fpu_x_main_loop

// Precomputed round constants: ((0xf - round) << 4) | round for round = 11..0.
	.align 2
round_cst:
	.byte 0x4b
	.byte 0x5a
	.byte 0x69
	.byte 0x78
	.byte 0x87
	.byte 0x96
	.byte 0xa5
	.byte 0xb4
	.byte 0xc3
	.byte 0xd2
	.byte 0xe1
	.byte 0xf0
	.align 2
	.size drygascon128_f_v7m_fpu_x, .-drygascon128_f_v7m_fpu_x

//-----------------------------------------------------------------------------
// void drygascon128_g0_v7m_fpu_x(state)
// Performs a single round (constant for round index 0) without accumulating
// into R. In: r0 = state. r14 keeps the C pointer across the round (the
// round only clobbers r10/r11/r12 as scratch).
//-----------------------------------------------------------------------------
	.type drygascon128_g0_v7m_fpu_x, %function
drygascon128_g0_v7m_fpu_x:
	//perform a single round without accumulate
	//r0: state
	push {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
	//Load C
	adds r14,r0,#C0
	LDMIA.W r14,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9}
	//r0~r9: c
	//r11 = ((0xf - 0) << 4) | 0;
	movs r11,#0xf0
	//state:
	//r0 to r9: c
	//r11: constant to add as round constant
	// addition of round constant
	//C2L ^= round constant;
	eors r4,r4,r11
	// substitution layer, lower half
	eors r0,r0,r8
	eors r8,r8,r6
	eors r4,r4,r2
	mvns r10,r0
	mvns r11,r6
	mvns r12,r8
	ands r10,r10,r2
	ands r11,r11,r8
	eors r8,r8,r10
	ands r12,r12,r0
	mvns r10,r4
	ands r10,r10,r6
	eors r6,r6,r12
	mvns r12,r2
	ands r12,r12,r4
	eors r4,r4,r11
	eors r6,r6,r4
	mvns r4,r4
	eors r0,r0,r12
	eors r2,r2,r10
	eors r2,r2,r0
	eors r0,r0,r8
	// substitution layer, upper half
	eors r1,r1,r9
	eors r9,r9,r7
	eors r5,r5,r3
	mvns r10,r1
	mvns r11,r7
	mvns r12,r9
	ands r10,r10,r3
	ands r11,r11,r9
	eors r9,r9,r10
	ands r12,r12,r1
	mvns r10,r5
	ands r10,r10,r7
	eors r7,r7,r12
	mvns r12,r3
	ands r12,r12,r5
	eors r5,r5,r11
	eors r7,r7,r5
	mvns r5,r5
	eors r1,r1,r12
	eors r3,r3,r10
	eors r3,r3,r1
	eors r1,r1,r9
	// linear diffusion layer (no R accumulation; word order c4,c0,c1,c2,c3)
	//c4 ^= gascon_rotr64_interleaved(c4, 40) ^ gascon_rotr64_interleaved(c4, 7);
	//c4 high part
	rors r11,r9,#(20)
	eors r9,r11,r9
	rors r10,r8,#(4)
	eors r9,r10,r9
	//c4 low part
	rors r11,r11,#((32-20+3)%32)
	eors r11,r11,r8
	rors r10,r8,#(20)
	eors r8,r10,r11
	//c0 ^= gascon_rotr64_interleaved(c0, 28) ^ gascon_rotr64_interleaved(c0, 19);
	//c0 high part
	rors r11,r1,#(14)
	eors r1,r11,r1
	rors r10,r0,#(10)
	eors r1,r10,r1
	//c0 low part
	rors r11,r11,#((32-14+9)%32)
	eors r11,r11,r0
	rors r10,r0,#(14)
	eors r0,r10,r11
	//c1 ^= gascon_rotr64_interleaved(c1, 38) ^ gascon_rotr64_interleaved(c1, 61);
	//c1 high part
	rors r11,r3,#(19)
	eors r3,r11,r3
	rors r10,r2,#(31)
	eors r3,r10,r3
	//c1 low part
	rors r11,r11,#((32-19+30)%32)
	eors r11,r11,r2
	rors r10,r2,#(19)
	eors r2,r10,r11
	//c2 ^= gascon_rotr64_interleaved(c2, 6) ^ gascon_rotr64_interleaved(c2, 1);
	//c2 high part
	rors r11,r5,#(3)
	eors r5,r11,r5
	rors r10,r4,#(1)
	eors r5,r10,r5
	//c2 low part
	rors r11,r11,#((32-3+0)%32)
	eors r11,r11,r4
	rors r10,r4,#(3)
	eors r4,r10,r11
	//c3 ^= gascon_rotr64_interleaved(c3, 10) ^ gascon_rotr64_interleaved(c3, 17);
	//c3 high part
	rors r11,r7,#(5)
	eors r7,r11,r7
	rors r10,r6,#(9)
	eors r7,r10,r7
	//c3 low part
	rors r11,r11,#((32-5+8)%32)
	eors r11,r11,r6
	rors r10,r6,#(5)
	eors r6,r10,r11
	//state:
	//r0 to r9: c
	//r10,r11,r12 destroyed
	//update C (r14 still points at state->c)
	STMIA.W r14,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9}
	pop {r4, r5, r6, r7, r8, r9, r10, r11, r12, pc}
	.size drygascon128_g0_v7m_fpu_x, .-drygascon128_g0_v7m_fpu_x
#endif