diff --git a/wolfcrypt/src/port/arm/armv8-aes-asm.S b/wolfcrypt/src/port/arm/armv8-aes-asm.S
index 431dbb1dde..f0fd188a89 100644
--- a/wolfcrypt/src/port/arm/armv8-aes-asm.S
+++ b/wolfcrypt/src/port/arm/armv8-aes-asm.S
@@ -43294,14 +43294,14 @@ _AES_set_encrypt_key_NEON:
     add x4, x4, :lo12:L_AES_ARM64_NEON_rcon
 #else
     adrp x4, L_AES_ARM64_NEON_rcon@PAGE
-    add x4, x4, :lo12:L_AES_ARM64_NEON_rcon@PAGEOFF
+    add x4, x4, L_AES_ARM64_NEON_rcon@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x5, L_AES_ARM64_NEON_te
     add x5, x5, :lo12:L_AES_ARM64_NEON_te
 #else
     adrp x5, L_AES_ARM64_NEON_te@PAGE
-    add x5, x5, :lo12:L_AES_ARM64_NEON_te@PAGEOFF
+    add x5, x5, L_AES_ARM64_NEON_te@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v6.16b, v7.16b, v8.16b, v9.16b}, [x5], #0x40
     ld1 {v10.16b, v11.16b, v12.16b, v13.16b}, [x5], #0x40
@@ -43543,14 +43543,14 @@ _AES_ECB_encrypt_NEON:
     add x5, x5, :lo12:L_AES_ARM64_NEON_te
 #else
     adrp x5, L_AES_ARM64_NEON_te@PAGE
-    add x5, x5, :lo12:L_AES_ARM64_NEON_te@PAGEOFF
+    add x5, x5, L_AES_ARM64_NEON_te@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x6, L_AES_ARM64_NEON_shift_rows_shuffle
     add x6, x6, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle
 #else
     adrp x6, L_AES_ARM64_NEON_shift_rows_shuffle@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
+    add x6, x6, L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x5], #0x40
     ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x5], #0x40
@@ -44324,14 +44324,14 @@ _AES_CBC_encrypt_NEON:
     add x6, x6, :lo12:L_AES_ARM64_NEON_te
 #else
     adrp x6, L_AES_ARM64_NEON_te@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_NEON_te@PAGEOFF
+    add x6, x6, L_AES_ARM64_NEON_te@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x7, L_AES_ARM64_NEON_shift_rows_shuffle
     add x7, x7, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle
 #else
     adrp x7, L_AES_ARM64_NEON_shift_rows_shuffle@PAGE
-    add x7, x7, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
+    add x7, x7, L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v10.16b, v11.16b, v12.16b, v13.16b}, [x6], #0x40
     ld1 {v14.16b, v15.16b, v16.16b, v17.16b}, [x6], #0x40
@@ -44489,14 +44489,14 @@ _AES_CTR_encrypt_NEON:
     add x6, x6, :lo12:L_AES_ARM64_NEON_te
 #else
     adrp x6, L_AES_ARM64_NEON_te@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_NEON_te@PAGEOFF
+    add x6, x6, L_AES_ARM64_NEON_te@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x7, L_AES_ARM64_NEON_shift_rows_shuffle
     add x7, x7, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle
 #else
     adrp x7, L_AES_ARM64_NEON_shift_rows_shuffle@PAGE
-    add x7, x7, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
+    add x7, x7, L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x6], #0x40
     ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x6], #0x40
@@ -45391,14 +45391,14 @@ _AES_ECB_decrypt_NEON:
     add x5, x5, :lo12:L_AES_ARM64_NEON_td
 #else
     adrp x5, L_AES_ARM64_NEON_td@PAGE
-    add x5, x5, :lo12:L_AES_ARM64_NEON_td@PAGEOFF
+    add x5, x5, L_AES_ARM64_NEON_td@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x6, L_AES_ARM64_NEON_shift_rows_invshuffle
     add x6, x6, :lo12:L_AES_ARM64_NEON_shift_rows_invshuffle
 #else
     adrp x6, L_AES_ARM64_NEON_shift_rows_invshuffle@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_NEON_shift_rows_invshuffle@PAGEOFF
+    add x6, x6, L_AES_ARM64_NEON_shift_rows_invshuffle@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x5], #0x40
     ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x5], #0x40
@@ -46441,14 +46441,14 @@ _AES_CBC_decrypt_NEON:
     add x6, x6, :lo12:L_AES_ARM64_NEON_td
 #else
     adrp x6, L_AES_ARM64_NEON_td@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_NEON_td@PAGEOFF
+    add x6, x6, L_AES_ARM64_NEON_td@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x7, L_AES_ARM64_NEON_shift_rows_invshuffle
     add x7, x7, :lo12:L_AES_ARM64_NEON_shift_rows_invshuffle
 #else
     adrp x7, L_AES_ARM64_NEON_shift_rows_invshuffle@PAGE
-    add x7, x7, :lo12:L_AES_ARM64_NEON_shift_rows_invshuffle@PAGEOFF
+    add x7, x7, L_AES_ARM64_NEON_shift_rows_invshuffle@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x6], #0x40
     ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x6], #0x40
@@ -47857,14 +47857,14 @@ _AES_GCM_encrypt_NEON:
     add x9, x9, :lo12:L_AES_ARM64_NEON_te
 #else
     adrp x9, L_AES_ARM64_NEON_te@PAGE
-    add x9, x9, :lo12:L_AES_ARM64_NEON_te@PAGEOFF
+    add x9, x9, L_AES_ARM64_NEON_te@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x10, L_AES_ARM64_NEON_shift_rows_shuffle
     add x10, x10, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle
 #else
     adrp x10, L_AES_ARM64_NEON_shift_rows_shuffle@PAGE
-    add x10, x10, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
+    add x10, x10, L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x9], #0x40
     ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x9], #0x40
@@ -48666,14 +48666,14 @@ _AES_XTS_encrypt_NEON:
     add x19, x19, :lo12:L_AES_ARM64_NEON_te
 #else
     adrp x19, L_AES_ARM64_NEON_te@PAGE
-    add x19, x19, :lo12:L_AES_ARM64_NEON_te@PAGEOFF
+    add x19, x19, L_AES_ARM64_NEON_te@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x20, L_AES_ARM64_NEON_shift_rows_shuffle
     add x20, x20, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle
 #else
     adrp x20, L_AES_ARM64_NEON_shift_rows_shuffle@PAGE
-    add x20, x20, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
+    add x20, x20, L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x19], #0x40
     ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x19], #0x40
@@ -49749,28 +49749,28 @@ _AES_XTS_decrypt_NEON:
     add x20, x20, :lo12:L_AES_ARM64_NEON_te
 #else
     adrp x20, L_AES_ARM64_NEON_te@PAGE
-    add x20, x20, :lo12:L_AES_ARM64_NEON_te@PAGEOFF
+    add x20, x20, L_AES_ARM64_NEON_te@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x21, L_AES_ARM64_NEON_td
     add x21, x21, :lo12:L_AES_ARM64_NEON_td
 #else
     adrp x21, L_AES_ARM64_NEON_td@PAGE
-    add x21, x21, :lo12:L_AES_ARM64_NEON_td@PAGEOFF
+    add x21, x21, L_AES_ARM64_NEON_td@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x22, L_AES_ARM64_NEON_shift_rows_shuffle
     add x22, x22, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle
 #else
     adrp x22, L_AES_ARM64_NEON_shift_rows_shuffle@PAGE
-    add x22, x22, :lo12:L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
+    add x22, x22, L_AES_ARM64_NEON_shift_rows_shuffle@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x23, L_AES_ARM64_NEON_shift_rows_invshuffle
     add x23, x23, :lo12:L_AES_ARM64_NEON_shift_rows_invshuffle
 #else
     adrp x23, L_AES_ARM64_NEON_shift_rows_invshuffle@PAGE
-    add x23, x23, :lo12:L_AES_ARM64_NEON_shift_rows_invshuffle@PAGEOFF
+    add x23, x23, L_AES_ARM64_NEON_shift_rows_invshuffle@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x20], #0x40
     ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x20], #0x40
@@ -51851,14 +51851,14 @@ _AES_invert_key:
     add x2, x2, :lo12:L_AES_ARM64_te
 #else
     adrp x2, L_AES_ARM64_te@PAGE
-    add x2, x2, :lo12:L_AES_ARM64_te@PAGEOFF
+    add x2, x2, L_AES_ARM64_te@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x3, L_AES_ARM64_td
     add x3, x3, :lo12:L_AES_ARM64_td
 #else
     adrp x3, L_AES_ARM64_td@PAGE
-    add x3, x3, :lo12:L_AES_ARM64_td@PAGEOFF
+    add x3, x3, L_AES_ARM64_td@PAGEOFF
 #endif /* __APPLE__ */
     add x12, x0, x1, lsl 4
     mov w13, w1
@@ -52008,14 +52008,14 @@ _AES_set_encrypt_key:
     add x5, x5, :lo12:L_AES_ARM64_rcon
 #else
     adrp x5, L_AES_ARM64_rcon@PAGE
-    add x5, x5, :lo12:L_AES_ARM64_rcon@PAGEOFF
+    add x5, x5, L_AES_ARM64_rcon@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x12, L_AES_ARM64_te
     add x12, x12, :lo12:L_AES_ARM64_te
 #else
     adrp x12, L_AES_ARM64_te@PAGE
-    add x12, x12, :lo12:L_AES_ARM64_te@PAGEOFF
+    add x12, x12, L_AES_ARM64_te@PAGEOFF
 #endif /* __APPLE__ */
     cmp x1, #0x80
     beq L_AES_set_encrypt_key_start_128
@@ -52271,7 +52271,7 @@ _AES_ECB_encrypt:
     add x5, x5, :lo12:L_AES_ARM64_te
 #else
     adrp x5, L_AES_ARM64_te@PAGE
-    add x5, x5, :lo12:L_AES_ARM64_te@PAGEOFF
+    add x5, x5, L_AES_ARM64_te@PAGEOFF
 #endif /* __APPLE__ */
 L_AES_ECB_encrypt_loop_block_128:
     mov x17, x3
@@ -52604,7 +52604,7 @@ _AES_CBC_encrypt:
     add x6, x6, :lo12:L_AES_ARM64_te
 #else
    adrp x6, L_AES_ARM64_te@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_te@PAGEOFF
+    add x6, x6, L_AES_ARM64_te@PAGEOFF
 #endif /* __APPLE__ */
     ldp x7, x8, [x5]
 L_AES_CBC_encrypt_loop_block:
@@ -52941,7 +52941,7 @@ _AES_CTR_encrypt:
     add x6, x6, :lo12:L_AES_ARM64_te
 #else
     adrp x6, L_AES_ARM64_te@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_te@PAGEOFF
+    add x6, x6, L_AES_ARM64_te@PAGEOFF
 #endif /* __APPLE__ */
     ldp x15, x16, [x5]
     rev32 x15, x15
@@ -53335,14 +53335,14 @@ _AES_ECB_decrypt:
     add x5, x5, :lo12:L_AES_ARM64_td
 #else
     adrp x5, L_AES_ARM64_td@PAGE
-    add x5, x5, :lo12:L_AES_ARM64_td@PAGEOFF
+    add x5, x5, L_AES_ARM64_td@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x6, L_AES_ARM64_td4
     add x6, x6, :lo12:L_AES_ARM64_td4
 #else
     adrp x6, L_AES_ARM64_td4@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_td4@PAGEOFF
+    add x6, x6, L_AES_ARM64_td4@PAGEOFF
 #endif /* __APPLE__ */
 L_AES_ECB_decrypt_loop_block:
     mov x19, x3
@@ -53659,14 +53659,14 @@ _AES_CBC_decrypt:
     add x6, x6, :lo12:L_AES_ARM64_td4
 #else
     adrp x6, L_AES_ARM64_td4@PAGE
-    add x6, x6, :lo12:L_AES_ARM64_td4@PAGEOFF
+    add x6, x6, L_AES_ARM64_td4@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x7, L_AES_ARM64_td
     add x7, x7, :lo12:L_AES_ARM64_td
 #else
     adrp x7, L_AES_ARM64_td@PAGE
-    add x7, x7, :lo12:L_AES_ARM64_td@PAGEOFF
+    add x7, x7, L_AES_ARM64_td@PAGEOFF
 #endif /* __APPLE__ */
 L_AES_CBC_decrypt_loop_block:
     mov x20, x3
@@ -54327,7 +54327,7 @@ _GCM_gmult_len:
     add x10, x10, :lo12:L_GCM_gmult_len_r
 #else
     adrp x10, L_GCM_gmult_len_r@PAGE
-    add x10, x10, :lo12:L_GCM_gmult_len_r@PAGEOFF
+    add x10, x10, L_GCM_gmult_len_r@PAGEOFF
 #endif /* __APPLE__ */
 L_GCM_gmult_len_start_block:
     ldp x4, x5, [x0]
@@ -54754,7 +54754,7 @@ _AES_GCM_encrypt:
     add x19, x19, :lo12:L_AES_ARM64_te
 #else
     adrp x19, L_AES_ARM64_te@PAGE
-    add x19, x19, :lo12:L_AES_ARM64_te@PAGEOFF
+    add x19, x19, L_AES_ARM64_te@PAGEOFF
 #endif /* __APPLE__ */
     ldp x16, x17, [x5]
     rev32 x16, x16
@@ -55100,7 +55100,7 @@ _AES_XTS_encrypt:
     add x8, x8, :lo12:L_AES_ARM64_te
 #else
     adrp x8, L_AES_ARM64_te@PAGE
-    add x8, x8, :lo12:L_AES_ARM64_te@PAGEOFF
+    add x8, x8, L_AES_ARM64_te@PAGEOFF
 #endif /* __APPLE__ */
     mov x9, #0x87
     mov x26, x5
@@ -56056,21 +56056,21 @@ _AES_XTS_decrypt:
     add x8, x8, :lo12:L_AES_ARM64_td
 #else
     adrp x8, L_AES_ARM64_td@PAGE
-    add x8, x8, :lo12:L_AES_ARM64_td@PAGEOFF
+    add x8, x8, L_AES_ARM64_td@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x9, L_AES_ARM64_td4
     add x9, x9, :lo12:L_AES_ARM64_td4
 #else
     adrp x9, L_AES_ARM64_td4@PAGE
-    add x9, x9, :lo12:L_AES_ARM64_td4@PAGEOFF
+    add x9, x9, L_AES_ARM64_td4@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x10, L_AES_ARM64_te
     add x10, x10, :lo12:L_AES_ARM64_te
 #else
     adrp x10, L_AES_ARM64_te@PAGE
-    add x10, x10, :lo12:L_AES_ARM64_te@PAGEOFF
+    add x10, x10, L_AES_ARM64_te@PAGEOFF
 #endif /* __APPLE__ */
     ands w11, w2, #15
     cset w11, ne
diff --git a/wolfcrypt/src/port/arm/armv8-chacha-asm.S b/wolfcrypt/src/port/arm/armv8-chacha-asm.S
index 8a5481125a..94acf28712 100644
--- a/wolfcrypt/src/port/arm/armv8-chacha-asm.S
+++ b/wolfcrypt/src/port/arm/armv8-chacha-asm.S
@@ -95,14 +95,14 @@ _wc_chacha_crypt_bytes:
     add x5, x5, :lo12:L_chacha20_arm64_rol8
 #else
     adrp x5, L_chacha20_arm64_rol8@PAGE
-    add x5, x5, :lo12:L_chacha20_arm64_rol8@PAGEOFF
+    add x5, x5, L_chacha20_arm64_rol8@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x6, L_chacha20_arm64_ctr
     add x6, x6, :lo12:L_chacha20_arm64_ctr
 #else
     adrp x6, L_chacha20_arm64_ctr@PAGE
-    add x6, x6, :lo12:L_chacha20_arm64_ctr@PAGEOFF
+    add x6, x6, L_chacha20_arm64_ctr@PAGEOFF
 #endif /* __APPLE__ */
     eor v29.16b, v29.16b, v29.16b
    mov x26, #5
@@ -1134,7 +1134,7 @@ _wc_chacha_setkey:
     add x3, x3, :lo12:L_chacha_setkey_arm64_constant
 #else
     adrp x3, L_chacha_setkey_arm64_constant@PAGE
-    add x3, x3, :lo12:L_chacha_setkey_arm64_constant@PAGEOFF
+    add x3, x3, L_chacha_setkey_arm64_constant@PAGEOFF
 #endif /* __APPLE__ */
     subs x2, x2, #16
     add x3, x3, x2
diff --git a/wolfcrypt/src/port/arm/armv8-mlkem-asm.S b/wolfcrypt/src/port/arm/armv8-mlkem-asm.S
index a45475c9f1..1ded4afde5 100644
--- a/wolfcrypt/src/port/arm/armv8-mlkem-asm.S
+++ b/wolfcrypt/src/port/arm/armv8-mlkem-asm.S
@@ -168,21 +168,21 @@ _mlkem_ntt:
     add x2, x2, :lo12:L_mlkem_aarch64_zetas
 #else
     adrp x2, L_mlkem_aarch64_zetas@PAGE
-    add x2, x2, :lo12:L_mlkem_aarch64_zetas@PAGEOFF
+    add x2, x2, L_mlkem_aarch64_zetas@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x3, L_mlkem_aarch64_zetas_qinv
     add x3, x3, :lo12:L_mlkem_aarch64_zetas_qinv
 #else
     adrp x3, L_mlkem_aarch64_zetas_qinv@PAGE
-    add x3, x3, :lo12:L_mlkem_aarch64_zetas_qinv@PAGEOFF
+    add x3, x3, L_mlkem_aarch64_zetas_qinv@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x4, L_mlkem_aarch64_consts
     add x4, x4, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x4, L_mlkem_aarch64_consts@PAGE
-    add x4, x4, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x4, x4, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     add x1, x0, #0x100
     ldr q4, [x4]
@@ -1562,21 +1562,21 @@ _mlkem_invntt:
     add x2, x2, :lo12:L_mlkem_aarch64_zetas_inv
 #else
     adrp x2, L_mlkem_aarch64_zetas_inv@PAGE
-    add x2, x2, :lo12:L_mlkem_aarch64_zetas_inv@PAGEOFF
+    add x2, x2, L_mlkem_aarch64_zetas_inv@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x3, L_mlkem_aarch64_zetas_inv_qinv
     add x3, x3, :lo12:L_mlkem_aarch64_zetas_inv_qinv
 #else
     adrp x3, L_mlkem_aarch64_zetas_inv_qinv@PAGE
-    add x3, x3, :lo12:L_mlkem_aarch64_zetas_inv_qinv@PAGEOFF
+    add x3, x3, L_mlkem_aarch64_zetas_inv_qinv@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x4, L_mlkem_aarch64_consts
     add x4, x4, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x4, L_mlkem_aarch64_consts@PAGE
-    add x4, x4, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x4, x4, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     add x1, x0, #0x100
     ldr q8, [x4]
@@ -3013,21 +3013,21 @@ _mlkem_ntt_sqrdmlsh:
     add x2, x2, :lo12:L_mlkem_aarch64_zetas
 #else
     adrp x2, L_mlkem_aarch64_zetas@PAGE
-    add x2, x2, :lo12:L_mlkem_aarch64_zetas@PAGEOFF
+    add x2, x2, L_mlkem_aarch64_zetas@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x3, L_mlkem_aarch64_zetas_qinv
     add x3, x3, :lo12:L_mlkem_aarch64_zetas_qinv
 #else
     adrp x3, L_mlkem_aarch64_zetas_qinv@PAGE
-    add x3, x3, :lo12:L_mlkem_aarch64_zetas_qinv@PAGEOFF
+    add x3, x3, L_mlkem_aarch64_zetas_qinv@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x4, L_mlkem_aarch64_consts
     add x4, x4, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x4, L_mlkem_aarch64_consts@PAGE
-    add x4, x4, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x4, x4, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     add x1, x0, #0x100
     ldr q4, [x4]
@@ -4195,21 +4195,21 @@ _mlkem_invntt_sqrdmlsh:
     add x2, x2, :lo12:L_mlkem_aarch64_zetas_inv
 #else
     adrp x2, L_mlkem_aarch64_zetas_inv@PAGE
-    add x2, x2, :lo12:L_mlkem_aarch64_zetas_inv@PAGEOFF
+    add x2, x2, L_mlkem_aarch64_zetas_inv@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x3, L_mlkem_aarch64_zetas_inv_qinv
     add x3, x3, :lo12:L_mlkem_aarch64_zetas_inv_qinv
 #else
     adrp x3, L_mlkem_aarch64_zetas_inv_qinv@PAGE
-    add x3, x3, :lo12:L_mlkem_aarch64_zetas_inv_qinv@PAGEOFF
+    add x3, x3, L_mlkem_aarch64_zetas_inv_qinv@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x4, L_mlkem_aarch64_consts
     add x4, x4, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x4, L_mlkem_aarch64_consts@PAGE
-    add x4, x4, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x4, x4, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     add x1, x0, #0x100
     ldr q8, [x4]
@@ -5532,14 +5532,14 @@ _mlkem_basemul_mont:
     add x3, x3, :lo12:L_mlkem_aarch64_zetas_mul
 #else
     adrp x3, L_mlkem_aarch64_zetas_mul@PAGE
-    add x3, x3, :lo12:L_mlkem_aarch64_zetas_mul@PAGEOFF
+    add x3, x3, L_mlkem_aarch64_zetas_mul@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x4, L_mlkem_aarch64_consts
     add x4, x4, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x4, L_mlkem_aarch64_consts@PAGE
-    add x4, x4, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x4, x4, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     ldr q1, [x4]
     ldp q2, q3, [x1]
@@ -6230,14 +6230,14 @@ _mlkem_basemul_mont_add:
     add x3, x3, :lo12:L_mlkem_aarch64_zetas_mul
 #else
     adrp x3, L_mlkem_aarch64_zetas_mul@PAGE
-    add x3, x3, :lo12:L_mlkem_aarch64_zetas_mul@PAGEOFF
+    add x3, x3, L_mlkem_aarch64_zetas_mul@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x4, L_mlkem_aarch64_consts
     add x4, x4, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x4, L_mlkem_aarch64_consts@PAGE
-    add x4, x4, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x4, x4, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     ldr q1, [x4]
     ldp q2, q3, [x1]
@@ -6991,7 +6991,7 @@ _mlkem_csubq_neon:
     add x1, x1, :lo12:L_mlkem_aarch64_q
 #else
     adrp x1, L_mlkem_aarch64_q@PAGE
-    add x1, x1, :lo12:L_mlkem_aarch64_q@PAGEOFF
+    add x1, x1, L_mlkem_aarch64_q@PAGEOFF
 #endif /* __APPLE__ */
     ldr q20, [x1]
     ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #0x40
@@ -7172,7 +7172,7 @@ _mlkem_add_reduce:
     add x2, x2, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x2, L_mlkem_aarch64_consts@PAGE
-    add x2, x2, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x2, x2, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     ldr q0, [x2]
     ld4 {v1.8h, v2.8h, v3.8h, v4.8h}, [x0], #0x40
@@ -7363,7 +7363,7 @@ _mlkem_add3_reduce:
     add x3, x3, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x3, L_mlkem_aarch64_consts@PAGE
-    add x3, x3, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x3, x3, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     ldr q0, [x3]
     ld4 {v1.8h, v2.8h, v3.8h, v4.8h}, [x0], #0x40
@@ -7594,7 +7594,7 @@ _mlkem_rsub_reduce:
     add x2, x2, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x2, L_mlkem_aarch64_consts@PAGE
-    add x2, x2, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x2, x2, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     ldr q0, [x2]
     ld4 {v1.8h, v2.8h, v3.8h, v4.8h}, [x0], #0x40
@@ -7785,7 +7785,7 @@ _mlkem_to_mont:
     add x1, x1, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x1, L_mlkem_aarch64_consts@PAGE
-    add x1, x1, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x1, x1, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     ldr q0, [x1]
     ld4 {v1.8h, v2.8h, v3.8h, v4.8h}, [x0], #0x40
@@ -7999,7 +7999,7 @@ _mlkem_to_mont_sqrdmlsh:
     add x1, x1, :lo12:L_mlkem_aarch64_consts
 #else
     adrp x1, L_mlkem_aarch64_consts@PAGE
-    add x1, x1, :lo12:L_mlkem_aarch64_consts@PAGEOFF
+    add x1, x1, L_mlkem_aarch64_consts@PAGEOFF
 #endif /* __APPLE__ */
     ldr q0, [x1]
     ld4 {v1.8h, v2.8h, v3.8h, v4.8h}, [x0], #0x40
@@ -8226,21 +8226,21 @@ _mlkem_to_msg_neon:
     add x2, x2, :lo12:L_mlkem_to_msg_low
 #else
     adrp x2, L_mlkem_to_msg_low@PAGE
-    add x2, x2, :lo12:L_mlkem_to_msg_low@PAGEOFF
+    add x2, x2, L_mlkem_to_msg_low@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x3, L_mlkem_to_msg_high
     add x3, x3, :lo12:L_mlkem_to_msg_high
 #else
     adrp x3, L_mlkem_to_msg_high@PAGE
-    add x3, x3, :lo12:L_mlkem_to_msg_high@PAGEOFF
+    add x3, x3, L_mlkem_to_msg_high@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x4, L_mlkem_to_msg_bits
     add x4, x4, :lo12:L_mlkem_to_msg_bits
 #else
     adrp x4, L_mlkem_to_msg_bits@PAGE
-    add x4, x4, :lo12:L_mlkem_to_msg_bits@PAGEOFF
+    add x4, x4, L_mlkem_to_msg_bits@PAGEOFF
 #endif /* __APPLE__ */
     ldr q0, [x2]
     ldr q1, [x3]
@@ -8506,14 +8506,14 @@ _mlkem_from_msg_neon:
     add x2, x2, :lo12:L_mlkem_from_msg_q1half
 #else
     adrp x2, L_mlkem_from_msg_q1half@PAGE
-    add x2, x2, :lo12:L_mlkem_from_msg_q1half@PAGEOFF
+    add x2, x2, L_mlkem_from_msg_q1half@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x3, L_mlkem_from_msg_bits
     add x3, x3, :lo12:L_mlkem_from_msg_bits
 #else
     adrp x3, L_mlkem_from_msg_bits@PAGE
-    add x3, x3, :lo12:L_mlkem_from_msg_bits@PAGEOFF
+    add x3, x3, L_mlkem_from_msg_bits@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v2.16b, v3.16b}, [x1]
     ldr q1, [x2]
@@ -9517,28 +9517,28 @@ _mlkem_rej_uniform_neon:
     add x4, x4, :lo12:L_mlkem_rej_uniform_mask
 #else
     adrp x4, L_mlkem_rej_uniform_mask@PAGE
-    add x4, x4, :lo12:L_mlkem_rej_uniform_mask@PAGEOFF
+    add x4, x4, L_mlkem_rej_uniform_mask@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x5, L_mlkem_aarch64_q
     add x5, x5, :lo12:L_mlkem_aarch64_q
 #else
     adrp x5, L_mlkem_aarch64_q@PAGE
-    add x5, x5, :lo12:L_mlkem_aarch64_q@PAGEOFF
+    add x5, x5, L_mlkem_aarch64_q@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x6, L_mlkem_rej_uniform_bits
     add x6, x6, :lo12:L_mlkem_rej_uniform_bits
 #else
     adrp x6, L_mlkem_rej_uniform_bits@PAGE
-    add x6, x6, :lo12:L_mlkem_rej_uniform_bits@PAGEOFF
+    add x6, x6, L_mlkem_rej_uniform_bits@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x7, L_mlkem_rej_uniform_indices
     add x7, x7, :lo12:L_mlkem_rej_uniform_indices
 #else
     adrp x7, L_mlkem_rej_uniform_indices@PAGE
-    add x7, x7, :lo12:L_mlkem_rej_uniform_indices@PAGEOFF
+    add x7, x7, L_mlkem_rej_uniform_indices@PAGEOFF
 #endif /* __APPLE__ */
     eor v1.16b, v1.16b, v1.16b
     eor v12.16b, v12.16b, v12.16b
@@ -9754,7 +9754,7 @@ _mlkem_sha3_blocksx3_neon:
     add x27, x27, :lo12:L_sha3_aarch64_r
 #else
     adrp x27, L_sha3_aarch64_r@PAGE
-    add x27, x27, :lo12:L_sha3_aarch64_r@PAGEOFF
+    add x27, x27, L_sha3_aarch64_r@PAGEOFF
 #endif /* __APPLE__ */
     str x0, [x29, #40]
     ld4 {v0.d, v1.d, v2.d, v3.d}[0], [x0], #32
@@ -10079,7 +10079,7 @@ _mlkem_shake128_blocksx3_seed_neon:
     add x28, x28, :lo12:L_sha3_aarch64_r
 #else
     adrp x28, L_sha3_aarch64_r@PAGE
-    add x28, x28, :lo12:L_sha3_aarch64_r@PAGEOFF
+    add x28, x28, L_sha3_aarch64_r@PAGEOFF
 #endif /* __APPLE__ */
     str x0, [x29, #40]
     add x0, x0, #32
@@ -10426,7 +10426,7 @@ _mlkem_shake256_blocksx3_seed_neon:
     add x28, x28, :lo12:L_sha3_aarch64_r
 #else
     adrp x28, L_sha3_aarch64_r@PAGE
-    add x28, x28, :lo12:L_sha3_aarch64_r@PAGEOFF
+    add x28, x28, L_sha3_aarch64_r@PAGEOFF
 #endif /* __APPLE__ */
     str x0, [x29, #40]
     add x0, x0, #32
@@ -10774,7 +10774,7 @@ _mlkem_sha3_blocksx3_neon:
     add x27, x27, :lo12:L_sha3_aarch64_r
 #else
     adrp x27, L_sha3_aarch64_r@PAGE
-    add x27, x27, :lo12:L_sha3_aarch64_r@PAGEOFF
+    add x27, x27, L_sha3_aarch64_r@PAGEOFF
 #endif /* __APPLE__ */
     str x0, [x29, #40]
     ld4 {v0.d, v1.d, v2.d, v3.d}[0], [x0], #32
@@ -11184,7 +11184,7 @@ _mlkem_shake128_blocksx3_seed_neon:
     add x28, x28, :lo12:L_sha3_aarch64_r
 #else
     adrp x28, L_sha3_aarch64_r@PAGE
-    add x28, x28, :lo12:L_sha3_aarch64_r@PAGEOFF
+    add x28, x28, L_sha3_aarch64_r@PAGEOFF
 #endif /* __APPLE__ */
     str x0, [x29, #40]
     add x0, x0, #32
@@ -11616,7 +11616,7 @@ _mlkem_shake256_blocksx3_seed_neon:
    add x28, x28, :lo12:L_sha3_aarch64_r
 #else
     adrp x28, L_sha3_aarch64_r@PAGE
-    add x28, x28, :lo12:L_sha3_aarch64_r@PAGEOFF
+    add x28, x28, L_sha3_aarch64_r@PAGEOFF
 #endif /* __APPLE__ */
     str x0, [x29, #40]
     add x0, x0, #32
diff --git a/wolfcrypt/src/port/arm/armv8-poly1305-asm.S b/wolfcrypt/src/port/arm/armv8-poly1305-asm.S
index 880d041e3d..dd50ac3e8c 100644
--- a/wolfcrypt/src/port/arm/armv8-poly1305-asm.S
+++ b/wolfcrypt/src/port/arm/armv8-poly1305-asm.S
@@ -474,7 +474,7 @@ _poly1305_set_key:
     add x2, x2, :lo12:L_poly1305_set_key_arm64_clamp
 #else
     adrp x2, L_poly1305_set_key_arm64_clamp@PAGE
-    add x2, x2, :lo12:L_poly1305_set_key_arm64_clamp@PAGEOFF
+    add x2, x2, L_poly1305_set_key_arm64_clamp@PAGEOFF
 #endif /* __APPLE__ */
     # Load key and pad.
     ldp x11, x12, [x1]
diff --git a/wolfcrypt/src/port/arm/armv8-sha256-asm.S b/wolfcrypt/src/port/arm/armv8-sha256-asm.S
index 7ed0fa2869..fb0795365e 100644
--- a/wolfcrypt/src/port/arm/armv8-sha256-asm.S
+++ b/wolfcrypt/src/port/arm/armv8-sha256-asm.S
@@ -133,7 +133,7 @@ _Transform_Sha256_Len_neon:
     add x3, x3, :lo12:L_SHA256_transform_neon_len_k
 #else
     adrp x3, L_SHA256_transform_neon_len_k@PAGE
-    add x3, x3, :lo12:L_SHA256_transform_neon_len_k@PAGEOFF
+    add x3, x3, L_SHA256_transform_neon_len_k@PAGEOFF
 #endif /* __APPLE__ */
     # Load digest into working vars
     ldr w4, [x0]
@@ -1200,7 +1200,7 @@ _Transform_Sha256_Len_crypto:
     add x3, x3, :lo12:L_SHA256_trans_crypto_len_k
 #else
     adrp x3, L_SHA256_trans_crypto_len_k@PAGE
-    add x3, x3, :lo12:L_SHA256_trans_crypto_len_k@PAGEOFF
+    add x3, x3, L_SHA256_trans_crypto_len_k@PAGEOFF
 #endif /* __APPLE__ */
     # Load K into vector registers
     ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [x3], #0x40
diff --git a/wolfcrypt/src/port/arm/armv8-sha3-asm.S b/wolfcrypt/src/port/arm/armv8-sha3-asm.S
index 5414ba713e..e45626386b 100644
--- a/wolfcrypt/src/port/arm/armv8-sha3-asm.S
+++ b/wolfcrypt/src/port/arm/armv8-sha3-asm.S
@@ -92,7 +92,7 @@ _BlockSha3_crypto:
     add x1, x1, :lo12:L_SHA3_transform_crypto_r
 #else
     adrp x1, L_SHA3_transform_crypto_r@PAGE
-    add x1, x1, :lo12:L_SHA3_transform_crypto_r@PAGEOFF
+    add x1, x1, L_SHA3_transform_crypto_r@PAGEOFF
 #endif /* __APPLE__ */
 #ifdef __APPLE__
     .arch_extension sha3
@@ -268,7 +268,7 @@ _BlockSha3_base:
     add x27, x27, :lo12:L_SHA3_transform_base_r
 #else
     adrp x27, L_SHA3_transform_base_r@PAGE
-    add x27, x27, :lo12:L_SHA3_transform_base_r@PAGEOFF
+    add x27, x27, L_SHA3_transform_base_r@PAGEOFF
 #endif /* __APPLE__ */
     ldp x1, x2, [x0]
     ldp x3, x4, [x0, #16]
diff --git a/wolfcrypt/src/port/arm/armv8-sha512-asm.S b/wolfcrypt/src/port/arm/armv8-sha512-asm.S
index fde25d54e5..9da9a236a1 100644
--- a/wolfcrypt/src/port/arm/armv8-sha512-asm.S
+++ b/wolfcrypt/src/port/arm/armv8-sha512-asm.S
@@ -165,14 +165,14 @@ _Transform_Sha512_Len_neon:
     add x3, x3, :lo12:L_SHA512_transform_neon_len_k
 #else
     adrp x3, L_SHA512_transform_neon_len_k@PAGE
-    add x3, x3, :lo12:L_SHA512_transform_neon_len_k@PAGEOFF
+    add x3, x3, L_SHA512_transform_neon_len_k@PAGEOFF
 #endif /* __APPLE__ */
 #ifndef __APPLE__
     adrp x27, L_SHA512_transform_neon_len_r8
     add x27, x27, :lo12:L_SHA512_transform_neon_len_r8
 #else
     adrp x27, L_SHA512_transform_neon_len_r8@PAGE
-    add x27, x27, :lo12:L_SHA512_transform_neon_len_r8@PAGEOFF
+    add x27, x27, L_SHA512_transform_neon_len_r8@PAGEOFF
 #endif /* __APPLE__ */
     ld1 {v11.16b}, [x27]
     # Load digest into working vars
@@ -1202,7 +1202,7 @@ _Transform_Sha512_Len_crypto:
     add x4, x4, :lo12:L_SHA512_trans_crypto_len_k
 #else
     adrp x4, L_SHA512_trans_crypto_len_k@PAGE
-    add x4, x4, :lo12:L_SHA512_trans_crypto_len_k@PAGEOFF
+    add x4, x4, L_SHA512_trans_crypto_len_k@PAGEOFF
 #endif /* __APPLE__ */
 #ifdef __APPLE__
     .arch_extension sha3
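Note on the pattern: every hunk above is the same one-line fix. On AArch64, a label's address is materialized in two steps: adrp yields the 4KB page base and add supplies the offset within that page. ELF assemblers (GNU as, clang targeting Linux) spell the low 12 bits as :lo12:symbol, while Apple's Mach-O assembler spells the pair as symbol@PAGE / symbol@PAGEOFF. The __APPLE__ branches had been emitting the mixed form :lo12:symbol@PAGEOFF, which combines both notations; the patch drops the ELF-only :lo12: prefix so the Apple path uses pure Mach-O syntax. A minimal sketch of the two spellings side by side follows; the label L_example_const and register x0 are hypothetical, chosen only for illustration:

#ifndef __APPLE__
    /* ELF: adrp gives the page base, :lo12: the low 12 bits of the address */
    adrp x0, L_example_const
    add x0, x0, :lo12:L_example_const
#else
    /* Mach-O: @PAGE gives the page base, @PAGEOFF the offset within the page */
    adrp x0, L_example_const@PAGE
    add x0, x0, L_example_const@PAGEOFF
#endif /* __APPLE__ */
    /* x0 now holds the full address of L_example_const on either target */
    ldr q0, [x0]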