ran go mod vendor on the whole project
This commit is contained in:
parent 26f96f361f · commit 28d10d9d39
346 changed files with 12917 additions and 66931 deletions
308 vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s generated vendored
@@ -1,308 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.11
// +build !gccgo,!appengine

#include "textflag.h"

#define NUM_ROUNDS 10

// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
	MOVD dst+0(FP), R1
	MOVD src+24(FP), R2
	MOVD src_len+32(FP), R3
	MOVD key+48(FP), R4
	MOVD nonce+56(FP), R6
	MOVD counter+64(FP), R7

	MOVD $·constants(SB), R10
	MOVD $·incRotMatrix(SB), R11

	MOVW (R7), R20

	AND $~255, R3, R13
	ADD R2, R13, R12 // R12 for block end
	AND $255, R3, R13
loop:
	MOVD $NUM_ROUNDS, R21
	VLD1 (R11), [V30.S4, V31.S4]

	// load constants
	// VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
	WORD $0x4D60E940

	// load keys
	// VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4]
	WORD $0x4DFFE884
	// VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4]
	WORD $0x4DFFE888
	SUB $32, R4

	// load counter + nonce
	// VLD1R (R7), [V12.S4]
	WORD $0x4D40C8EC

	// VLD3R (R6), [V13.S4, V14.S4, V15.S4]
	WORD $0x4D40E8CD

	// update counter
	VADD V30.S4, V12.S4, V12.S4

chacha:
	// V0..V3 += V4..V7
	// V12..V15 <<<= ((V12..V15 XOR V0..V3), 16)
	VADD V0.S4, V4.S4, V0.S4
	VADD V1.S4, V5.S4, V1.S4
	VADD V2.S4, V6.S4, V2.S4
	VADD V3.S4, V7.S4, V3.S4
	VEOR V12.B16, V0.B16, V12.B16
	VEOR V13.B16, V1.B16, V13.B16
	VEOR V14.B16, V2.B16, V14.B16
	VEOR V15.B16, V3.B16, V15.B16
	VREV32 V12.H8, V12.H8
	VREV32 V13.H8, V13.H8
	VREV32 V14.H8, V14.H8
	VREV32 V15.H8, V15.H8
	// V8..V11 += V12..V15
	// V4..V7 <<<= ((V4..V7 XOR V8..V11), 12)
	VADD V8.S4, V12.S4, V8.S4
	VADD V9.S4, V13.S4, V9.S4
	VADD V10.S4, V14.S4, V10.S4
	VADD V11.S4, V15.S4, V11.S4
	VEOR V8.B16, V4.B16, V16.B16
	VEOR V9.B16, V5.B16, V17.B16
	VEOR V10.B16, V6.B16, V18.B16
	VEOR V11.B16, V7.B16, V19.B16
	VSHL $12, V16.S4, V4.S4
	VSHL $12, V17.S4, V5.S4
	VSHL $12, V18.S4, V6.S4
	VSHL $12, V19.S4, V7.S4
	VSRI $20, V16.S4, V4.S4
	VSRI $20, V17.S4, V5.S4
	VSRI $20, V18.S4, V6.S4
	VSRI $20, V19.S4, V7.S4

	// V0..V3 += V4..V7
	// V12..V15 <<<= ((V12..V15 XOR V0..V3), 8)
	VADD V0.S4, V4.S4, V0.S4
	VADD V1.S4, V5.S4, V1.S4
	VADD V2.S4, V6.S4, V2.S4
	VADD V3.S4, V7.S4, V3.S4
	VEOR V12.B16, V0.B16, V12.B16
	VEOR V13.B16, V1.B16, V13.B16
	VEOR V14.B16, V2.B16, V14.B16
	VEOR V15.B16, V3.B16, V15.B16
	VTBL V31.B16, [V12.B16], V12.B16
	VTBL V31.B16, [V13.B16], V13.B16
	VTBL V31.B16, [V14.B16], V14.B16
	VTBL V31.B16, [V15.B16], V15.B16

	// V8..V11 += V12..V15
	// V4..V7 <<<= ((V4..V7 XOR V8..V11), 7)
	VADD V12.S4, V8.S4, V8.S4
	VADD V13.S4, V9.S4, V9.S4
	VADD V14.S4, V10.S4, V10.S4
	VADD V15.S4, V11.S4, V11.S4
	VEOR V8.B16, V4.B16, V16.B16
	VEOR V9.B16, V5.B16, V17.B16
	VEOR V10.B16, V6.B16, V18.B16
	VEOR V11.B16, V7.B16, V19.B16
	VSHL $7, V16.S4, V4.S4
	VSHL $7, V17.S4, V5.S4
	VSHL $7, V18.S4, V6.S4
	VSHL $7, V19.S4, V7.S4
	VSRI $25, V16.S4, V4.S4
	VSRI $25, V17.S4, V5.S4
	VSRI $25, V18.S4, V6.S4
	VSRI $25, V19.S4, V7.S4

	// V0..V3 += V5..V7, V4
	// V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16)
	VADD V0.S4, V5.S4, V0.S4
	VADD V1.S4, V6.S4, V1.S4
	VADD V2.S4, V7.S4, V2.S4
	VADD V3.S4, V4.S4, V3.S4
	VEOR V15.B16, V0.B16, V15.B16
	VEOR V12.B16, V1.B16, V12.B16
	VEOR V13.B16, V2.B16, V13.B16
	VEOR V14.B16, V3.B16, V14.B16
	VREV32 V12.H8, V12.H8
	VREV32 V13.H8, V13.H8
	VREV32 V14.H8, V14.H8
	VREV32 V15.H8, V15.H8

	// V10 += V15; V5 <<<= ((V10 XOR V5), 12)
	// ...
	VADD V15.S4, V10.S4, V10.S4
	VADD V12.S4, V11.S4, V11.S4
	VADD V13.S4, V8.S4, V8.S4
	VADD V14.S4, V9.S4, V9.S4
	VEOR V10.B16, V5.B16, V16.B16
	VEOR V11.B16, V6.B16, V17.B16
	VEOR V8.B16, V7.B16, V18.B16
	VEOR V9.B16, V4.B16, V19.B16
	VSHL $12, V16.S4, V5.S4
	VSHL $12, V17.S4, V6.S4
	VSHL $12, V18.S4, V7.S4
	VSHL $12, V19.S4, V4.S4
	VSRI $20, V16.S4, V5.S4
	VSRI $20, V17.S4, V6.S4
	VSRI $20, V18.S4, V7.S4
	VSRI $20, V19.S4, V4.S4

	// V0 += V5; V15 <<<= ((V0 XOR V15), 8)
	// ...
	VADD V5.S4, V0.S4, V0.S4
	VADD V6.S4, V1.S4, V1.S4
	VADD V7.S4, V2.S4, V2.S4
	VADD V4.S4, V3.S4, V3.S4
	VEOR V0.B16, V15.B16, V15.B16
	VEOR V1.B16, V12.B16, V12.B16
	VEOR V2.B16, V13.B16, V13.B16
	VEOR V3.B16, V14.B16, V14.B16
	VTBL V31.B16, [V12.B16], V12.B16
	VTBL V31.B16, [V13.B16], V13.B16
	VTBL V31.B16, [V14.B16], V14.B16
	VTBL V31.B16, [V15.B16], V15.B16

	// V10 += V15; V5 <<<= ((V10 XOR V5), 7)
	// ...
	VADD V15.S4, V10.S4, V10.S4
	VADD V12.S4, V11.S4, V11.S4
	VADD V13.S4, V8.S4, V8.S4
	VADD V14.S4, V9.S4, V9.S4
	VEOR V10.B16, V5.B16, V16.B16
	VEOR V11.B16, V6.B16, V17.B16
	VEOR V8.B16, V7.B16, V18.B16
	VEOR V9.B16, V4.B16, V19.B16
	VSHL $7, V16.S4, V5.S4
	VSHL $7, V17.S4, V6.S4
	VSHL $7, V18.S4, V7.S4
	VSHL $7, V19.S4, V4.S4
	VSRI $25, V16.S4, V5.S4
	VSRI $25, V17.S4, V6.S4
	VSRI $25, V18.S4, V7.S4
	VSRI $25, V19.S4, V4.S4

	SUB $1, R21
	CBNZ R21, chacha

	// VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4]
	WORD $0x4D60E950

	// VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4]
	WORD $0x4DFFE894
	VADD V30.S4, V12.S4, V12.S4
	VADD V16.S4, V0.S4, V0.S4
	VADD V17.S4, V1.S4, V1.S4
	VADD V18.S4, V2.S4, V2.S4
	VADD V19.S4, V3.S4, V3.S4
	// VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4]
	WORD $0x4DFFE898
	// restore R4
	SUB $32, R4

	// load counter + nonce
	// VLD1R (R7), [V28.S4]
	WORD $0x4D40C8FC
	// VLD3R (R6), [V29.S4, V30.S4, V31.S4]
	WORD $0x4D40E8DD

	VADD V20.S4, V4.S4, V4.S4
	VADD V21.S4, V5.S4, V5.S4
	VADD V22.S4, V6.S4, V6.S4
	VADD V23.S4, V7.S4, V7.S4
	VADD V24.S4, V8.S4, V8.S4
	VADD V25.S4, V9.S4, V9.S4
	VADD V26.S4, V10.S4, V10.S4
	VADD V27.S4, V11.S4, V11.S4
	VADD V28.S4, V12.S4, V12.S4
	VADD V29.S4, V13.S4, V13.S4
	VADD V30.S4, V14.S4, V14.S4
	VADD V31.S4, V15.S4, V15.S4

	VZIP1 V1.S4, V0.S4, V16.S4
	VZIP2 V1.S4, V0.S4, V17.S4
	VZIP1 V3.S4, V2.S4, V18.S4
	VZIP2 V3.S4, V2.S4, V19.S4
	VZIP1 V5.S4, V4.S4, V20.S4
	VZIP2 V5.S4, V4.S4, V21.S4
	VZIP1 V7.S4, V6.S4, V22.S4
	VZIP2 V7.S4, V6.S4, V23.S4
	VZIP1 V9.S4, V8.S4, V24.S4
	VZIP2 V9.S4, V8.S4, V25.S4
	VZIP1 V11.S4, V10.S4, V26.S4
	VZIP2 V11.S4, V10.S4, V27.S4
	VZIP1 V13.S4, V12.S4, V28.S4
	VZIP2 V13.S4, V12.S4, V29.S4
	VZIP1 V15.S4, V14.S4, V30.S4
	VZIP2 V15.S4, V14.S4, V31.S4
	VZIP1 V18.D2, V16.D2, V0.D2
	VZIP2 V18.D2, V16.D2, V4.D2
	VZIP1 V19.D2, V17.D2, V8.D2
	VZIP2 V19.D2, V17.D2, V12.D2
	VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16]

	VZIP1 V22.D2, V20.D2, V1.D2
	VZIP2 V22.D2, V20.D2, V5.D2
	VZIP1 V23.D2, V21.D2, V9.D2
	VZIP2 V23.D2, V21.D2, V13.D2
	VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16]
	VZIP1 V26.D2, V24.D2, V2.D2
	VZIP2 V26.D2, V24.D2, V6.D2
	VZIP1 V27.D2, V25.D2, V10.D2
	VZIP2 V27.D2, V25.D2, V14.D2
	VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16]
	VZIP1 V30.D2, V28.D2, V3.D2
	VZIP2 V30.D2, V28.D2, V7.D2
	VZIP1 V31.D2, V29.D2, V11.D2
	VZIP2 V31.D2, V29.D2, V15.D2
	VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16]
	VEOR V0.B16, V16.B16, V16.B16
	VEOR V1.B16, V17.B16, V17.B16
	VEOR V2.B16, V18.B16, V18.B16
	VEOR V3.B16, V19.B16, V19.B16
	VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1)
	VEOR V4.B16, V20.B16, V20.B16
	VEOR V5.B16, V21.B16, V21.B16
	VEOR V6.B16, V22.B16, V22.B16
	VEOR V7.B16, V23.B16, V23.B16
	VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1)
	VEOR V8.B16, V24.B16, V24.B16
	VEOR V9.B16, V25.B16, V25.B16
	VEOR V10.B16, V26.B16, V26.B16
	VEOR V11.B16, V27.B16, V27.B16
	VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1)
	VEOR V12.B16, V28.B16, V28.B16
	VEOR V13.B16, V29.B16, V29.B16
	VEOR V14.B16, V30.B16, V30.B16
	VEOR V15.B16, V31.B16, V31.B16
	VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1)

	ADD $4, R20
	MOVW R20, (R7) // update counter

	CMP R2, R12
	BGT loop

	RET


DATA ·constants+0x00(SB)/4, $0x61707865
DATA ·constants+0x04(SB)/4, $0x3320646e
DATA ·constants+0x08(SB)/4, $0x79622d32
DATA ·constants+0x0c(SB)/4, $0x6b206574
GLOBL ·constants(SB), NOPTR|RODATA, $32

DATA ·incRotMatrix+0x00(SB)/4, $0x00000000
DATA ·incRotMatrix+0x04(SB)/4, $0x00000001
DATA ·incRotMatrix+0x08(SB)/4, $0x00000002
DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003
DATA ·incRotMatrix+0x10(SB)/4, $0x02010003
DATA ·incRotMatrix+0x14(SB)/4, $0x06050407
DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B
DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F
GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32
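Note: the round comments in the deleted file above (V0..V3 += V4..V7, then rotate the XOR result by 16, 12, 8, 7) are the standard ChaCha20 quarter round applied lane-wise to four blocks at once. For reference, a scalar Go sketch of the single quarter round each vector group implements; this is illustrative only, not code from this commit:

	// quarterRound is a scalar reference for the ChaCha20 quarter round;
	// the assembly above performs the same steps on four blocks in parallel.
	func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
		a += b
		d ^= a
		d = d<<16 | d>>16 // rotate left 16 (VREV32 on 16-bit lanes)
		c += d
		b ^= c
		b = b<<12 | b>>20 // rotate left 12 (VSHL $12 + VSRI $20)
		a += b
		d ^= a
		d = d<<8 | d>>24 // rotate left 8 (VTBL byte shuffle)
		c += d
		b ^= c
		b = b<<7 | b>>25 // rotate left 7 (VSHL $7 + VSRI $25)
		return a, b, c, d
	}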
668 vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s generated vendored
@@ -1,668 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Based on CRYPTOGAMS code with the following comment:
// # ====================================================================
// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// # project. The module is, however, dual licensed under OpenSSL and
// # CRYPTOGAMS licenses depending on where you obtain it. For further
// # details see http://www.openssl.org/~appro/cryptogams/.
// # ====================================================================

// Original code can be found at the link below:
// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91e5c39ca79126a4a876d5d8ff

// There are some differences between CRYPTOGAMS code and this one. The round
// loop for "_int" isn't the same as the original. Some adjustments were
// necessary because fewer vector registers are available. For example, some
// X variables (r12, r13, r14, and r15) share the same register used by the
// counter. The original code uses ctr to name the counter. Here we use CNT
// because Go uses CTR as the counter register name.

// +build ppc64le,!gccgo,!appengine

#include "textflag.h"

#define OUT R3
#define INP R4
#define LEN R5
#define KEY R6
#define CNT R7

#define TEMP R8

#define X0 R11
#define X1 R12
#define X2 R14
#define X3 R15
#define X4 R16
#define X5 R17
#define X6 R18
#define X7 R19
#define X8 R20
#define X9 R21
#define X10 R22
#define X11 R23
#define X12 R24
#define X13 R25
#define X14 R26
#define X15 R27

#define CON0 X0
#define CON1 X1
#define CON2 X2
#define CON3 X3

#define KEY0 X4
#define KEY1 X5
#define KEY2 X6
#define KEY3 X7
#define KEY4 X8
#define KEY5 X9
#define KEY6 X10
#define KEY7 X11

#define CNT0 X12
#define CNT1 X13
#define CNT2 X14
#define CNT3 X15

#define TMP0 R9
#define TMP1 R10
#define TMP2 R28
#define TMP3 R29

#define CONSTS R8

#define A0 V0
#define B0 V1
#define C0 V2
#define D0 V3
#define A1 V4
#define B1 V5
#define C1 V6
#define D1 V7
#define A2 V8
#define B2 V9
#define C2 V10
#define D2 V11
#define T0 V12
#define T1 V13
#define T2 V14

#define K0 V15
#define K1 V16
#define K2 V17
#define K3 V18
#define K4 V19
#define K5 V20

#define FOUR V21
#define SIXTEEN V22
#define TWENTY4 V23
#define TWENTY V24
#define TWELVE V25
#define TWENTY5 V26
#define SEVEN V27

#define INPPERM V28
#define OUTPERM V29
#define OUTMASK V30

#define DD0 V31
#define DD1 SEVEN
#define DD2 T0
#define DD3 T1
#define DD4 T2

DATA ·consts+0x00(SB)/8, $0x3320646e61707865
DATA ·consts+0x08(SB)/8, $0x6b20657479622d32
DATA ·consts+0x10(SB)/8, $0x0000000000000001
DATA ·consts+0x18(SB)/8, $0x0000000000000000
DATA ·consts+0x20(SB)/8, $0x0000000000000004
DATA ·consts+0x28(SB)/8, $0x0000000000000000
DATA ·consts+0x30(SB)/8, $0x0a0b08090e0f0c0d
DATA ·consts+0x38(SB)/8, $0x0203000106070405
DATA ·consts+0x40(SB)/8, $0x090a0b080d0e0f0c
DATA ·consts+0x48(SB)/8, $0x0102030005060704
GLOBL ·consts(SB), RODATA, $80

//func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[32]byte, counter *[16]byte)
TEXT ·chaCha20_ctr32_vmx(SB),NOSPLIT|NOFRAME,$0
	// Load the arguments inside the registers
	MOVD out+0(FP), OUT
	MOVD inp+8(FP), INP
	MOVD len+16(FP), LEN
	MOVD key+24(FP), KEY
	MOVD counter+32(FP), CNT

	MOVD $·consts(SB), CONSTS // point to consts addr

	MOVD $16, X0
	MOVD $32, X1
	MOVD $48, X2
	MOVD $64, X3
	MOVD $31, X4
	MOVD $15, X5

	// Load key
	LVX (KEY)(R0), K1
	LVSR (KEY)(R0), T0
	LVX (KEY)(X0), K2
	LVX (KEY)(X4), DD0

	// Load counter
	LVX (CNT)(R0), K3
	LVSR (CNT)(R0), T1
	LVX (CNT)(X5), DD1

	// Load constants
	LVX (CONSTS)(R0), K0
	LVX (CONSTS)(X0), K5
	LVX (CONSTS)(X1), FOUR
	LVX (CONSTS)(X2), SIXTEEN
	LVX (CONSTS)(X3), TWENTY4

	// Align key and counter
	VPERM K2, K1, T0, K1
	VPERM DD0, K2, T0, K2
	VPERM DD1, K3, T1, K3

	// Load counter to GPR
	MOVWZ 0(CNT), CNT0
	MOVWZ 4(CNT), CNT1
	MOVWZ 8(CNT), CNT2
	MOVWZ 12(CNT), CNT3

	// Adjust vectors for the initial state
	VADDUWM K3, K5, K3
	VADDUWM K3, K5, K4
	VADDUWM K4, K5, K5

	// Synthesized constants
	VSPLTISW $-12, TWENTY
	VSPLTISW $12, TWELVE
	VSPLTISW $-7, TWENTY5

	VXOR T0, T0, T0
	VSPLTISW $-1, OUTMASK
	LVSR (INP)(R0), INPPERM
	LVSL (OUT)(R0), OUTPERM
	VPERM OUTMASK, T0, OUTPERM, OUTMASK

loop_outer_vmx:
	// Load constant
	MOVD $0x61707865, CON0
	MOVD $0x3320646e, CON1
	MOVD $0x79622d32, CON2
	MOVD $0x6b206574, CON3

	VOR K0, K0, A0
	VOR K0, K0, A1
	VOR K0, K0, A2
	VOR K1, K1, B0

	MOVD $10, TEMP

	// Load key to GPR
	MOVWZ 0(KEY), X4
	MOVWZ 4(KEY), X5
	MOVWZ 8(KEY), X6
	MOVWZ 12(KEY), X7
	VOR K1, K1, B1
	VOR K1, K1, B2
	MOVWZ 16(KEY), X8
	MOVWZ 0(CNT), X12
	MOVWZ 20(KEY), X9
	MOVWZ 4(CNT), X13
	VOR K2, K2, C0
	VOR K2, K2, C1
	MOVWZ 24(KEY), X10
	MOVWZ 8(CNT), X14
	VOR K2, K2, C2
	VOR K3, K3, D0
	MOVWZ 28(KEY), X11
	MOVWZ 12(CNT), X15
	VOR K4, K4, D1
	VOR K5, K5, D2

	MOVD X4, TMP0
	MOVD X5, TMP1
	MOVD X6, TMP2
	MOVD X7, TMP3
	VSPLTISW $7, SEVEN

	MOVD TEMP, CTR

loop_vmx:
	// CRYPTOGAMS uses a macro to create a loop using perl. This isn't possible
	// using assembly macros. Therefore, the macro expansion result was used
	// in order to maintain the algorithm efficiency.
	// This loop generates three keystream blocks using VMX instructions and,
	// in parallel, one keystream block using scalar instructions.
	ADD X4, X0, X0
	ADD X5, X1, X1
	VADDUWM A0, B0, A0
	VADDUWM A1, B1, A1
	ADD X6, X2, X2
	ADD X7, X3, X3
	VADDUWM A2, B2, A2
	VXOR D0, A0, D0
	XOR X0, X12, X12
	XOR X1, X13, X13
	VXOR D1, A1, D1
	VXOR D2, A2, D2
	XOR X2, X14, X14
	XOR X3, X15, X15
	VPERM D0, D0, SIXTEEN, D0
	VPERM D1, D1, SIXTEEN, D1
	ROTLW $16, X12, X12
	ROTLW $16, X13, X13
	VPERM D2, D2, SIXTEEN, D2
	VADDUWM C0, D0, C0
	ROTLW $16, X14, X14
	ROTLW $16, X15, X15
	VADDUWM C1, D1, C1
	VADDUWM C2, D2, C2
	ADD X12, X8, X8
	ADD X13, X9, X9
	VXOR B0, C0, T0
	VXOR B1, C1, T1
	ADD X14, X10, X10
	ADD X15, X11, X11
	VXOR B2, C2, T2
	VRLW T0, TWELVE, B0
	XOR X8, X4, X4
	XOR X9, X5, X5
	VRLW T1, TWELVE, B1
	VRLW T2, TWELVE, B2
	XOR X10, X6, X6
	XOR X11, X7, X7
	VADDUWM A0, B0, A0
	VADDUWM A1, B1, A1
	ROTLW $12, X4, X4
	ROTLW $12, X5, X5
	VADDUWM A2, B2, A2
	VXOR D0, A0, D0
	ROTLW $12, X6, X6
	ROTLW $12, X7, X7
	VXOR D1, A1, D1
	VXOR D2, A2, D2
	ADD X4, X0, X0
	ADD X5, X1, X1
	VPERM D0, D0, TWENTY4, D0
	VPERM D1, D1, TWENTY4, D1
	ADD X6, X2, X2
	ADD X7, X3, X3
	VPERM D2, D2, TWENTY4, D2
	VADDUWM C0, D0, C0
	XOR X0, X12, X12
	XOR X1, X13, X13
	VADDUWM C1, D1, C1
	VADDUWM C2, D2, C2
	XOR X2, X14, X14
	XOR X3, X15, X15
	VXOR B0, C0, T0
	VXOR B1, C1, T1
	ROTLW $8, X12, X12
	ROTLW $8, X13, X13
	VXOR B2, C2, T2
	VRLW T0, SEVEN, B0
	ROTLW $8, X14, X14
	ROTLW $8, X15, X15
	VRLW T1, SEVEN, B1
	VRLW T2, SEVEN, B2
	ADD X12, X8, X8
	ADD X13, X9, X9
	VSLDOI $8, C0, C0, C0
	VSLDOI $8, C1, C1, C1
	ADD X14, X10, X10
	ADD X15, X11, X11
	VSLDOI $8, C2, C2, C2
	VSLDOI $12, B0, B0, B0
	XOR X8, X4, X4
	XOR X9, X5, X5
	VSLDOI $12, B1, B1, B1
	VSLDOI $12, B2, B2, B2
	XOR X10, X6, X6
	XOR X11, X7, X7
	VSLDOI $4, D0, D0, D0
	VSLDOI $4, D1, D1, D1
	ROTLW $7, X4, X4
	ROTLW $7, X5, X5
	VSLDOI $4, D2, D2, D2
	VADDUWM A0, B0, A0
	ROTLW $7, X6, X6
	ROTLW $7, X7, X7
	VADDUWM A1, B1, A1
	VADDUWM A2, B2, A2
	ADD X5, X0, X0
	ADD X6, X1, X1
	VXOR D0, A0, D0
	VXOR D1, A1, D1
	ADD X7, X2, X2
	ADD X4, X3, X3
	VXOR D2, A2, D2
	VPERM D0, D0, SIXTEEN, D0
	XOR X0, X15, X15
	XOR X1, X12, X12
	VPERM D1, D1, SIXTEEN, D1
	VPERM D2, D2, SIXTEEN, D2
	XOR X2, X13, X13
	XOR X3, X14, X14
	VADDUWM C0, D0, C0
	VADDUWM C1, D1, C1
	ROTLW $16, X15, X15
	ROTLW $16, X12, X12
	VADDUWM C2, D2, C2
	VXOR B0, C0, T0
	ROTLW $16, X13, X13
	ROTLW $16, X14, X14
	VXOR B1, C1, T1
	VXOR B2, C2, T2
	ADD X15, X10, X10
	ADD X12, X11, X11
	VRLW T0, TWELVE, B0
	VRLW T1, TWELVE, B1
	ADD X13, X8, X8
	ADD X14, X9, X9
	VRLW T2, TWELVE, B2
	VADDUWM A0, B0, A0
	XOR X10, X5, X5
	XOR X11, X6, X6
	VADDUWM A1, B1, A1
	VADDUWM A2, B2, A2
	XOR X8, X7, X7
	XOR X9, X4, X4
	VXOR D0, A0, D0
	VXOR D1, A1, D1
	ROTLW $12, X5, X5
	ROTLW $12, X6, X6
	VXOR D2, A2, D2
	VPERM D0, D0, TWENTY4, D0
	ROTLW $12, X7, X7
	ROTLW $12, X4, X4
	VPERM D1, D1, TWENTY4, D1
	VPERM D2, D2, TWENTY4, D2
	ADD X5, X0, X0
	ADD X6, X1, X1
	VADDUWM C0, D0, C0
	VADDUWM C1, D1, C1
	ADD X7, X2, X2
	ADD X4, X3, X3
	VADDUWM C2, D2, C2
	VXOR B0, C0, T0
	XOR X0, X15, X15
	XOR X1, X12, X12
	VXOR B1, C1, T1
	VXOR B2, C2, T2
	XOR X2, X13, X13
	XOR X3, X14, X14
	VRLW T0, SEVEN, B0
	VRLW T1, SEVEN, B1
	ROTLW $8, X15, X15
	ROTLW $8, X12, X12
	VRLW T2, SEVEN, B2
	VSLDOI $8, C0, C0, C0
	ROTLW $8, X13, X13
	ROTLW $8, X14, X14
	VSLDOI $8, C1, C1, C1
	VSLDOI $8, C2, C2, C2
	ADD X15, X10, X10
	ADD X12, X11, X11
	VSLDOI $4, B0, B0, B0
	VSLDOI $4, B1, B1, B1
	ADD X13, X8, X8
	ADD X14, X9, X9
	VSLDOI $4, B2, B2, B2
	VSLDOI $12, D0, D0, D0
	XOR X10, X5, X5
	XOR X11, X6, X6
	VSLDOI $12, D1, D1, D1
	VSLDOI $12, D2, D2, D2
	XOR X8, X7, X7
	XOR X9, X4, X4
	ROTLW $7, X5, X5
	ROTLW $7, X6, X6
	ROTLW $7, X7, X7
	ROTLW $7, X4, X4
	BC 0x10, 0, loop_vmx

	SUB $256, LEN, LEN

	// Accumulate key block
	ADD $0x61707865, X0, X0
	ADD $0x3320646e, X1, X1
	ADD $0x79622d32, X2, X2
	ADD $0x6b206574, X3, X3
	ADD TMP0, X4, X4
	ADD TMP1, X5, X5
	ADD TMP2, X6, X6
	ADD TMP3, X7, X7
	MOVWZ 16(KEY), TMP0
	MOVWZ 20(KEY), TMP1
	MOVWZ 24(KEY), TMP2
	MOVWZ 28(KEY), TMP3
	ADD TMP0, X8, X8
	ADD TMP1, X9, X9
	ADD TMP2, X10, X10
	ADD TMP3, X11, X11

	MOVWZ 12(CNT), TMP0
	MOVWZ 8(CNT), TMP1
	MOVWZ 4(CNT), TMP2
	MOVWZ 0(CNT), TEMP
	ADD TMP0, X15, X15
	ADD TMP1, X14, X14
	ADD TMP2, X13, X13
	ADD TEMP, X12, X12

	// Accumulate key block
	VADDUWM A0, K0, A0
	VADDUWM A1, K0, A1
	VADDUWM A2, K0, A2
	VADDUWM B0, K1, B0
	VADDUWM B1, K1, B1
	VADDUWM B2, K1, B2
	VADDUWM C0, K2, C0
	VADDUWM C1, K2, C1
	VADDUWM C2, K2, C2
	VADDUWM D0, K3, D0
	VADDUWM D1, K4, D1
	VADDUWM D2, K5, D2

	// Increment counter
	ADD $4, TEMP, TEMP
	MOVW TEMP, 0(CNT)

	VADDUWM K3, FOUR, K3
	VADDUWM K4, FOUR, K4
	VADDUWM K5, FOUR, K5

	// XOR the input slice (INP) with the keystream, which is stored in GPRs (X0-X3).

	// Load input (aligned or not)
	MOVWZ 0(INP), TMP0
	MOVWZ 4(INP), TMP1
	MOVWZ 8(INP), TMP2
	MOVWZ 12(INP), TMP3

	// XOR with input
	XOR TMP0, X0, X0
	XOR TMP1, X1, X1
	XOR TMP2, X2, X2
	XOR TMP3, X3, X3
	MOVWZ 16(INP), TMP0
	MOVWZ 20(INP), TMP1
	MOVWZ 24(INP), TMP2
	MOVWZ 28(INP), TMP3
	XOR TMP0, X4, X4
	XOR TMP1, X5, X5
	XOR TMP2, X6, X6
	XOR TMP3, X7, X7
	MOVWZ 32(INP), TMP0
	MOVWZ 36(INP), TMP1
	MOVWZ 40(INP), TMP2
	MOVWZ 44(INP), TMP3
	XOR TMP0, X8, X8
	XOR TMP1, X9, X9
	XOR TMP2, X10, X10
	XOR TMP3, X11, X11
	MOVWZ 48(INP), TMP0
	MOVWZ 52(INP), TMP1
	MOVWZ 56(INP), TMP2
	MOVWZ 60(INP), TMP3
	XOR TMP0, X12, X12
	XOR TMP1, X13, X13
	XOR TMP2, X14, X14
	XOR TMP3, X15, X15

	// Store output (aligned or not)
	MOVW X0, 0(OUT)
	MOVW X1, 4(OUT)
	MOVW X2, 8(OUT)
	MOVW X3, 12(OUT)

	ADD $64, INP, INP // INP points to the end of the slice for the alignment code below

	MOVW X4, 16(OUT)
	MOVD $16, TMP0
	MOVW X5, 20(OUT)
	MOVD $32, TMP1
	MOVW X6, 24(OUT)
	MOVD $48, TMP2
	MOVW X7, 28(OUT)
	MOVD $64, TMP3
	MOVW X8, 32(OUT)
	MOVW X9, 36(OUT)
	MOVW X10, 40(OUT)
	MOVW X11, 44(OUT)
	MOVW X12, 48(OUT)
	MOVW X13, 52(OUT)
	MOVW X14, 56(OUT)
	MOVW X15, 60(OUT)
	ADD $64, OUT, OUT

	// Load input
	LVX (INP)(R0), DD0
	LVX (INP)(TMP0), DD1
	LVX (INP)(TMP1), DD2
	LVX (INP)(TMP2), DD3
	LVX (INP)(TMP3), DD4
	ADD $64, INP, INP

	VPERM DD1, DD0, INPPERM, DD0 // Align input
	VPERM DD2, DD1, INPPERM, DD1
	VPERM DD3, DD2, INPPERM, DD2
	VPERM DD4, DD3, INPPERM, DD3
	VXOR A0, DD0, A0 // XOR with input
	VXOR B0, DD1, B0
	LVX (INP)(TMP0), DD1 // Keep loading input
	VXOR C0, DD2, C0
	LVX (INP)(TMP1), DD2
	VXOR D0, DD3, D0
	LVX (INP)(TMP2), DD3
	LVX (INP)(TMP3), DD0
	ADD $64, INP, INP
	MOVD $63, TMP3 // 63 is not a typo
	VPERM A0, A0, OUTPERM, A0
	VPERM B0, B0, OUTPERM, B0
	VPERM C0, C0, OUTPERM, C0
	VPERM D0, D0, OUTPERM, D0

	VPERM DD1, DD4, INPPERM, DD4 // Align input
	VPERM DD2, DD1, INPPERM, DD1
	VPERM DD3, DD2, INPPERM, DD2
	VPERM DD0, DD3, INPPERM, DD3
	VXOR A1, DD4, A1
	VXOR B1, DD1, B1
	LVX (INP)(TMP0), DD1 // Keep loading
	VXOR C1, DD2, C1
	LVX (INP)(TMP1), DD2
	VXOR D1, DD3, D1
	LVX (INP)(TMP2), DD3

	// Note that the LVX address is always rounded down to the nearest 16-byte
	// boundary, and that it always points to at most 15 bytes beyond the end of
	// the slice, so we cannot cross a page boundary.
	LVX (INP)(TMP3), DD4 // Redundant in aligned case.
	ADD $64, INP, INP
	VPERM A1, A1, OUTPERM, A1 // Pre-misalign output
	VPERM B1, B1, OUTPERM, B1
	VPERM C1, C1, OUTPERM, C1
	VPERM D1, D1, OUTPERM, D1

	VPERM DD1, DD0, INPPERM, DD0 // Align input
	VPERM DD2, DD1, INPPERM, DD1
	VPERM DD3, DD2, INPPERM, DD2
	VPERM DD4, DD3, INPPERM, DD3
	VXOR A2, DD0, A2
	VXOR B2, DD1, B2
	VXOR C2, DD2, C2
	VXOR D2, DD3, D2
	VPERM A2, A2, OUTPERM, A2
	VPERM B2, B2, OUTPERM, B2
	VPERM C2, C2, OUTPERM, C2
	VPERM D2, D2, OUTPERM, D2

	ANDCC $15, OUT, X1 // Is out aligned?
	MOVD OUT, X0

	VSEL A0, B0, OUTMASK, DD0 // Collect pre-misaligned output
	VSEL B0, C0, OUTMASK, DD1
	VSEL C0, D0, OUTMASK, DD2
	VSEL D0, A1, OUTMASK, DD3
	VSEL A1, B1, OUTMASK, B0
	VSEL B1, C1, OUTMASK, C0
	VSEL C1, D1, OUTMASK, D0
	VSEL D1, A2, OUTMASK, A1
	VSEL A2, B2, OUTMASK, B1
	VSEL B2, C2, OUTMASK, C1
	VSEL C2, D2, OUTMASK, D1

	STVX DD0, (OUT+TMP0)
	STVX DD1, (OUT+TMP1)
	STVX DD2, (OUT+TMP2)
	ADD $64, OUT, OUT
	STVX DD3, (OUT+R0)
	STVX B0, (OUT+TMP0)
	STVX C0, (OUT+TMP1)
	STVX D0, (OUT+TMP2)
	ADD $64, OUT, OUT
	STVX A1, (OUT+R0)
	STVX B1, (OUT+TMP0)
	STVX C1, (OUT+TMP1)
	STVX D1, (OUT+TMP2)
	ADD $64, OUT, OUT

	BEQ aligned_vmx

	SUB X1, OUT, X2 // in misaligned case edges
	MOVD $0, X3     // are written byte-by-byte

unaligned_tail_vmx:
	STVEBX D2, (X2+X3)
	ADD $1, X3, X3
	CMPW X3, X1
	BNE unaligned_tail_vmx
	SUB X1, X0, X2

unaligned_head_vmx:
	STVEBX A0, (X2+X1)
	CMPW X1, $15
	ADD $1, X1, X1
	BNE unaligned_head_vmx

	CMPU LEN, $255 // done with 256-byte block yet?
	BGT loop_outer_vmx

	JMP done_vmx

aligned_vmx:
	STVX A0, (X0+R0)
	CMPU LEN, $255 // done with 256-byte block yet?
	BGT loop_outer_vmx

done_vmx:
	RET
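Note: the outer loop above consumes 256 bytes per iteration (three VMX keystream blocks plus one scalar block) and advances the 32-bit block counter by 4 per pass (SUB $256, LEN / ADD $4, TEMP / VADDUWM K3, FOUR, K3), looping while more than 255 bytes remain (CMPU LEN, $255). A small Go sketch of just that bookkeeping, with hypothetical names, not code from this package:

	// chunks256 mirrors the VMX outer-loop accounting: four 64-byte ChaCha20
	// blocks per iteration, counter += 4, stop once at most 255 bytes remain.
	func chunks256(n int, ctr uint32) (iters, rest int, next uint32) {
		for n > 255 {
			n -= 256 // one 256-byte super-block consumed
			ctr += 4 // four block counters used
			iters++
		}
		return iters, n, ctr
	}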
31 vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go generated vendored
@@ -1,31 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.11
// +build !gccgo

package chacha20

const (
	haveAsm = true
	bufSize = 256
)

//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)

func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {

	if len(src) >= bufSize {
		xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
	}

	if len(src)%bufSize != 0 {
		i := len(src) - len(src)%bufSize
		c.buf = [bufSize]byte{}
		copy(c.buf[:], src[i:])
		xorKeyStreamVX(c.buf[:], c.buf[:], &c.key, &c.nonce, &c.counter)
		c.len = bufSize - copy(dst[i:], c.buf[:len(src)%bufSize])
	}
}
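Note: in xorKeyStreamAsm above, full bufSize multiples of src go through the assembly in place, while the partial tail takes a round trip through c.buf; c.len records how much buffered keystream is left for the next call. The index arithmetic, worked as a standalone sketch with hypothetical values (not part of this commit):

	// tailMath reproduces the slicing arithmetic of xorKeyStreamAsm.
	// For srcLen = 600, bufSize = 256 it returns (512, 88, 168): the asm
	// covers src[:512], 88 bytes route through c.buf, and 168 keystream
	// bytes stay buffered in c.len.
	func tailMath(srcLen, bufSize int) (start, tail, buffered int) {
		start = srcLen - srcLen%bufSize // bytes handled directly by the asm
		tail = srcLen % bufSize         // bytes copied through c.buf
		buffered = bufSize - tail       // leftover keystream, stored in c.len
		return start, tail, buffered
	}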
2 vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go generated vendored
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !ppc64le,!arm64,!s390x arm64,!go1.11 gccgo appengine
+// +build !s390x gccgo appengine
 
 package chacha20
 
52 vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go generated vendored
@@ -1,52 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ppc64le,!gccgo,!appengine

package chacha20

import "encoding/binary"

const (
	bufSize = 256
	haveAsm = true
)

//go:noescape
func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[8]uint32, counter *uint32)

func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
	if len(src) >= bufSize {
		chaCha20_ctr32_vmx(&dst[0], &src[0], len(src)-len(src)%bufSize, &c.key, &c.counter)
	}
	if len(src)%bufSize != 0 {
		chaCha20_ctr32_vmx(&c.buf[0], &c.buf[0], bufSize, &c.key, &c.counter)
		start := len(src) - len(src)%bufSize
		ts, td, tb := src[start:], dst[start:], c.buf[:]
		// Unroll loop to XOR 32 bytes per iteration.
		for i := 0; i < len(ts)-32; i += 32 {
			td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination
			s0 := binary.LittleEndian.Uint64(ts[0:8])
			s1 := binary.LittleEndian.Uint64(ts[8:16])
			s2 := binary.LittleEndian.Uint64(ts[16:24])
			s3 := binary.LittleEndian.Uint64(ts[24:32])
			b0 := binary.LittleEndian.Uint64(tb[0:8])
			b1 := binary.LittleEndian.Uint64(tb[8:16])
			b2 := binary.LittleEndian.Uint64(tb[16:24])
			b3 := binary.LittleEndian.Uint64(tb[24:32])
			binary.LittleEndian.PutUint64(td[0:8], s0^b0)
			binary.LittleEndian.PutUint64(td[8:16], s1^b1)
			binary.LittleEndian.PutUint64(td[16:24], s2^b2)
			binary.LittleEndian.PutUint64(td[24:32], s3^b3)
			ts, td, tb = ts[32:], td[32:], tb[32:]
		}
		td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination
		for i, v := range ts {
			td[i] = tb[i] ^ v
		}
		c.len = bufSize - (len(src) % bufSize)

	}

}
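Note: the tail loop above leans on two tricks: reslicing all operands to a common length so the compiler can drop per-iteration bounds checks, and XORing in 8-byte words instead of bytes. The same idea in isolation, as a hedged sketch (assumes an encoding/binary import; not code from this commit):

	// xorWords XORs src into dst 8 bytes at a time; len(dst) must be at
	// least len(src). The up-front reslice is the bounds-check-elimination
	// hint, the word loads/stores replace byte-at-a-time XOR.
	func xorWords(dst, src []byte) {
		dst = dst[:len(src)] // bounds check elimination hint
		n := len(src) &^ 7   // largest multiple of 8
		for i := 0; i < n; i += 8 {
			v := binary.LittleEndian.Uint64(src[i:]) ^ binary.LittleEndian.Uint64(dst[i:])
			binary.LittleEndian.PutUint64(dst[i:], v)
		}
		for i := n; i < len(src); i++ { // scalar tail
			dst[i] ^= src[i]
		}
	}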
11 vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go generated vendored
@@ -6,14 +6,15 @@
 
 package chacha20
 
-import (
-	"golang.org/x/sys/cpu"
-)
-
-var haveAsm = cpu.S390X.HasVX
+var haveAsm = hasVectorFacility()
 
 const bufSize = 256
 
+// hasVectorFacility reports whether the machine supports the vector
+// facility (vx).
+// Implementation in asm_s390x.s.
+func hasVectorFacility() bool
+
 // xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
 // be called when the vector facility is available.
 // Implementation in asm_s390x.s.
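Note: the removed line came from a newer x/crypto, which asks golang.org/x/sys/cpu for the vector facility instead of carrying its own STFLE probe; the vendored downgrade restores the assembly-based hasVectorFacility. For comparison, the x/sys/cpu form looks roughly like this (a sketch; cpu.S390X.HasVX is a real field, populated at startup on s390x):

	import "golang.org/x/sys/cpu"

	// useVX reports whether the vector code path may be taken, mirroring
	// `var haveAsm = cpu.S390X.HasVX` from the newer package version.
	func useVX() bool {
		return cpu.S390X.HasVX // always false on non-s390x builds
	}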
23 vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s generated vendored
@@ -258,3 +258,26 @@ tail:
 	MOVD R8, R3
 	MOVD $0, R4
 	JMP continue
+
+// func hasVectorFacility() bool
+TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
+	MOVD $x-24(SP), R1
+	XC $24, 0(R1), 0(R1) // clear the storage
+	MOVD $2, R0          // R0 is the number of double words stored -1
+	WORD $0xB2B01000     // STFLE 0(R1)
+	XOR R0, R0           // reset the value of R0
+	MOVBZ z-8(SP), R1
+	AND $0x40, R1
+	BEQ novector
+
+vectorinstalled:
+	// check if the vector instruction has been enabled
+	VLEIB $0, $0xF, V16
+	VLGVB $0, V16, R1
+	CMPBNE R1, $0xF, novector
+	MOVB $1, ret+0(FP) // have vx
+	RET
+
+novector:
+	MOVB $0, ret+0(FP) // no vx
+	RET