[FFmpeg-cvslog] checkasm: arm: Make the indentation consistent with other files

Martin Storsjö git at videolan.org
Fri May 15 21:33:19 EEST 2020


ffmpeg | branch: master | Martin Storsjö <martin at martin.st> | Tue May 12 13:44:05 2020 +0300| [89cf9e1fb642f5852afebdf2b534eb31e9410d2d] | committer: Martin Storsjö

checkasm: arm: Make the indentation consistent with other files

This makes it easier to share code with e.g. the dav1d implementation
of checkasm.

Signed-off-by: Martin Storsjö <martin at martin.st>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=89cf9e1fb642f5852afebdf2b534eb31e9410d2d
---

 tests/checkasm/aarch64/checkasm.S | 196 +++++++++++++++++++-------------------
 tests/checkasm/arm/checkasm.S     | 144 ++++++++++++++--------------
 2 files changed, 170 insertions(+), 170 deletions(-)

diff --git a/tests/checkasm/aarch64/checkasm.S b/tests/checkasm/aarch64/checkasm.S
index 89f2b77548..0dbfe8025e 100644
--- a/tests/checkasm/aarch64/checkasm.S
+++ b/tests/checkasm/aarch64/checkasm.S
@@ -23,29 +23,29 @@
 #include "libavutil/aarch64/asm.S"
 
 const register_init, align=4
-    .quad 0x21f86d66c8ca00ce
-    .quad 0x75b6ba21077c48ad
-    .quad 0xed56bb2dcb3c7736
-    .quad 0x8bda43d3fd1a7e06
-    .quad 0xb64a9c9e5d318408
-    .quad 0xdf9a54b303f1d3a3
-    .quad 0x4a75479abd64e097
-    .quad 0x249214109d5d1c88
-    .quad 0x1a1b2550a612b48c
-    .quad 0x79445c159ce79064
-    .quad 0x2eed899d5a28ddcd
-    .quad 0x86b2536fcd8cf636
-    .quad 0xb0856806085e7943
-    .quad 0x3f2bf84fc0fcca4e
-    .quad 0xacbd382dcf5b8de2
-    .quad 0xd229e1f5b281303f
-    .quad 0x71aeaff20b095fd9
-    .quad 0xab63e2e11fa38ed9
+        .quad 0x21f86d66c8ca00ce
+        .quad 0x75b6ba21077c48ad
+        .quad 0xed56bb2dcb3c7736
+        .quad 0x8bda43d3fd1a7e06
+        .quad 0xb64a9c9e5d318408
+        .quad 0xdf9a54b303f1d3a3
+        .quad 0x4a75479abd64e097
+        .quad 0x249214109d5d1c88
+        .quad 0x1a1b2550a612b48c
+        .quad 0x79445c159ce79064
+        .quad 0x2eed899d5a28ddcd
+        .quad 0x86b2536fcd8cf636
+        .quad 0xb0856806085e7943
+        .quad 0x3f2bf84fc0fcca4e
+        .quad 0xacbd382dcf5b8de2
+        .quad 0xd229e1f5b281303f
+        .quad 0x71aeaff20b095fd9
+        .quad 0xab63e2e11fa38ed9
 endconst
 
 
 const error_message
-    .asciz "failed to preserve register"
+        .asciz "failed to preserve register"
 endconst
 
 
@@ -55,103 +55,103 @@ endconst
 #define CLOBBER_STACK ((8*MAX_ARGS + 15) & ~15)
 
 function checkasm_stack_clobber, export=1
-    mov         x3,  sp
-    mov         x2,  #CLOBBER_STACK
+        mov             x3,  sp
+        mov             x2,  #CLOBBER_STACK
 1:
-    stp         x0,  x1,  [sp, #-16]!
-    subs        x2,  x2,  #16
-    b.gt        1b
-    mov         sp,  x3
-    ret
+        stp             x0,  x1,  [sp, #-16]!
+        subs            x2,  x2,  #16
+        b.gt            1b
+        mov             sp,  x3
+        ret
 endfunc
 
 #define ARG_STACK ((8*(MAX_ARGS - 8) + 15) & ~15)
 
 function checkasm_checked_call, export=1
-    stp         x29, x30, [sp, #-16]!
-    mov         x29, sp
-    stp         x19, x20, [sp, #-16]!
-    stp         x21, x22, [sp, #-16]!
-    stp         x23, x24, [sp, #-16]!
-    stp         x25, x26, [sp, #-16]!
-    stp         x27, x28, [sp, #-16]!
-    stp         d8,  d9,  [sp, #-16]!
-    stp         d10, d11, [sp, #-16]!
-    stp         d12, d13, [sp, #-16]!
-    stp         d14, d15, [sp, #-16]!
-
-    movrel      x9, register_init
-    ldp         d8,  d9,  [x9], #16
-    ldp         d10, d11, [x9], #16
-    ldp         d12, d13, [x9], #16
-    ldp         d14, d15, [x9], #16
-    ldp         x19, x20, [x9], #16
-    ldp         x21, x22, [x9], #16
-    ldp         x23, x24, [x9], #16
-    ldp         x25, x26, [x9], #16
-    ldp         x27, x28, [x9], #16
-
-    sub         sp,  sp,  #ARG_STACK
+        stp             x29, x30, [sp, #-16]!
+        mov             x29, sp
+        stp             x19, x20, [sp, #-16]!
+        stp             x21, x22, [sp, #-16]!
+        stp             x23, x24, [sp, #-16]!
+        stp             x25, x26, [sp, #-16]!
+        stp             x27, x28, [sp, #-16]!
+        stp             d8,  d9,  [sp, #-16]!
+        stp             d10, d11, [sp, #-16]!
+        stp             d12, d13, [sp, #-16]!
+        stp             d14, d15, [sp, #-16]!
+
+        movrel          x9, register_init
+        ldp             d8,  d9,  [x9], #16
+        ldp             d10, d11, [x9], #16
+        ldp             d12, d13, [x9], #16
+        ldp             d14, d15, [x9], #16
+        ldp             x19, x20, [x9], #16
+        ldp             x21, x22, [x9], #16
+        ldp             x23, x24, [x9], #16
+        ldp             x25, x26, [x9], #16
+        ldp             x27, x28, [x9], #16
+
+        sub             sp,  sp,  #ARG_STACK
 .equ pos, 0
 .rept MAX_ARGS-8
-    // Skip the first 8 args, that are loaded into registers
-    ldr         x9, [x29, #16 + 8*8 + pos]
-    str         x9, [sp, #pos]
+        // Skip the first 8 args, that are loaded into registers
+        ldr             x9, [x29, #16 + 8*8 + pos]
+        str             x9, [sp, #pos]
 .equ pos, pos + 8
 .endr
 
-    mov         x12, x0
-    ldp         x0,  x1,  [x29, #16]
-    ldp         x2,  x3,  [x29, #32]
-    ldp         x4,  x5,  [x29, #48]
-    ldp         x6,  x7,  [x29, #64]
-    blr         x12
-    add         sp,  sp,  #ARG_STACK
-    stp         x0,  x1,  [sp, #-16]!
-    movrel      x9, register_init
-    movi        v3.8h,  #0
+        mov             x12, x0
+        ldp             x0,  x1,  [x29, #16]
+        ldp             x2,  x3,  [x29, #32]
+        ldp             x4,  x5,  [x29, #48]
+        ldp             x6,  x7,  [x29, #64]
+        blr             x12
+        add             sp,  sp,  #ARG_STACK
+        stp             x0,  x1,  [sp, #-16]!
+        movrel          x9, register_init
+        movi            v3.8h,  #0
 
 .macro check_reg_neon reg1, reg2
-    ldr         q1,  [x9], #16
-    uzp1        v2.2d,  v\reg1\().2d, v\reg2\().2d
-    eor         v1.16b, v1.16b, v2.16b
-    orr         v3.16b, v3.16b, v1.16b
+        ldr             q1,  [x9], #16
+        uzp1            v2.2d,  v\reg1\().2d, v\reg2\().2d
+        eor             v1.16b, v1.16b, v2.16b
+        orr             v3.16b, v3.16b, v1.16b
 .endm
-    check_reg_neon  8,  9
-    check_reg_neon  10, 11
-    check_reg_neon  12, 13
-    check_reg_neon  14, 15
-    uqxtn       v3.8b,  v3.8h
-    umov        x3,  v3.d[0]
+        check_reg_neon  8,  9
+        check_reg_neon  10, 11
+        check_reg_neon  12, 13
+        check_reg_neon  14, 15
+        uqxtn           v3.8b,  v3.8h
+        umov            x3,  v3.d[0]
 
 .macro check_reg reg1, reg2
-    ldp         x0,  x1,  [x9], #16
-    eor         x0,  x0,  \reg1
-    eor         x1,  x1,  \reg2
-    orr         x3,  x3,  x0
-    orr         x3,  x3,  x1
+        ldp             x0,  x1,  [x9], #16
+        eor             x0,  x0,  \reg1
+        eor             x1,  x1,  \reg2
+        orr             x3,  x3,  x0
+        orr             x3,  x3,  x1
 .endm
-    check_reg   x19, x20
-    check_reg   x21, x22
-    check_reg   x23, x24
-    check_reg   x25, x26
-    check_reg   x27, x28
+        check_reg       x19, x20
+        check_reg       x21, x22
+        check_reg       x23, x24
+        check_reg       x25, x26
+        check_reg       x27, x28
 
-    cbz         x3,  0f
+        cbz             x3,  0f
 
-    movrel      x0, error_message
-    bl          X(checkasm_fail_func)
+        movrel          x0, error_message
+        bl              X(checkasm_fail_func)
 0:
-    ldp         x0,  x1,  [sp], #16
-    ldp         d14, d15, [sp], #16
-    ldp         d12, d13, [sp], #16
-    ldp         d10, d11, [sp], #16
-    ldp         d8,  d9,  [sp], #16
-    ldp         x27, x28, [sp], #16
-    ldp         x25, x26, [sp], #16
-    ldp         x23, x24, [sp], #16
-    ldp         x21, x22, [sp], #16
-    ldp         x19, x20, [sp], #16
-    ldp         x29, x30, [sp], #16
-    ret
+        ldp             x0,  x1,  [sp], #16
+        ldp             d14, d15, [sp], #16
+        ldp             d12, d13, [sp], #16
+        ldp             d10, d11, [sp], #16
+        ldp             d8,  d9,  [sp], #16
+        ldp             x27, x28, [sp], #16
+        ldp             x25, x26, [sp], #16
+        ldp             x23, x24, [sp], #16
+        ldp             x21, x22, [sp], #16
+        ldp             x19, x20, [sp], #16
+        ldp             x29, x30, [sp], #16
+        ret
 endfunc
diff --git a/tests/checkasm/arm/checkasm.S b/tests/checkasm/arm/checkasm.S
index 2051b290f6..838c383671 100644
--- a/tests/checkasm/arm/checkasm.S
+++ b/tests/checkasm/arm/checkasm.S
@@ -29,22 +29,22 @@ ELF     .eabi_attribute 10, 0           @ suppress Tag_FP_arch
 #endif
 
 const register_init, align=3
-    .quad 0x21f86d66c8ca00ce
-    .quad 0x75b6ba21077c48ad
-    .quad 0xed56bb2dcb3c7736
-    .quad 0x8bda43d3fd1a7e06
-    .quad 0xb64a9c9e5d318408
-    .quad 0xdf9a54b303f1d3a3
-    .quad 0x4a75479abd64e097
-    .quad 0x249214109d5d1c88
+        .quad 0x21f86d66c8ca00ce
+        .quad 0x75b6ba21077c48ad
+        .quad 0xed56bb2dcb3c7736
+        .quad 0x8bda43d3fd1a7e06
+        .quad 0xb64a9c9e5d318408
+        .quad 0xdf9a54b303f1d3a3
+        .quad 0x4a75479abd64e097
+        .quad 0x249214109d5d1c88
 endconst
 
 const error_message_fpscr
-    .asciz "failed to preserve register FPSCR, changed bits: %x"
+        .asciz "failed to preserve register FPSCR, changed bits: %x"
 error_message_gpr:
-    .asciz "failed to preserve register r%d"
+        .asciz "failed to preserve register r%d"
 error_message_vfp:
-    .asciz "failed to preserve register d%d"
+        .asciz "failed to preserve register d%d"
 endconst
 
 @ max number of args used by any asm function.
@@ -58,107 +58,107 @@ endconst
 .macro clobbercheck variant
 .equ pushed, 4*9
 function checkasm_checked_call_\variant, export=1
-    push        {r4-r11, lr}
+        push            {r4-r11, lr}
 .ifc \variant, vfp
-    vpush       {d8-d15}
-    fmrx        r4,  FPSCR
-    push        {r4}
+        vpush           {d8-d15}
+        fmrx            r4,  FPSCR
+        push            {r4}
 .equ pushed, pushed + 16*4 + 4
 .endif
 
-    movrel      r12, register_init
+        movrel          r12, register_init
 .ifc \variant, vfp
-    vldm        r12, {d8-d15}
+        vldm            r12, {d8-d15}
 .endif
-    ldm         r12, {r4-r11}
+        ldm             r12, {r4-r11}
 
-    sub         sp,  sp,  #ARG_STACK_A
+        sub             sp,  sp,  #ARG_STACK_A
 .equ pos, 0
 .rept MAX_ARGS-4
-    ldr         r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
-    str         r12, [sp, #pos]
+        ldr             r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
+        str             r12, [sp, #pos]
 .equ pos, pos + 4
 .endr
 
-    mov         r12, r0
-    mov         r0,  r2
-    mov         r1,  r3
-    ldrd        r2,  r3,  [sp, #ARG_STACK_A + pushed]
-    blx         r12
-    add         sp,  sp,  #ARG_STACK_A
+        mov             r12, r0
+        mov             r0,  r2
+        mov             r1,  r3
+        ldrd            r2,  r3,  [sp, #ARG_STACK_A + pushed]
+        blx             r12
+        add             sp,  sp,  #ARG_STACK_A
 
-    push        {r0, r1}
-    movrel      r12, register_init
+        push            {r0, r1}
+        movrel          r12, register_init
 .ifc \variant, vfp
 .macro check_reg_vfp, dreg, offset
-    ldrd        r2,  r3,  [r12, #8 * (\offset)]
-    vmov        r0,  lr,  \dreg
-    eor         r2,  r2,  r0
-    eor         r3,  r3,  lr
-    orrs        r2,  r2,  r3
-    bne         4f
+        ldrd            r2,  r3,  [r12, #8 * (\offset)]
+        vmov            r0,  lr,  \dreg
+        eor             r2,  r2,  r0
+        eor             r3,  r3,  lr
+        orrs            r2,  r2,  r3
+        bne             4f
 .endm
 
 .irp n, 8, 9, 10, 11, 12, 13, 14, 15
-    @ keep track of the checked double/SIMD register
-    mov         r1,  #\n
-    check_reg_vfp d\n, \n-8
+        @ keep track of the checked double/SIMD register
+        mov             r1,  #\n
+        check_reg_vfp   d\n, \n-8
 .endr
 .purgem check_reg_vfp
 
-    fmrx        r1,  FPSCR
-    ldr         r3,  [sp, #8]
-    eor         r1,  r1,  r3
-    @ Ignore changes in bits 0-4 and 7
-    bic         r1,  r1,  #0x9f
-    @ Ignore changes in the topmost 5 bits
-    bics        r1,  r1,  #0xf8000000
-    bne         3f
+        fmrx            r1,  FPSCR
+        ldr             r3,  [sp, #8]
+        eor             r1,  r1,  r3
+        @ Ignore changes in bits 0-4 and 7
+        bic             r1,  r1,  #0x9f
+        @ Ignore changes in the topmost 5 bits
+        bics            r1,  r1,  #0xf8000000
+        bne             3f
 .endif
 
-    @ keep track of the checked GPR
-    mov         r1,  #4
+        @ keep track of the checked GPR
+        mov             r1,  #4
 .macro check_reg reg1, reg2=
-    ldrd        r2,  r3,  [r12], #8
-    eors        r2,  r2,  \reg1
-    bne         2f
-    add         r1,  r1,  #1
+        ldrd            r2,  r3,  [r12], #8
+        eors            r2,  r2,  \reg1
+        bne             2f
+        add             r1,  r1,  #1
 .ifnb \reg2
-    eors        r3,  r3,  \reg2
-    bne         2f
+        eors            r3,  r3,  \reg2
+        bne             2f
 .endif
-    add         r1,  r1,  #1
+        add             r1,  r1,  #1
 .endm
-    check_reg   r4,  r5
-    check_reg   r6,  r7
+        check_reg       r4,  r5
+        check_reg       r6,  r7
 @ r9 is a volatile register in the ios ABI
 #ifdef __APPLE__
-    check_reg   r8
+        check_reg       r8
 #else
-    check_reg   r8,  r9
+        check_reg       r8,  r9
 #endif
-    check_reg   r10, r11
+        check_reg       r10, r11
 .purgem check_reg
 
-    b           0f
+        b               0f
 4:
-    movrel      r0, error_message_vfp
-    b           1f
+        movrel          r0, error_message_vfp
+        b               1f
 3:
-    movrel      r0, error_message_fpscr
-    b           1f
+        movrel          r0, error_message_fpscr
+        b               1f
 2:
-    movrel      r0, error_message_gpr
+        movrel          r0, error_message_gpr
 1:
-    blx         X(checkasm_fail_func)
+        blx             X(checkasm_fail_func)
 0:
-    pop         {r0, r1}
+        pop             {r0, r1}
 .ifc \variant, vfp
-    pop         {r2}
-    fmxr        FPSCR, r2
-    vpop        {d8-d15}
+        pop             {r2}
+        fmxr            FPSCR, r2
+        vpop            {d8-d15}
 .endif
-    pop         {r4-r11, pc}
+        pop             {r4-r11, pc}
 endfunc
 .endm
 


