[FFmpeg-trac] #1227(avcodec:new): crash in ff_put_h264_chroma_mc8_neon

FFmpeg trac at avcodec.org
Fri Sep 21 03:03:59 CEST 2012


#1227: crash in ff_put_h264_chroma_mc8_neon
------------------------------------+-----------------------------------
             Reporter:  elioxia     |                    Owner:
                 Type:  defect      |                   Status:  new
             Priority:  important   |                Component:  avcodec
              Version:  git-master  |               Resolution:
             Keywords:  arm crash   |               Blocked By:
             Blocking:              |  Reproduced by developer:  0
Analyzed by developer:  0           |
------------------------------------+-----------------------------------

Comment (by bruce-wu):

 After investigation, I found what the problem is: memory is read outside
 the bounds of the array pointed to by register R1 in the macro
 h264_chroma_mc8 or the macro h264_chroma_mc4 in
 libavcodec/arm/h264dsp_neon.S (version 0.8.10) or
 libavcodec/arm/h264cmc_neon.S (version 0.11.1). I fixed the bug by
 modifying those two macros.

 /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y)
 */
         .macro  h264_chroma_mc8 type
 function ff_\type\()_h264_chroma_mc8_neon, export=1
         push            {r4-r7, lr}
         ldrd            r4,  [sp, #20]
 .ifc \type,avg
         mov             lr,  r0
 .endif
         pld             [r1]
         pld             [r1, r2]

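         /* Bilinear weights from the fractional offsets x (r4) and y (r5):
            r4 = (8-x)*(8-y), ip = x*(8-y), r6 = (8-x)*y, r7 = x*y.
            The Z flag set by muls feeds the beq below: if x*y == 0 only a
            1-D filter is needed, otherwise the full 2-D loop at 1: runs. */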
         muls            r7,  r4,  r5
         rsb             r6,  r7,  r5,  lsl #3
         rsb             ip,  r7,  r4,  lsl #3
         sub             r4,  r7,  r4,  lsl #3
         sub             r4,  r4,  r5,  lsl #3
         add             r4,  r4,  #64

         beq             2f

         add             r5,  r1,  r2

         vdup.8          d0,  r4
         lsl             r4,  r2,  #1
         vdup.8          d1,  ip
         vld1.64         {d4, d5}, [r1], r4
         vdup.8          d2,  r6
         vdup.8          d3,  r7

         vext.8          d5,  d4,  d5,  #1

 1:
         vld1.64         {d6, d7}, [r5], r4
         pld             [r5]
         vmull.u8        q8,  d4,  d0
         vext.8          d7,  d6,  d7,  #1
         vmlal.u8        q8,  d5,  d1
         vld1.64         {d4, d5}, [r1], r4
         vmlal.u8        q8,  d6,  d2
         vext.8          d5,  d4,  d5,  #1
         vmlal.u8        q8,  d7,  d3
         vmull.u8        q9,  d6,  d0
         subs            r3,  r3,  #2
         vmlal.u8        q9,  d7,  d1
         vmlal.u8        q9,  d4,  d2
         vmlal.u8        q9,  d5,  d3
         vrshrn.u16      d16, q8,  #6
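         /* the unconditional read-ahead of the next source rows was
            disabled here (commented out below); the loop reloads them at
            the top of the next pass instead, so src is not read past the
            rows that are actually needed */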
         /*vld1.64         {d6, d7}, [r5], r4*/
         pld             [r1]
         vrshrn.u16      d17, q9,  #6
 .ifc \type,avg
         vld1.64         {d20}, [lr,:64], r2
         vld1.64         {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
 .endif
         /*vext.8          d7,  d6,  d7,  #1*/
         vst1.64         {d16}, [r0,:64], r2
         vst1.64         {d17}, [r0,:64], r2
         bgt             1b

         pop             {r4-r7, pc}

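         /* x*y == 0: only a single 1-D filter (or a plain copy) is needed */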
 2:      tst             r6,  r6
         add             ip,  ip,  r6
         vdup.8          d0,  r4
         vdup.8          d1,  ip

         beq             4f

         add             r5,  r1,  r2
         lsl             r4,  r2,  #1

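         /* vertical-only filter (x == 0), two output rows per pass */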
 3:
         vld1.64         {d4}, [r1], r4
         vld1.64         {d6}, [r5], r4

         pld             [r5]
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d6,  d1
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d4,  d1
         vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
 .ifc \type,avg
         vld1.64         {d20}, [lr,:64], r2
         vld1.64         {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
 .endif
         subs            r3,  r3,  #2
         pld             [r1]
         vst1.64         {d16}, [r0,:64], r2
         vst1.64         {d17}, [r0,:64], r2
         bgt             3b

         pop             {r4-r7, pc}

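         /* horizontal-only filter (y == 0); when x == 0 as well this is a
            plain copy */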
 4:      vld1.64         {d4, d5}, [r1], r2
         vld1.64         {d6, d7}, [r1], r2
         vext.8          d5,  d4,  d5,  #1
         vext.8          d7,  d6,  d7,  #1

         pld             [r1]
         subs            r3,  r3,  #2
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d5,  d1
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d7,  d1
         pld             [r1]
         vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
 .ifc \type,avg
         vld1.64         {d20}, [lr,:64], r2
         vld1.64         {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
 .endif
         vst1.64         {d16}, [r0,:64], r2
         vst1.64         {d17}, [r0,:64], r2
         bgt             4b

         pop             {r4-r7, pc}
 endfunc
         .endm

 /* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y)
 */
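 /* Same overall structure as h264_chroma_mc8 (full 2-D, vertical-only and
    horizontal-only paths), but for 4-pixel-wide rows: pairs of 4-byte rows,
    or a row and its 1-pixel-shifted copy, are packed into a single D
    register with vtrn.32 so one vmull/vmlal applies two taps at once, and
    the two halves are folded with vadd.i16 before the rounding shift. */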
         .macro  h264_chroma_mc4 type
 function ff_\type\()_h264_chroma_mc4_neon, export=1
         push            {r4-r7, lr}
         ldrd            r4,  [sp, #20]
 .ifc \type,avg
         mov             lr,  r0
 .endif
         pld             [r1]
         pld             [r1, r2]

         muls            r7,  r4,  r5
         rsb             r6,  r7,  r5,  lsl #3
         rsb             ip,  r7,  r4,  lsl #3
         sub             r4,  r7,  r4,  lsl #3
         sub             r4,  r4,  r5,  lsl #3
         add             r4,  r4,  #64

         beq             2f

         add             r5,  r1,  r2

         vdup.8          d0,  r4
         lsl             r4,  r2,  #1
         vdup.8          d1,  ip
         vld1.64         {d4},     [r1], r4
         vdup.8          d2,  r6
         vdup.8          d3,  r7

         vext.8          d5,  d4,  d5,  #1
         vtrn.32         d0,  d1
         vtrn.32         d2,  d3
         vtrn.32         d4,  d5

 1:
         vld1.64         {d6},     [r5], r4
         pld             [r5]
         vext.8          d7,  d6,  d7,  #1
         vmull.u8        q8,  d4,  d0
         vtrn.32         d6,  d7

         vld1.64         {d4},     [r1], r4
         vmlal.u8        q8,  d6,  d2

         vext.8          d5,  d4,  d5,  #1
         vmull.u8        q9,  d6,  d0
         vtrn.32         d4,  d5
         vmlal.u8        q9,  d4,  d2

         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
         vrshrn.u16      d16, q8,  #6
         subs            r3,  r3,  #2
         pld             [r1]
 .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
 .endif
         vst1.32         {d16[0]}, [r0,:32], r2
         vst1.32         {d16[1]}, [r0,:32], r2
         bgt             1b

         pop             {r4-r7, pc}

 2:      tst             r6,  r6
         add             ip,  ip,  r6
         vdup.8          d0,  r4
         vdup.8          d1,  ip
         vtrn.32         d0,  d1

         beq             4f

         vext.32         d1,  d0,  d1,  #1
         add             r5,  r1,  r2
         lsl             r4,  r2,  #1
         vld1.32         {d4[0]},  [r1], r4

 3:
         vld1.32         {d4[1]},  [r5], r4
         pld             [r5]
         vmull.u8        q8,  d4,  d0
         vld1.32         {d4[0]},  [r1], r4
         vmull.u8        q9,  d4,  d1

         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
         vrshrn.u16      d16, q8,  #6
 .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
 .endif
         subs            r3,  r3,  #2
         pld             [r1]
         vst1.32         {d16[0]}, [r0,:32], r2
         vst1.32         {d16[1]}, [r0,:32], r2
         bgt             3b

         pop             {r4-r7, pc}

 4:      vld1.64         {d4},     [r1], r2
         vld1.64         {d6},     [r1], r2
         vext.8          d5,  d4,  d5,  #1
         vext.8          d7,  d6,  d7,  #1
         vtrn.32         d4,  d5
         vtrn.32         d6,  d7

         vmull.u8        q8,  d4,  d0
         vmull.u8        q9,  d6,  d0
         subs            r3,  r3,  #2

         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
         pld             [r1]
         vrshrn.u16      d16, q8,  #6
 .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
 .endif
         pld             [r1]
         vst1.32         {d16[0]}, [r0,:32], r2
         vst1.32         {d16[1]}, [r0,:32], r2
         bgt             4b

         pop             {r4-r7, pc}
 endfunc
         .endm

 As shown in the code, register R1 points to the array src (of type
 uint8_t *). The idea behind the modification is to test whether register
 R3 (the argument h in the C caller) is less than or equal to zero before
 reading elements through register R1; if it is, the reads are skipped and
 control jumps to the end of the function.
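
 A minimal scalar sketch of that control-flow idea (hypothetical names,
 not code from FFmpeg): process two rows per pass, as the NEON loops do,
 but only touch the source rows after the remaining row count has been
 checked, instead of pre-fetching rows for a next pass that may never run.

 #include <stdint.h>
 #include <string.h>

 /* Illustration only: memcpy stands in for the real filtering work. */
 static void process_rows_guarded(uint8_t *dst, const uint8_t *src,
                                  int stride, int h)
 {
     while (h > 0) {                            /* check the counter first */
         memcpy(dst,          src,          8); /* row n                   */
         memcpy(dst + stride, src + stride, 8); /* row n + 1               */
         dst += 2 * stride;
         src += 2 * stride;
         h   -= 2;
     }                                  /* no read-ahead once h reaches 0 */
 }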

 I tested the code using several videos, and it works. For version 0.11.1,
 the modification is the same.

-- 
Ticket URL: <https://ffmpeg.org/trac/ffmpeg/ticket/1227#comment:8>
FFmpeg <http://ffmpeg.org>
FFmpeg issue tracker