From: James Yu <james.yu@linaro.org>

Use NEON intrinsics instead of hand-coded NEON assembler. Support both
32-bit and 64-bit ARM.
---
 Makefile.am       |   3 +-
 arm/arm_init.c    | 298 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 arm/filter_neon.S | 237 ------------------------------------------
 pngpriv.h         |  21 +---
 4 files changed, 301 insertions(+), 258 deletions(-)
 delete mode 100644 arm/filter_neon.S

@@ -71,8 +71,7 @@ libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES = png.c pngerror.c\
png.h pngconf.h pngdebug.h pnginfo.h pngpriv.h pngstruct.h pngusr.dfa
if PNG_ARM_NEON
-libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += arm/arm_init.c\
- arm/filter_neon.S
+libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += arm/arm_init.c
endif
nodist_libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES = pnglibconf.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2013 Glenn Randers-Pehrson
* Written by Mans Rullgard, 2011.
- * Last changed in libpng 1.6.5 [September 14, 2013]
+ * Last changed by James Yu, Oct. 2013, to support 64-bit ARM
*
* This code is released under the libpng license.
* For conditions of distribution and use, see the disclaimer
@@ -153,6 +153,302 @@ png_have_neon(png_structp png_ptr)
# error "ALIGNED_MEMORY is required; set: -DPNG_ALIGNED_MEMORY_SUPPORTED"
#endif
+#include <arm_neon.h>
+
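+/* "Up" filter: each byte is the raw byte plus the byte above it (mod 256);
+ * sixteen bytes are processed per iteration.
+ */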
+static inline void
+png_read_filter_row_up_neon(png_row_infop row_info, png_bytep row,
+ png_const_bytep prev_row)
+{
+ png_bytep rp = row;
+ png_bytep rp_stop = row + row_info->rowbytes;
+ png_const_bytep pp = prev_row;
+
+ uint8x16_t qrp, qpp;
+
+ for (; rp != rp_stop; rp += 16, pp += 16)
+ {
+ qrp = vld1q_u8(rp);
+ qpp = vld1q_u8(pp);
+ qrp = vaddq_u8(qrp, qpp);
+ vst1q_u8(rp, qrp);
+ }
+}
+
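+/* "Sub" filter, 3 bytes per pixel: each pixel is the raw bytes plus the
+ * reconstructed pixel to its left.  Four pixels (12 bytes) are handled per
+ * iteration, with vext_u8 extracting them from a 16-byte load.
+ */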
+static inline void
+png_read_filter_row_sub3_neon(png_row_infop row_info, png_bytep row,
+ png_const_bytep prev_row)
+{
+ png_bytep rp = row;
+ png_bytep rp_stop = row + row_info->rowbytes;
+
+ PNG_UNUSED(prev_row)
+
+ uint8x8_t vtmp1, vtmp2;
+ uint8x8x4_t vdest;
+ vdest.val[3] = vdup_n_u8(0);
+
+ uint8x16_t vtmp = vld1q_u8(rp);
+ uint8x8x2_t vrp = *((uint8x8x2_t *)(&vtmp));
+
+ for (; rp != rp_stop;)
+ {
+ vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);
+ vdest.val[0] = vadd_u8(vdest.val[3], vrp.val[0]);
+ vtmp2 = vext_u8(vrp.val[0], vrp.val[1], 6);
+ vdest.val[1] = vadd_u8(vdest.val[0], vtmp1);
+
+ vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);
+ vdest.val[2] = vadd_u8(vdest.val[1], vtmp2);
+ vdest.val[3] = vadd_u8(vdest.val[2], vtmp1);
+
+ vtmp = vld1q_u8(rp + 12);
+ vrp = *((uint8x8x2_t *)(&vtmp));
+
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[0])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[1])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[2])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[3])), 0);
+ rp += 3;
+ }
+}
+
+static inline void
+png_read_filter_row_sub4_neon(png_row_infop row_info, png_bytep row,
+ png_const_bytep prev_row)
+{
+ png_bytep rp = row;
+ png_bytep rp_stop = row + row_info->rowbytes;
+
+ PNG_UNUSED(prev_row)
+
+ uint8x8x4_t vdest;
+ vdest.val[3] = vdup_n_u8(0);
+
+ for (; rp != rp_stop; rp += 16)
+ {
+ uint32x2x4_t vtmp = vld4_u32((uint32_t *)rp);
+ uint8x8x4_t vrp = *((uint8x8x4_t *)(&vtmp));
+
+ vdest.val[0] = vadd_u8(vdest.val[3], vrp.val[0]);
+ vdest.val[1] = vadd_u8(vdest.val[0], vrp.val[1]);
+ vdest.val[2] = vadd_u8(vdest.val[1], vrp.val[2]);
+ vdest.val[3] = vadd_u8(vdest.val[2], vrp.val[3]);
+ vst4_lane_u32((uint32_t *)rp, *((uint32x2x4_t *)(&vdest)), 0);
+ }
+}
+
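+/* "Average" filter: each byte is the raw byte plus the rounded-down mean
+ * (vhadd_u8) of the corresponding byte in the previous pixel and the byte
+ * above it.
+ */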
+static inline void
+png_read_filter_row_avg3_neon(png_row_infop row_info, png_bytep row,
+ png_const_bytep prev_row)
+{
+ png_bytep rp = row;
+ png_const_bytep pp = prev_row;
+ png_bytep rp_stop = row + row_info->rowbytes;
+
+ uint8x16_t vtmp;
+ uint8x8x2_t vrp, vpp;
+ uint8x8_t vtmp1, vtmp2, vtmp3;
+ uint8x8x4_t vdest;
+ vdest.val[3] = vdup_n_u8(0);
+
+ vtmp = vld1q_u8(rp);
+ vrp = *((uint8x8x2_t *)(&vtmp));
+
+ for (; rp != rp_stop; pp += 12)
+ {
+ vtmp = vld1q_u8(pp);
+ vpp = *((uint8x8x2_t *)(&vtmp));
+
+ vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);
+ vdest.val[0] = vhadd_u8(vdest.val[3], vpp.val[0]);
+ vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
+
+ vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 3);
+ vtmp3 = vext_u8(vrp.val[0], vrp.val[1], 6);
+ vdest.val[1] = vhadd_u8(vdest.val[0], vtmp2);
+ vdest.val[1] = vadd_u8(vdest.val[1], vtmp1);
+
+ vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 6);
+ vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);
+
+ vtmp = vld1q_u8(rp + 12);
+ vrp = *((uint8x8x2_t *)(&vtmp));
+
+ vdest.val[2] = vhadd_u8(vdest.val[1], vtmp2);
+ vdest.val[2] = vadd_u8(vdest.val[2], vtmp3);
+
+ vtmp2 = vext_u8(vpp.val[1], vpp.val[1], 1);
+
+ vdest.val[3] = vhadd_u8(vdest.val[2], vtmp2);
+ vdest.val[3] = vadd_u8(vdest.val[3], vtmp1);
+
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[0])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[1])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[2])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[3])), 0);
+ rp += 3;
+ }
+}
+
+static inline void
+png_read_filter_row_avg4_neon(png_row_infop row_info, png_bytep row,
+ png_const_bytep prev_row)
+{
+ png_bytep rp = row;
+ png_bytep rp_stop = row + row_info->rowbytes;
+ png_const_bytep pp = prev_row;
+
+ uint32x2x4_t vtmp;
+ uint8x8x4_t vrp, vpp;
+ uint8x8x4_t vdest;
+ vdest.val[3] = vdup_n_u8(0);
+
+ for (; rp != rp_stop; rp += 16, pp += 16)
+ {
+ vtmp = vld4_u32((uint32_t *)rp);
+ vrp = *((uint8x8x4_t *)(&vtmp));
+ vtmp = vld4_u32((uint32_t *)pp);
+ vpp = *((uint8x8x4_t *)(&vtmp));
+
+ vdest.val[0] = vhadd_u8(vdest.val[3], vpp.val[0]);
+ vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
+ vdest.val[1] = vhadd_u8(vdest.val[0], vpp.val[1]);
+ vdest.val[1] = vadd_u8(vdest.val[1], vrp.val[1]);
+ vdest.val[2] = vhadd_u8(vdest.val[1], vpp.val[2]);
+ vdest.val[2] = vadd_u8(vdest.val[2], vrp.val[2]);
+ vdest.val[3] = vhadd_u8(vdest.val[2], vpp.val[3]);
+ vdest.val[3] = vadd_u8(vdest.val[3], vrp.val[3]);
+
+ vst4_lane_u32((uint32_t *)rp, *((uint32x2x4_t *)(&vdest)), 0);
+ }
+}
+
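+/* Per-lane Paeth predictor: pa = |b - c|, pb = |a - c|, pc = |a + b - 2c|,
+ * with vbsl_u8 selecting a, b or c according to the smallest distance
+ * (ties resolved in that order, as the PNG specification requires).
+ */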
+static inline uint8x8_t paeth(uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint8x8_t d, e;
+ uint16x8_t p1, pa, pb, pc;
+
+ p1 = vaddl_u8(a, b); /* a + b */
+ pc = vaddl_u8(c, c); /* c * 2 */
+ pa = vabdl_u8(b, c); /* pa */
+ pb = vabdl_u8(a, c); /* pb */
+ pc = vabdq_u16(p1, pc); /* pc */
+
+ p1 = vcleq_u16(pa, pb); /* pa <= pb */
+ pa = vcleq_u16(pa, pc); /* pa <= pc */
+ pb = vcleq_u16(pb, pc); /* pb <= pc */
+
+ p1 = vandq_u16(p1, pa); /* pa <= pb && pa <= pc */
+
+ d = vmovn_u16(pb);
+ e = vmovn_u16(p1);
+
+ d = vbsl_u8(d, b, c);
+ e = vbsl_u8(e, a, d);
+
+ return e;
+}
+
+static inline void
+png_read_filter_row_paeth3_neon(png_row_infop row_info, png_bytep row,
+ png_const_bytep prev_row)
+{
+ png_bytep rp = row;
+ png_const_bytep pp = prev_row;
+ png_bytep rp_stop = row + row_info->rowbytes;
+
+ uint8x16_t vtmp;
+ uint8x8x2_t vrp, vpp;
+ uint8x8_t vtmp1, vtmp2, vtmp3;
+ uint8x8_t vlast = vdup_n_u8(0);
+ uint8x8x4_t vdest;
+ vdest.val[3] = vdup_n_u8(0);
+
+ vtmp = vld1q_u8(rp);
+ vrp = *((uint8x8x2_t *)(&vtmp));
+
+ for (; rp != rp_stop; pp += 12)
+ {
+ vtmp = vld1q_u8(pp);
+ vpp = *((uint8x8x2_t *)(&vtmp));
+
+ vdest.val[0] = paeth(vdest.val[3], vpp.val[0], vlast);
+ vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
+
+ vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);
+ vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 3);
+ vdest.val[1] = paeth(vdest.val[0], vtmp2, vpp.val[0]);
+ vdest.val[1] = vadd_u8(vdest.val[1], vtmp1);
+
+ vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 6);
+ vtmp3 = vext_u8(vpp.val[0], vpp.val[1], 6);
+ vdest.val[2] = paeth(vdest.val[1], vtmp3, vtmp2);
+ vdest.val[2] = vadd_u8(vdest.val[2], vtmp1);
+
+ vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);
+ vtmp2 = vext_u8(vpp.val[1], vpp.val[1], 1);
+
+ vtmp = vld1q_u8(rp + 12);
+ vrp = *((uint8x8x2_t *)(&vtmp));
+
+ vdest.val[3] = paeth(vdest.val[2], vtmp2, vtmp3);
+ vdest.val[3] = vadd_u8(vdest.val[3], vtmp1);
+
+ vlast = vtmp2;
+
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[0])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[1])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[2])), 0);
+ rp += 3;
+ vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[3])), 0);
+ rp += 3;
+ }
+}
+
+static inline void
+png_read_filter_row_paeth4_neon(png_row_infop row_info, png_bytep row,
+ png_const_bytep prev_row)
+{
+ png_bytep rp = row;
+ png_bytep rp_stop = row + row_info->rowbytes;
+ png_const_bytep pp = prev_row;
+
+ uint32x2x4_t vtmp;
+ uint8x8x4_t vrp, vpp;
+ uint8x8_t vlast = vdup_n_u8(0);
+ uint8x8x4_t vdest;
+ vdest.val[3] = vdup_n_u8(0);
+
+ for (; rp != rp_stop; rp += 16, pp += 16)
+ {
+ vtmp = vld4_u32((uint32_t *)rp);
+ vrp = *((uint8x8x4_t *)(&vtmp));
+ vtmp = vld4_u32((uint32_t *)pp);
+ vpp = *((uint8x8x4_t *)(&vtmp));
+
+ vdest.val[0] = paeth(vdest.val[3], vpp.val[0], vlast);
+ vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
+ vdest.val[1] = paeth(vdest.val[0], vpp.val[1], vpp.val[0]);
+ vdest.val[1] = vadd_u8(vdest.val[1], vrp.val[1]);
+ vdest.val[2] = paeth(vdest.val[1], vpp.val[2], vpp.val[1]);
+ vdest.val[2] = vadd_u8(vdest.val[2], vrp.val[2]);
+ vdest.val[3] = paeth(vdest.val[2], vpp.val[3], vpp.val[2]);
+ vdest.val[3] = vadd_u8(vdest.val[3], vrp.val[3]);
+
+ vlast = vpp.val[3];
+
+ vst4_lane_u32((uint32_t *)rp, *((uint32x2x4_t *)(&vdest)), 0);
+ }
+}
+
void
png_init_filter_functions_neon(png_structp pp, unsigned int bpp)
{
diff --git a/arm/filter_neon.S b/arm/filter_neon.S
deleted file mode 100644
--- a/arm/filter_neon.S
+++ /dev/null
@@ -1,237 +0,0 @@
-
-/* filter_neon.S - NEON optimised filter functions
- *
- * Copyright (c) 2013 Glenn Randers-Pehrson
- * Written by Mans Rullgard, 2011.
- * Last changed in libpng 1.5.17 [July 18, 2013]
- *
- * This code is released under the libpng license.
- * For conditions of distribution and use, see the disclaimer
- * and license in png.h
- */
-
-/* This is required to get the symbol renames, which are #defines, and also
- * includes the definition (or not) of PNG_ARM_NEON_OPT.
- */
-#define PNG_VERSION_INFO_ONLY
-#include "../pngpriv.h"
-
-#if defined(__linux__) && defined(__ELF__)
-.section .note.GNU-stack,"",%progbits /* mark stack as non-executable */
-#endif
-
-#ifdef PNG_READ_SUPPORTED
-#if PNG_ARM_NEON_OPT > 0
-
-#ifdef __ELF__
-# define ELF
-#else
-# define ELF @
-#endif
-
- .arch armv7-a
- .fpu neon
-
-.macro func name, export=0
- .macro endfunc
-ELF .size \name, . - \name
- .endfunc
- .purgem endfunc
- .endm
- .text
- .if \export
- .global \name
- .endif
-ELF .type \name, STT_FUNC
- .func \name
-\name:
-.endm
-
-func png_read_filter_row_sub4_neon, export=1
- ldr r3, [r0, #4] @ rowbytes
- vmov.i8 d3, #0
-1:
- vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
- vadd.u8 d0, d3, d4
- vadd.u8 d1, d0, d5
- vadd.u8 d2, d1, d6
- vadd.u8 d3, d2, d7
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
- subs r3, r3, #16
- bgt 1b
-
- bx lr
-endfunc
-
-func png_read_filter_row_sub3_neon, export=1
- ldr r3, [r0, #4] @ rowbytes
- vmov.i8 d3, #0
- mov r0, r1
- mov r2, #3
- mov r12, #12
- vld1.8 {q11}, [r0], r12
-1:
- vext.8 d5, d22, d23, #3
- vadd.u8 d0, d3, d22
- vext.8 d6, d22, d23, #6
- vadd.u8 d1, d0, d5
- vext.8 d7, d23, d23, #1
- vld1.8 {q11}, [r0], r12
- vst1.32 {d0[0]}, [r1,:32], r2
- vadd.u8 d2, d1, d6
- vst1.32 {d1[0]}, [r1], r2
- vadd.u8 d3, d2, d7
- vst1.32 {d2[0]}, [r1], r2
- vst1.32 {d3[0]}, [r1], r2
- subs r3, r3, #12
- bgt 1b
-
- bx lr
-endfunc
-
-func png_read_filter_row_up_neon, export=1
- ldr r3, [r0, #4] @ rowbytes
-1:
- vld1.8 {q0}, [r1,:128]
- vld1.8 {q1}, [r2,:128]!
- vadd.u8 q0, q0, q1
- vst1.8 {q0}, [r1,:128]!
- subs r3, r3, #16
- bgt 1b
-
- bx lr
-endfunc
-
-func png_read_filter_row_avg4_neon, export=1
- ldr r12, [r0, #4] @ rowbytes
- vmov.i8 d3, #0
-1:
- vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
- vld4.32 {d16[],d17[],d18[],d19[]},[r2,:128]!
- vhadd.u8 d0, d3, d16
- vadd.u8 d0, d0, d4
- vhadd.u8 d1, d0, d17
- vadd.u8 d1, d1, d5
- vhadd.u8 d2, d1, d18
- vadd.u8 d2, d2, d6
- vhadd.u8 d3, d2, d19
- vadd.u8 d3, d3, d7
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
- subs r12, r12, #16
- bgt 1b
-
- bx lr
-endfunc
-
-func png_read_filter_row_avg3_neon, export=1
- push {r4,lr}
- ldr r12, [r0, #4] @ rowbytes
- vmov.i8 d3, #0
- mov r0, r1
- mov r4, #3
- mov lr, #12
- vld1.8 {q11}, [r0], lr
-1:
- vld1.8 {q10}, [r2], lr
- vext.8 d5, d22, d23, #3
- vhadd.u8 d0, d3, d20
- vext.8 d17, d20, d21, #3
- vadd.u8 d0, d0, d22
- vext.8 d6, d22, d23, #6
- vhadd.u8 d1, d0, d17
- vext.8 d18, d20, d21, #6
- vadd.u8 d1, d1, d5
- vext.8 d7, d23, d23, #1
- vld1.8 {q11}, [r0], lr
- vst1.32 {d0[0]}, [r1,:32], r4
- vhadd.u8 d2, d1, d18
- vst1.32 {d1[0]}, [r1], r4
- vext.8 d19, d21, d21, #1
- vadd.u8 d2, d2, d6
- vhadd.u8 d3, d2, d19
- vst1.32 {d2[0]}, [r1], r4
- vadd.u8 d3, d3, d7
- vst1.32 {d3[0]}, [r1], r4
- subs r12, r12, #12
- bgt 1b
-
- pop {r4,pc}
-endfunc
-
-.macro paeth rx, ra, rb, rc
- vaddl.u8 q12, \ra, \rb @ a + b
- vaddl.u8 q15, \rc, \rc @ 2*c
- vabdl.u8 q13, \rb, \rc @ pa
- vabdl.u8 q14, \ra, \rc @ pb
- vabd.u16 q15, q12, q15 @ pc
- vcle.u16 q12, q13, q14 @ pa <= pb
- vcle.u16 q13, q13, q15 @ pa <= pc
- vcle.u16 q14, q14, q15 @ pb <= pc
- vand q12, q12, q13 @ pa <= pb && pa <= pc
- vmovn.u16 d28, q14
- vmovn.u16 \rx, q12
- vbsl d28, \rb, \rc
- vbsl \rx, \ra, d28
-.endm
-
-func png_read_filter_row_paeth4_neon, export=1
- ldr r12, [r0, #4] @ rowbytes
- vmov.i8 d3, #0
- vmov.i8 d20, #0
-1:
- vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
- vld4.32 {d16[],d17[],d18[],d19[]},[r2,:128]!
- paeth d0, d3, d16, d20
- vadd.u8 d0, d0, d4
- paeth d1, d0, d17, d16
- vadd.u8 d1, d1, d5
- paeth d2, d1, d18, d17
- vadd.u8 d2, d2, d6
- paeth d3, d2, d19, d18
- vmov d20, d19
- vadd.u8 d3, d3, d7
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
- subs r12, r12, #16
- bgt 1b
-
- bx lr
-endfunc
-
-func png_read_filter_row_paeth3_neon, export=1
- push {r4,lr}
- ldr r12, [r0, #4] @ rowbytes
- vmov.i8 d3, #0
- vmov.i8 d4, #0
- mov r0, r1
- mov r4, #3
- mov lr, #12
- vld1.8 {q11}, [r0], lr
-1:
- vld1.8 {q10}, [r2], lr
- paeth d0, d3, d20, d4
- vext.8 d5, d22, d23, #3
- vadd.u8 d0, d0, d22
- vext.8 d17, d20, d21, #3
- paeth d1, d0, d17, d20
- vst1.32 {d0[0]}, [r1,:32], r4
- vext.8 d6, d22, d23, #6
- vadd.u8 d1, d1, d5
- vext.8 d18, d20, d21, #6
- paeth d2, d1, d18, d17
- vext.8 d7, d23, d23, #1
- vld1.8 {q11}, [r0], lr
- vst1.32 {d1[0]}, [r1], r4
- vadd.u8 d2, d2, d6
- vext.8 d19, d21, d21, #1
- paeth d3, d2, d19, d18
- vst1.32 {d2[0]}, [r1], r4
- vmov d4, d19
- vadd.u8 d3, d3, d7
- vst1.32 {d3[0]}, [r1], r4
- subs r12, r12, #12
- bgt 1b
-
- pop {r4,pc}
-endfunc
-#endif /* PNG_ARM_NEON_OPT > 0 */
-#endif /* PNG_READ_SUPPORTED */
@@ -103,9 +103,9 @@
* it is necessary to ensure that all extern functions that *might* be used
* regardless of $(CFLAGS) get declared in this file. The test on __ARM_NEON__
* below is one example of this behavior because it is controlled by the
- * presence or not of -mfpu=neon on the GCC command line, it is possible to do
- * this in $(CC), e.g. "CC=gcc -mfpu=neon", but people who build libpng rarely
- * do this.
+ * presence or not of -mfpu=neon -O3 on the GCC command line, it is possible to
+ * do this in $(CC), e.g. "CC=gcc -mfpu=neon -O3", but people who build libpng
+ * rarely do this.
*/
#ifndef PNG_ARM_NEON_OPT
/* ARM NEON optimizations are being controlled by the compiler settings,
@@ -1140,21 +1140,6 @@ PNG_INTERNAL_FUNCTION(void,png_do_write_interlace,(png_row_infop row_info,
PNG_INTERNAL_FUNCTION(void,png_read_filter_row,(png_structrp pp, png_row_infop
row_info, png_bytep row, png_const_bytep prev_row, int filter),PNG_EMPTY);
-PNG_INTERNAL_FUNCTION(void,png_read_filter_row_up_neon,(png_row_infop row_info,
- png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
-PNG_INTERNAL_FUNCTION(void,png_read_filter_row_sub3_neon,(png_row_infop
- row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
-PNG_INTERNAL_FUNCTION(void,png_read_filter_row_sub4_neon,(png_row_infop
- row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
-PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg3_neon,(png_row_infop
- row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
-PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg4_neon,(png_row_infop
- row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
-PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth3_neon,(png_row_infop
- row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
-PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth4_neon,(png_row_infop
- row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
-
/* Choose the best filter to use and filter the row data */
PNG_INTERNAL_FUNCTION(void,png_write_find_filter,(png_structrp png_ptr,
png_row_infop row_info),PNG_EMPTY);
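A minimal scalar sketch of the predictor that the lane-wise paeth() helper
above vectorises (illustration only, not part of the patch; the name
paeth_predictor is invented for this note).  The PNG specification's three
distances reduce to pa = |b - c|, pb = |a - c| and pc = |a + b - 2c|, which is
exactly what the widened NEON arithmetic computes before the vbsl_u8
selection:

#include <stdlib.h>

static unsigned char
paeth_predictor(unsigned char a, unsigned char b, unsigned char c)
{
   /* a = left, b = above, c = upper-left, as in the PNG specification. */
   int p = a + b - c;
   int pa = abs(p - a);   /* == |b - c| */
   int pb = abs(p - b);   /* == |a - c| */
   int pc = abs(p - c);   /* == |a + b - 2*c| */

   if (pa <= pb && pa <= pc)
      return a;
   if (pb <= pc)
      return b;
   return c;
}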