From patchwork Wed Dec 4 12:34:12 2013
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Ragesh Radhakrishnan
X-Patchwork-Id: 22018
From: Ragesh Radhakrishnan <ragesh.r@linaro.org>
To: patches@linaro.org
Cc: Ragesh Radhakrishnan <ragesh.r@linaro.org>
Subject: [PATCH 3/9] Add ARMv8 port of the ARMv7 idct_4x4 implementation
Date: Wed, 4 Dec 2013 18:04:12 +0530
Message-Id: <1386160452-3016-1-git-send-email-ragesh.r@linaro.org>
X-Mailer: git-send-email 1.7.9.5

ARMv8 instruction changes ported to the ARMv7 idct_4x4 implementation.
The idct_helper macro is changed to accommodate the new instructions and
register literals.
---
 simd/jsimd_arm_neon_64.S | 255 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 255 insertions(+)

diff --git a/simd/jsimd_arm_neon_64.S b/simd/jsimd_arm_neon_64.S
index f7fec55..f510814 100644
--- a/simd/jsimd_arm_neon_64.S
+++ b/simd/jsimd_arm_neon_64.S
@@ -1086,3 +1086,258 @@ asm_function jsimd_idct_ifast_neon
 .unreq TMP3
 .unreq TMP4
 .endfunc
+
+/*****************************************************************************/
+
+/*
+ * jsimd_idct_4x4_neon
+ *
+ * This function contains inverse-DCT code for producing reduced-size
+ * 4x4 pixel output from an 8x8 DCT block. It uses the same calculations
+ * and produces exactly the same output as IJG's original 'jpeg_idct_4x4'
+ * function from jpeg-6b (jidctred.c).
+ *
+ * NOTE: jpeg-8 has an improved implementation of the 4x4 inverse-DCT, which
+ * requires far fewer arithmetic operations and hence should be faster.
+ * The primary purpose of this particular NEON-optimized function is
+ * bit-exact compatibility with jpeg-6b.
+ *
+ * TODO: slightly better instruction scheduling can be achieved by expanding
+ * the idct_helper/transpose_4x4 macros and reordering instructions,
+ * but readability would suffer somewhat.
+ */
+
+#define CONST_BITS 13
+
+#define FIX_0_211164243 (1730)  /* FIX(0.211164243) */
+#define FIX_0_509795579 (4176)  /* FIX(0.509795579) */
+#define FIX_0_601344887 (4926)  /* FIX(0.601344887) */
+#define FIX_0_720959822 (5906)  /* FIX(0.720959822) */
+#define FIX_0_765366865 (6270)  /* FIX(0.765366865) */
+#define FIX_0_850430095 (6967)  /* FIX(0.850430095) */
+#define FIX_0_899976223 (7373)  /* FIX(0.899976223) */
+#define FIX_1_061594337 (8697)  /* FIX(1.061594337) */
+#define FIX_1_272758580 (10426) /* FIX(1.272758580) */
+#define FIX_1_451774981 (11893) /* FIX(1.451774981) */
+#define FIX_1_847759065 (15137) /* FIX(1.847759065) */
+#define FIX_2_172734803 (17799) /* FIX(2.172734803) */
+#define FIX_2_562915447 (20995) /* FIX(2.562915447) */
+#define FIX_3_624509785 (29692) /* FIX(3.624509785) */
+
+.balign 16
+jsimd_idct_4x4_neon_consts:
+    .short FIX_1_847759065     /* v0.4h[0] */
+    .short -FIX_0_765366865    /* v0.4h[1] */
+    .short -FIX_0_211164243    /* v0.4h[2] */
+    .short FIX_1_451774981     /* v0.4h[3] */
+    .short -FIX_2_172734803    /* v1.4h[0] */
+    .short FIX_1_061594337     /* v1.4h[1] */
+    .short -FIX_0_509795579    /* v1.4h[2] */
+    .short -FIX_0_601344887    /* v1.4h[3] */
+    .short FIX_0_899976223     /* v2.4h[0] */
+    .short FIX_2_562915447     /* v2.4h[1] */
+    .short 1 << (CONST_BITS+1) /* v2.4h[2] */
+    .short 0                   /* v2.4h[3] */
+
+.macro idct_helper x4, x6, x8, x10, x12, x14, x16, shift, y26, y27, y28, y29
+    smull v28.4s, \x4, v2.4h[2]
+    smlal v28.4s, \x8, v0.4h[0]
+    smlal v28.4s, \x14, v0.4h[1]
+
+    smull v26.4s, \x16, v1.4h[2]
+    smlal v26.4s, \x12, v1.4h[3]
+    smlal v26.4s, \x10, v2.4h[0]
+    smlal v26.4s, \x6, v2.4h[1]
+
+    smull v30.4s, \x4, v2.4h[2]
+    smlsl v30.4s, \x8, v0.4h[0]
+    smlsl v30.4s, \x14, v0.4h[1]
+
+    smull v24.4s, \x16, v0.4h[2]
+    smlal v24.4s, \x12, v0.4h[3]
+    smlal v24.4s, \x10, v1.4h[0]
+    smlal v24.4s, \x6, v1.4h[1]
+
+    add v20.4s, v28.4s, v26.4s
+    sub v28.4s, v28.4s, v26.4s
+
+.if \shift > 16
+    /* rshrn's immediate is limited to 16, so round-shift and narrow
+     * in two steps when a larger descale is requested */
+    srshr v20.4s, v20.4s, #\shift
+    srshr v28.4s, v28.4s, #\shift
+    xtn \y26, v20.4s
+    xtn \y29, v28.4s
+.else
+    rshrn \y26, v20.4s, #\shift
+    rshrn \y29, v28.4s, #\shift
+.endif
+
+    add v20.4s, v30.4s, v24.4s
+    sub v30.4s, v30.4s, v24.4s
+
+.if \shift > 16
+    srshr v20.4s, v20.4s, #\shift
+    srshr v30.4s, v30.4s, #\shift
+    xtn \y27, v20.4s
+    xtn \y28, v30.4s
+.else
+    rshrn \y27, v20.4s, #\shift
+    rshrn \y28, v30.4s, #\shift
+.endif
+.endm
+
+
+/*****************************************************************************
+ *
+ * jsimd_idct_4x4_neon
+ *
+ *****************************************************************************/
+
+asm_function jsimd_idct_4x4_neon
+
+    DCT_TABLE  .req x0
+    COEF_BLOCK .req x1
+    OUTPUT_BUF .req x2
+    OUTPUT_COL .req x3
+    TMP1       .req x0
+    TMP2       .req x1
+    TMP3       .req x2
+    TMP4       .req x15
+
+    /* vpush {d8-d15} is not available on AArch64; save the callee-saved
+     * NEON registers manually */
+    sub sp, sp, #32
+    st1 {v8.4h-v11.4h}, [sp]
+    sub sp, sp, #32
+    st1 {v12.4h-v15.4h}, [sp]
+
+    /* Load constants (v3.4h is just used for padding) */
+    adr TMP4, jsimd_idct_4x4_neon_consts
+    ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [TMP4]
+
+    /* Load all COEF_BLOCK into NEON registers with the following allocation:
+     *       0 1 2 3 | 4 5 6 7
+     *      ---------+---------
+     *   0 | v4.4h   | v5.4h
+     *   1 | v6.4h   | v7.4h
+     *   2 | v8.4h   | v9.4h
+     *   3 | v10.4h  | v11.4h
+     *   4 | -       | -
+     *   5 | v12.4h  | v13.4h
+     *   6 | v14.4h  | v15.4h
+     *   7 | v16.4h  | v17.4h
+     */
+    ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [COEF_BLOCK], 32
+    ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [COEF_BLOCK], 32
+    add COEF_BLOCK, COEF_BLOCK, #16   /* skip row 4 */
+    ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [COEF_BLOCK], 32
+    ld1 {v16.4h, v17.4h}, [COEF_BLOCK], 16
+
+    /* Dequantize */
+    ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [DCT_TABLE], 32
+    mul v4.4h, v4.4h, v18.4h
+    mul v5.4h, v5.4h, v19.4h
+    ins v4.2d[1], v5.2d[0]    /* 128-bit q4 */
+
+    ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [DCT_TABLE], 32
+
+    mul v6.4h, v6.4h, v20.4h
+    mul v7.4h, v7.4h, v21.4h
+    ins v6.2d[1], v7.2d[0]    /* 128-bit q6 */
+
+    mul v8.4h, v8.4h, v22.4h
+    mul v9.4h, v9.4h, v23.4h
+    ins v8.2d[1], v9.2d[0]    /* 128-bit q8 */
+
+    add DCT_TABLE, DCT_TABLE, #16   /* skip row 4 */
+    ld1 {v26.4h, v27.4h, v28.4h, v29.4h}, [DCT_TABLE], 32
+
+    mul v10.4h, v10.4h, v24.4h
+    mul v11.4h, v11.4h, v25.4h
+    ins v10.2d[1], v11.2d[0]  /* 128-bit q10 */
+
+    mul v12.4h, v12.4h, v26.4h
+    mul v13.4h, v13.4h, v27.4h
+    ins v12.2d[1], v13.2d[0]  /* 128-bit q12 */
+
+    /* load row 7 into v30/v31, since the muls below use both registers */
+    ld1 {v30.4h, v31.4h}, [DCT_TABLE], 16
+
+    mul v14.4h, v14.4h, v28.4h
+    mul v15.4h, v15.4h, v29.4h
+    ins v14.2d[1], v15.2d[0]  /* 128-bit q14 */
+
+    mul v16.4h, v16.4h, v30.4h
+    mul v17.4h, v17.4h, v31.4h
+    ins v16.2d[1], v17.2d[0]  /* 128-bit q16 */
+
+    /* Pass 1 */
+    idct_helper v4.4h, v6.4h, v8.4h, v10.4h, v12.4h, v14.4h, v16.4h, 12, v4.4h, v6.4h, v8.4h, v10.4h
+    transpose_4x4 v4, v6, v8, v10, v3
+    ins v10.2d[1], v11.2d[0]
+    idct_helper v5.4h, v7.4h, v9.4h, v11.4h, v13.4h, v15.4h, v17.4h, 12, v5.4h, v7.4h, v9.4h, v11.4h
+    transpose_4x4 v5, v7, v9, v11, v3
+    ins v10.2d[1], v11.2d[0]
+
+    /* Pass 2 */
+    idct_helper v4.4h, v6.4h, v8.4h, v10.4h, v7.4h, v9.4h, v11.4h, 19, v26.4h, v27.4h, v28.4h, v29.4h
+    transpose_4x4 v26, v27, v28, v29, v3
+
+    /* Range limit */
+    movi v30.8h, #0x80
+    ins v26.2d[1], v27.2d[0]
+    ins v28.2d[1], v29.2d[0]
+    add v26.8h, v26.8h, v30.8h
+    add v28.8h, v28.8h, v30.8h
+    sqxtun v26.8b, v26.8h
+    sqxtun v27.8b, v28.8h
+
+    /* Store results to the output buffer */
+    ldp TMP1, TMP2, [OUTPUT_BUF], 16
+    ldp TMP3, TMP4, [OUTPUT_BUF]
+    add TMP1, TMP1, OUTPUT_COL
+    add TMP2, TMP2, OUTPUT_COL
+    add TMP3, TMP3, OUTPUT_COL
+    add TMP4, TMP4, OUTPUT_COL
+
+#if defined(__ARMEL__) && !RESPECT_STRICT_ALIGNMENT
+    /* We can use far fewer instructions on little-endian systems if the
+     * OS kernel is not configured to trap unaligned memory accesses
+     */
+    st1 {v26.s}[0], [TMP1], 4
+    st1 {v27.s}[0], [TMP3], 4
+    st1 {v26.s}[1], [TMP2], 4
+    st1 {v27.s}[1], [TMP4], 4
+#else
+    st1 {v26.b}[0], [TMP1], 1
+    st1 {v27.b}[0], [TMP3], 1
+    st1 {v26.b}[1], [TMP1], 1
+    st1 {v27.b}[1], [TMP3], 1
+    st1 {v26.b}[2], [TMP1], 1
+    st1 {v27.b}[2], [TMP3], 1
+    st1 {v26.b}[3], [TMP1], 1
+    st1 {v27.b}[3], [TMP3], 1
+
+    st1 {v26.b}[4], [TMP2], 1
+    st1 {v27.b}[4], [TMP4], 1
+    st1 {v26.b}[5], [TMP2], 1
+    st1 {v27.b}[5], [TMP4], 1
+    st1 {v26.b}[6], [TMP2], 1
+    st1 {v27.b}[6], [TMP4], 1
+    st1 {v26.b}[7], [TMP2], 1
+    st1 {v27.b}[7], [TMP4], 1
+#endif
+
+    /* vpop {d8-d15} is not available on AArch64; restore manually */
+    ld1 {v12.4h-v15.4h}, [sp], 32
+    ld1 {v8.4h-v11.4h}, [sp], 32
+
+    blr x30
+
+    .unreq DCT_TABLE
+    .unreq COEF_BLOCK
+    .unreq OUTPUT_BUF
+    .unreq OUTPUT_COL
+    .unreq TMP1
+    .unreq TMP2
+    .unreq TMP3
+    .unreq TMP4
+.endfunc
+
+.purgem idct_helper
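
A note on the fixed-point arithmetic, for reviewers: the FIX_* table entries
are the usual jpeg-6b 13-bit fixed-point encodings, and the two shift
arguments passed to idct_helper (12 for pass 1, 19 for pass 2) correspond to
jidctred.c's DESCALE amounts of CONST_BITS - PASS1_BITS + 1 and
CONST_BITS + PASS1_BITS + 3 + 1. A minimal C sketch of the convention
(FIX/DESCALE follow the jpeg-6b macros; main() is only illustrative):

    #include <stdio.h>

    #define CONST_BITS 13
    #define PASS1_BITS 2

    /* jpeg-6b style fixed point: value * 2^CONST_BITS, rounded to nearest */
    #define FIX(x)  ((int) ((x) * (1 << CONST_BITS) + 0.5))

    /* divide by 2^n with rounding, which is what srshr/rshrn compute */
    #define DESCALE(x, n)  (((x) + (1 << ((n) - 1))) >> (n))

    int main(void)
    {
        /* matches the .short FIX_1_847759065 (15137) table entry */
        printf("FIX(1.847759065) = %d\n", FIX(1.847759065));
        /* pass 1 descales by CONST_BITS - PASS1_BITS + 1 = 12,
           pass 2 by CONST_BITS + PASS1_BITS + 3 + 1 = 19 */
        printf("pass 1 shift = %d, pass 2 shift = %d\n",
               CONST_BITS - PASS1_BITS + 1, CONST_BITS + PASS1_BITS + 3 + 1);
        return 0;
    }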
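
The 1 << (CONST_BITS+1) entry at v2.4h[2] may look odd in a coefficient
table; as far as I can tell it is there so the DC term's left shift from
jidctred.c can be expressed as a multiply and folded into the same
smull/smlal accumulation chain. A small C illustration of the equivalence
(dc_term is a hypothetical helper name, not part of the patch):

    #include <stdint.h>

    #define CONST_BITS 13

    /* jidctred.c forms the pass-1 DC term with a left shift; multiplying
     * by 2^(CONST_BITS+1) yields the identical 32-bit value, which is what
     * lets "smull v28.4s, \x4, v2.4h[2]" start the accumulator chain */
    int32_t dc_term(int16_t coef0)
    {
        int32_t shifted    = (int32_t) coef0 << (CONST_BITS + 1);
        int32_t multiplied = (int32_t) coef0 * (1 << (CONST_BITS + 1));
        (void) shifted;  /* both forms are equal for all 16-bit inputs */
        return multiplied;
    }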
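
Finally, the range-limit step stands in for the range_limit[] table lookup
used by the C code: adding 0x80 (CENTERJSAMPLE) and then narrowing with
sqxtun clamps each result to [0, 255] in a single instruction. A scalar
model of what the add + sqxtun pair computes per lane (sat_u8 is just an
illustrative helper name):

    #include <stdint.h>

    /* scalar model of "add v26.8h, v26.8h, v30.8h" (v30 = 0x80 per lane)
     * followed by sqxtun: the 16-bit add wraps, then the signed result is
     * saturated down to an unsigned 8-bit pixel value */
    static uint8_t sat_u8(int16_t sample)
    {
        int16_t centered = (int16_t) (sample + 0x80);  /* CENTERJSAMPLE */
        if (centered < 0)   return 0;
        if (centered > 255) return 255;
        return (uint8_t) centered;
    }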