/*
 *
 * Bluetooth low-complexity, subband codec (SBC) library
 *
 * Copyright (C) 2004-2009 Marcel Holtmann <marcel@holtmann.org>
 * Copyright (C) 2004-2005 Henryk Ploetz <henryk@ploetzli.ch>
 * Copyright (C) 2005-2006 Brad Midgley <bmidgley@xmission.com>
 *
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <stdint.h>
#include <limits.h>
#include "sbc.h"
#include "sbc_math.h"
#include "sbc_tables.h"

#include "sbc_primitives_neon.h"

/*
 * ARM NEON optimizations
 */

#ifdef SBC_BUILD_WITH_NEON_SUPPORT
static inline void _sbc_analyze_four_neon(const int16_t *in, int32_t *out,
                                          const FIXED_T *consts)
{
        /* TODO: merge even and odd cases (or even merge all four calls to this
         * function) in order to have only aligned reads from 'in' array
         * and reduce number of load instructions */
        asm volatile (
                "vld1.16 {d4, d5}, [%0, :64]!\n"
                "vld1.16 {d8, d9}, [%1, :128]!\n"

                "vmull.s16 q0, d4, d8\n"
                "vld1.16 {d6, d7}, [%0, :64]!\n"
                "vmull.s16 q1, d5, d9\n"
                "vld1.16 {d10, d11}, [%1, :128]!\n"

                "vmlal.s16 q0, d6, d10\n"
                "vld1.16 {d4, d5}, [%0, :64]!\n"
                "vmlal.s16 q1, d7, d11\n"
                "vld1.16 {d8, d9}, [%1, :128]!\n"

                "vmlal.s16 q0, d4, d8\n"
                "vld1.16 {d6, d7}, [%0, :64]!\n"
                "vmlal.s16 q1, d5, d9\n"
                "vld1.16 {d10, d11}, [%1, :128]!\n"

                "vmlal.s16 q0, d6, d10\n"
                "vld1.16 {d4, d5}, [%0, :64]!\n"
                "vmlal.s16 q1, d7, d11\n"
                "vld1.16 {d8, d9}, [%1, :128]!\n"

                "vmlal.s16 q0, d4, d8\n"
                "vmlal.s16 q1, d5, d9\n"

                "vpadd.s32 d0, d0, d1\n"
                "vpadd.s32 d1, d2, d3\n"

                "vrshrn.s32 d0, q0, %3\n"

                "vld1.16 {d2, d3, d4, d5}, [%1, :128]!\n"

                "vdup.i32 d1, d0[1]\n"  /* TODO: can be eliminated */
                "vdup.i32 d0, d0[0]\n"  /* TODO: can be eliminated */

                "vmull.s16 q3, d2, d0\n"
                "vmull.s16 q4, d3, d0\n"
                "vmlal.s16 q3, d4, d1\n"
                "vmlal.s16 q4, d5, d1\n"

                "vpadd.s32 d0, d6, d7\n" /* TODO: can be eliminated */
                "vpadd.s32 d1, d8, d9\n" /* TODO: can be eliminated */

                "vst1.32 {d0, d1}, [%2, :128]\n"
                : "+r" (in), "+r" (consts)
                : "r" (out),
                  "i" (SBC_PROTO_FIXED4_SCALE)
                : "memory",
                  "d0", "d1", "d2", "d3", "d4", "d5",
                  "d6", "d7", "d8", "d9", "d10", "d11");
}
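
/*
 * Illustrative plain-C sketch of what the asm above computes, assuming
 * the SIMD-interleaved constant layout implied by the loads: 40 FIR taps
 * followed by a 4x4 cosine-modulation matrix stored two rows at a time.
 * The portable equivalents live in sbc_primitives.c; the helper name
 * below is hypothetical. _sbc_analyze_eight_neon() follows the same
 * pattern with an 80-tap FIR and an 8x8 matrix.
 */
#if 0
static void _sbc_analyze_four_ref(const int16_t *in, int32_t *out,
                                  const int16_t *consts)
{
        int32_t t[4] = { 0, 0, 0, 0 };
        int i, hop;

        /* 40-tap polyphase FIR: each t[i] picks up one pair of products
         * from every 8-sample chunk (the vmull/vmlal accumulation plus
         * the final vpadd above) */
        for (hop = 0; hop < 40; hop += 8)
                for (i = 0; i < 4; i++)
                        t[i] += (int32_t) in[hop + 2 * i] *
                                        consts[hop + 2 * i] +
                                (int32_t) in[hop + 2 * i + 1] *
                                        consts[hop + 2 * i + 1];

        /* rounding narrowing shift, as done by vrshrn.s32 */
        for (i = 0; i < 4; i++)
                t[i] = (t[i] + (1 << (SBC_PROTO_FIXED4_SCALE - 1))) >>
                                                SBC_PROTO_FIXED4_SCALE;

        /* 4x4 matrix multiply; the t[0]/t[1] columns sit at
         * consts[40..47] and the t[2]/t[3] columns at consts[48..55] */
        out[0] = t[0] * consts[40] + t[1] * consts[41] +
                 t[2] * consts[48] + t[3] * consts[49];
        out[1] = t[0] * consts[42] + t[1] * consts[43] +
                 t[2] * consts[50] + t[3] * consts[51];
        out[2] = t[0] * consts[44] + t[1] * consts[45] +
                 t[2] * consts[52] + t[3] * consts[53];
        out[3] = t[0] * consts[46] + t[1] * consts[47] +
                 t[2] * consts[54] + t[3] * consts[55];
}
#endif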

static inline void _sbc_analyze_eight_neon(const int16_t *in, int32_t *out,
                                           const FIXED_T *consts)
{
        /* TODO: merge even and odd cases (or even merge all four calls to this
         * function) in order to have only aligned reads from 'in' array
         * and reduce number of load instructions */
        asm volatile (
                "vld1.16 {d4, d5}, [%0, :64]!\n"
                "vld1.16 {d8, d9}, [%1, :128]!\n"

                "vmull.s16 q6, d4, d8\n"
                "vld1.16 {d6, d7}, [%0, :64]!\n"
                "vmull.s16 q7, d5, d9\n"
                "vld1.16 {d10, d11}, [%1, :128]!\n"
                "vmull.s16 q8, d6, d10\n"
                "vld1.16 {d4, d5}, [%0, :64]!\n"
                "vmull.s16 q9, d7, d11\n"
                "vld1.16 {d8, d9}, [%1, :128]!\n"

                "vmlal.s16 q6, d4, d8\n"
                "vld1.16 {d6, d7}, [%0, :64]!\n"
                "vmlal.s16 q7, d5, d9\n"
                "vld1.16 {d10, d11}, [%1, :128]!\n"
                "vmlal.s16 q8, d6, d10\n"
                "vld1.16 {d4, d5}, [%0, :64]!\n"
                "vmlal.s16 q9, d7, d11\n"
                "vld1.16 {d8, d9}, [%1, :128]!\n"

                "vmlal.s16 q6, d4, d8\n"
                "vld1.16 {d6, d7}, [%0, :64]!\n"
                "vmlal.s16 q7, d5, d9\n"
                "vld1.16 {d10, d11}, [%1, :128]!\n"
                "vmlal.s16 q8, d6, d10\n"
                "vld1.16 {d4, d5}, [%0, :64]!\n"
                "vmlal.s16 q9, d7, d11\n"
                "vld1.16 {d8, d9}, [%1, :128]!\n"

                "vmlal.s16 q6, d4, d8\n"
                "vld1.16 {d6, d7}, [%0, :64]!\n"
                "vmlal.s16 q7, d5, d9\n"
                "vld1.16 {d10, d11}, [%1, :128]!\n"
                "vmlal.s16 q8, d6, d10\n"
                "vld1.16 {d4, d5}, [%0, :64]!\n"
                "vmlal.s16 q9, d7, d11\n"
                "vld1.16 {d8, d9}, [%1, :128]!\n"

                "vmlal.s16 q6, d4, d8\n"
                "vld1.16 {d6, d7}, [%0, :64]!\n"
                "vmlal.s16 q7, d5, d9\n"
                "vld1.16 {d10, d11}, [%1, :128]!\n"

                "vmlal.s16 q8, d6, d10\n"
                "vmlal.s16 q9, d7, d11\n"

                "vpadd.s32 d0, d12, d13\n"
                "vpadd.s32 d1, d14, d15\n"
                "vpadd.s32 d2, d16, d17\n"
                "vpadd.s32 d3, d18, d19\n"

                "vrshr.s32 q0, q0, %3\n"
                "vrshr.s32 q1, q1, %3\n"
                "vmovn.s32 d0, q0\n"
                "vmovn.s32 d1, q1\n"

                "vdup.i32 d3, d1[1]\n"  /* TODO: can be eliminated */
                "vdup.i32 d2, d1[0]\n"  /* TODO: can be eliminated */
                "vdup.i32 d1, d0[1]\n"  /* TODO: can be eliminated */
                "vdup.i32 d0, d0[0]\n"  /* TODO: can be eliminated */

                "vld1.16 {d4, d5}, [%1, :128]!\n"
                "vmull.s16 q6, d4, d0\n"
                "vld1.16 {d6, d7}, [%1, :128]!\n"
                "vmull.s16 q7, d5, d0\n"
                "vmull.s16 q8, d6, d0\n"
                "vmull.s16 q9, d7, d0\n"

                "vld1.16 {d4, d5}, [%1, :128]!\n"
                "vmlal.s16 q6, d4, d1\n"
                "vld1.16 {d6, d7}, [%1, :128]!\n"
                "vmlal.s16 q7, d5, d1\n"
                "vmlal.s16 q8, d6, d1\n"
                "vmlal.s16 q9, d7, d1\n"

                "vld1.16 {d4, d5}, [%1, :128]!\n"
                "vmlal.s16 q6, d4, d2\n"
                "vld1.16 {d6, d7}, [%1, :128]!\n"
                "vmlal.s16 q7, d5, d2\n"
                "vmlal.s16 q8, d6, d2\n"
                "vmlal.s16 q9, d7, d2\n"

                "vld1.16 {d4, d5}, [%1, :128]!\n"
                "vmlal.s16 q6, d4, d3\n"
                "vld1.16 {d6, d7}, [%1, :128]!\n"
                "vmlal.s16 q7, d5, d3\n"
                "vmlal.s16 q8, d6, d3\n"
                "vmlal.s16 q9, d7, d3\n"

                "vpadd.s32 d0, d12, d13\n" /* TODO: can be eliminated */
                "vpadd.s32 d1, d14, d15\n" /* TODO: can be eliminated */
                "vpadd.s32 d2, d16, d17\n" /* TODO: can be eliminated */
                "vpadd.s32 d3, d18, d19\n" /* TODO: can be eliminated */

                "vst1.32 {d0, d1, d2, d3}, [%2, :128]\n"
                : "+r" (in), "+r" (consts)
                : "r" (out),
                  "i" (SBC_PROTO_FIXED8_SCALE)
                : "memory",
                  "d0", "d1", "d2", "d3", "d4", "d5",
                  "d6", "d7", "d8", "d9", "d10", "d11",
                  "d12", "d13", "d14", "d15", "d16", "d17",
                  "d18", "d19");
}

static inline void sbc_analyze_4b_4s_neon(int16_t *x,
                                          int32_t *out, int out_stride)
{
        /* Analyze blocks */
        _sbc_analyze_four_neon(x + 12, out, analysis_consts_fixed4_simd_odd);
        out += out_stride;
        _sbc_analyze_four_neon(x + 8, out, analysis_consts_fixed4_simd_even);
        out += out_stride;
        _sbc_analyze_four_neon(x + 4, out, analysis_consts_fixed4_simd_odd);
        out += out_stride;
        _sbc_analyze_four_neon(x + 0, out, analysis_consts_fixed4_simd_even);
}

static inline void sbc_analyze_4b_8s_neon(int16_t *x,
                                          int32_t *out, int out_stride)
{
        /* Analyze blocks */
        _sbc_analyze_eight_neon(x + 24, out, analysis_consts_fixed8_simd_odd);
        out += out_stride;
        _sbc_analyze_eight_neon(x + 16, out, analysis_consts_fixed8_simd_even);
        out += out_stride;
        _sbc_analyze_eight_neon(x + 8, out, analysis_consts_fixed8_simd_odd);
        out += out_stride;
        _sbc_analyze_eight_neon(x + 0, out, analysis_consts_fixed8_simd_even);
}
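
/*
 * Note on the two wrappers above: the four blocks are analyzed at
 * descending window offsets (x + 12 down to x + 0 for four subbands,
 * x + 24 down to x + 0 for eight), so consecutive blocks see the input
 * window shifted by one subband's worth of samples. The separate
 * ..._odd/..._even constant tables appear to fold that phase difference
 * into the coefficients, letting both cases share the same inner loop
 * while keeping the reads from 'x' aligned.
 */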

static void sbc_calc_scalefactors_neon(
        int32_t sb_sample_f[16][2][8],
        uint32_t scale_factor[2][8],
        int blocks, int channels, int subbands)
{
        int ch, sb;
        for (ch = 0; ch < channels; ch++) {
                for (sb = 0; sb < subbands; sb += 4) {
                        int blk = blocks;
                        int32_t *in = &sb_sample_f[0][ch][sb];
                        asm volatile (
                                "vmov.s32 q0, %[c1]\n"
                                "vmov.s32 q1, %[c1]\n"
                                "1:\n"
                                "vld1.32 {d16, d17}, [%[in], :128], %[inc]\n"
                                "vabs.s32 q8, q8\n"
                                "vld1.32 {d18, d19}, [%[in], :128], %[inc]\n"
                                "vabs.s32 q9, q9\n"
                                "vld1.32 {d20, d21}, [%[in], :128], %[inc]\n"
                                "vabs.s32 q10, q10\n"
                                "vld1.32 {d22, d23}, [%[in], :128], %[inc]\n"
                                "vabs.s32 q11, q11\n"
                                "vcgt.s32 q12, q8, #0\n"
                                "vcgt.s32 q13, q9, #0\n"
                                "vcgt.s32 q14, q10, #0\n"
                                "vcgt.s32 q15, q11, #0\n"
                                "vadd.s32 q8, q8, q12\n"
                                "vadd.s32 q9, q9, q13\n"
                                "vadd.s32 q10, q10, q14\n"
                                "vadd.s32 q11, q11, q15\n"
                                "vorr.s32 q0, q0, q8\n"
                                "vorr.s32 q1, q1, q9\n"
                                "vorr.s32 q0, q0, q10\n"
                                "vorr.s32 q1, q1, q11\n"
                                "subs %[blk], %[blk], #4\n"
                                "bgt 1b\n"
                                "vorr.s32 q0, q0, q1\n"
                                "vmov.s32 q15, %[c2]\n"
                                "vclz.s32 q0, q0\n"
                                "vsub.s32 q0, q15, q0\n"
                                "vst1.32 {d0, d1}, [%[out], :128]\n"
                                :
                                [blk] "+r" (blk),
                                [in] "+r" (in)
                                :
                                [inc] "r" ((char *) &sb_sample_f[1][0][0] -
                                        (char *) &sb_sample_f[0][0][0]),
                                [out] "r" (&scale_factor[ch][sb]),
                                [c1] "i" (1 << SCALE_OUT_BITS),
                                [c2] "i" (31 - SCALE_OUT_BITS)
                                : "d0", "d1", "d2", "d3", "d16", "d17",
                                  "d18", "d19", "d20", "d21", "d22", "d23",
                                  "d24", "d25", "d26", "d27", "d28", "d29",
                                  "d30", "d31", "cc", "memory");
                }
        }
}
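
/*
 * Illustrative scalar equivalent of the NEON loop above (hypothetical
 * helper name; the portable implementation lives in sbc_primitives.c).
 * A scale factor is the number of bits a subband sample needs beyond
 * SCALE_OUT_BITS. ORing together (|sample| - 1) over all blocks
 * preserves the highest set bit of the largest magnitude, so a single
 * count-leading-zeros at the end recovers it.
 */
#if 0
static void sbc_calc_scalefactors_ref(
        int32_t sb_sample_f[16][2][8],
        uint32_t scale_factor[2][8],
        int blocks, int channels, int subbands)
{
        int ch, sb, blk;

        for (ch = 0; ch < channels; ch++) {
                for (sb = 0; sb < subbands; sb++) {
                        /* seed so the result never drops below zero */
                        uint32_t x = 1 << SCALE_OUT_BITS;

                        for (blk = 0; blk < blocks; blk++) {
                                int32_t tmp = sb_sample_f[blk][ch][sb];
                                if (tmp < 0)
                                        tmp = -tmp;
                                /* |tmp| - 1 for nonzero samples, 0 for
                                 * zero: the vabs/vcgt/vadd trio above */
                                if (tmp != 0)
                                        x |= tmp - 1;
                        }
                        scale_factor[ch][sb] = (31 - SCALE_OUT_BITS) -
                                                        __builtin_clz(x);
                }
        }
}
#endif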

static int sbc_calc_scalefactors_j_neon(
        int32_t sb_sample_f[16][2][8],
        uint32_t scale_factor[2][8],
        int blocks, int subbands)
{
        static SBC_ALIGNED int32_t joint_bits_mask[8] = {
                8, 4, 2, 1, 128, 64, 32, 16
        };
        int joint, i;
        int32_t *in0, *in1;
        int32_t *in = &sb_sample_f[0][0][0];
        uint32_t *out0, *out1;
        uint32_t *out = &scale_factor[0][0];
        int32_t *consts = joint_bits_mask;

        i = subbands;

        asm volatile (
                /*
                 * constants: q13 = (31 - SCALE_OUT_BITS), q14 = 1
                 * input: q0 = ((1 << SCALE_OUT_BITS) + 1)
                 *        %[in0] - samples for channel 0
                 *        %[in1] - samples for channel 1
                 * output: q0, q1 - scale factors without joint stereo
                 *         q2, q3 - scale factors with joint stereo
                 *         q15 - joint stereo selection mask
                 */
                ".macro calc_scalefactors\n"
                "vmov.s32 q1, q0\n"
                "vmov.s32 q2, q0\n"
                "vmov.s32 q3, q0\n"
                "mov %[i], %[blocks]\n"
                "1:\n"
                "vld1.32 {d18, d19}, [%[in1], :128], %[inc]\n"
                "vbic.s32 q11, q9, q14\n"
                "vld1.32 {d16, d17}, [%[in0], :128], %[inc]\n"
                "vhadd.s32 q10, q8, q11\n"
                "vhsub.s32 q11, q8, q11\n"
                "vabs.s32 q8, q8\n"
                "vabs.s32 q9, q9\n"
                "vabs.s32 q10, q10\n"
                "vabs.s32 q11, q11\n"
                "vmax.s32 q0, q0, q8\n"
                "vmax.s32 q1, q1, q9\n"
                "vmax.s32 q2, q2, q10\n"
                "vmax.s32 q3, q3, q11\n"
                "subs %[i], %[i], #1\n"
                "bgt 1b\n"
                "vsub.s32 q0, q0, q14\n"
                "vsub.s32 q1, q1, q14\n"
                "vsub.s32 q2, q2, q14\n"
                "vsub.s32 q3, q3, q14\n"
                "vclz.s32 q0, q0\n"
                "vclz.s32 q1, q1\n"
                "vclz.s32 q2, q2\n"
                "vclz.s32 q3, q3\n"
                "vsub.s32 q0, q13, q0\n"
                "vsub.s32 q1, q13, q1\n"
                "vsub.s32 q2, q13, q2\n"
                "vsub.s32 q3, q13, q3\n"
                ".endm\n"
                /*
                 * constants: q14 = 1
                 * input: q15 - joint stereo selection mask
                 *        %[in0] - value set by calc_scalefactors macro
                 *        %[in1] - value set by calc_scalefactors macro
                 */
                ".macro update_joint_stereo_samples\n"
                "sub %[out1], %[in1], %[inc]\n"
                "sub %[out0], %[in0], %[inc]\n"
                "sub %[in1], %[in1], %[inc], asl #1\n"
                "sub %[in0], %[in0], %[inc], asl #1\n"
                "vld1.32 {d18, d19}, [%[in1], :128]\n"
                "vbic.s32 q11, q9, q14\n"
                "vld1.32 {d16, d17}, [%[in0], :128]\n"
                "vld1.32 {d2, d3}, [%[out1], :128]\n"
                "vbic.s32 q3, q1, q14\n"
                "vld1.32 {d0, d1}, [%[out0], :128]\n"
                "vhsub.s32 q10, q8, q11\n"
                "vhadd.s32 q11, q8, q11\n"
                "vhsub.s32 q2, q0, q3\n"
                "vhadd.s32 q3, q0, q3\n"
                "vbif.s32 q10, q9, q15\n"
                "vbif.s32 d22, d16, d30\n"
                "sub %[inc], %[zero], %[inc], asl #1\n"
                "sub %[i], %[blocks], #2\n"
                "2:\n"
                "vbif.s32 d23, d17, d31\n"
                "vst1.32 {d20, d21}, [%[in1], :128], %[inc]\n"
                "vbif.s32 d4, d2, d30\n"
                "vld1.32 {d18, d19}, [%[in1], :128]\n"
                "vbif.s32 d5, d3, d31\n"
                "vst1.32 {d22, d23}, [%[in0], :128], %[inc]\n"
                "vbif.s32 d6, d0, d30\n"
                "vld1.32 {d16, d17}, [%[in0], :128]\n"
                "vbif.s32 d7, d1, d31\n"
                "vst1.32 {d4, d5}, [%[out1], :128], %[inc]\n"
                "vbic.s32 q11, q9, q14\n"
                "vld1.32 {d2, d3}, [%[out1], :128]\n"
                "vst1.32 {d6, d7}, [%[out0], :128], %[inc]\n"
                "vbic.s32 q3, q1, q14\n"
                "vld1.32 {d0, d1}, [%[out0], :128]\n"
                "vhsub.s32 q10, q8, q11\n"
                "vhadd.s32 q11, q8, q11\n"
                "vhsub.s32 q2, q0, q3\n"
                "vhadd.s32 q3, q0, q3\n"
                "vbif.s32 q10, q9, q15\n"
                "vbif.s32 d22, d16, d30\n"
                "subs %[i], %[i], #2\n"
                "bgt 2b\n"
                "sub %[inc], %[zero], %[inc], asr #1\n"
                "vbif.s32 d23, d17, d31\n"
                "vst1.32 {d20, d21}, [%[in1], :128]\n"
                "vbif.s32 q2, q1, q15\n"
                "vst1.32 {d22, d23}, [%[in0], :128]\n"
                "vbif.s32 q3, q0, q15\n"
                "vst1.32 {d4, d5}, [%[out1], :128]\n"
                "vst1.32 {d6, d7}, [%[out0], :128]\n"
                ".endm\n"

                "vmov.s32 q14, #1\n"
                "vmov.s32 q13, %[c2]\n"

                "cmp %[i], #4\n"
                "bne 8f\n"

                "4:\n" /* 4 subbands */
                "add %[in0], %[in], #0\n"
                "add %[in1], %[in], #32\n"
                "add %[out0], %[out], #0\n"
                "add %[out1], %[out], #32\n"
                "vmov.s32 q0, %[c1]\n"
                "vadd.s32 q0, q0, q14\n"

                "calc_scalefactors\n"

                /* check whether to use joint stereo for subbands 0, 1, 2 */
                "vadd.s32 q15, q0, q1\n"
                "vadd.s32 q9, q2, q3\n"
                "vmov.s32 d31[1], %[zero]\n" /* last subband -> no joint */
                "vld1.32 {d16, d17}, [%[consts], :128]!\n"
                "vcgt.s32 q15, q15, q9\n"

                /* calculate and save to memory 'joint' variable */
                /* update and save scale factors to memory */
                " vand.s32 q8, q8, q15\n"
                "vbit.s32 q0, q2, q15\n"
                " vpadd.s32 d16, d16, d17\n"
                "vbit.s32 q1, q3, q15\n"
                " vpadd.s32 d16, d16, d16\n"
                "vst1.32 {d0, d1}, [%[out0], :128]\n"
                "vst1.32 {d2, d3}, [%[out1], :128]\n"
                " vst1.32 {d16[0]}, [%[joint]]\n"

                "update_joint_stereo_samples\n"
                "b 9f\n"

                "8:\n" /* 8 subbands */
                "add %[in0], %[in], #16\n"
                "add %[in1], %[in], #48\n"
                "add %[out0], %[out], #16\n"
                "add %[out1], %[out], #48\n"
                "vmov.s32 q0, %[c1]\n"
                "vadd.s32 q0, q0, q14\n"

                "calc_scalefactors\n"

                /* check whether to use joint stereo for subbands 4, 5, 6 */
                "vadd.s32 q15, q0, q1\n"
                "vadd.s32 q9, q2, q3\n"
                "vmov.s32 d31[1], %[zero]\n" /* last subband -> no joint */
                "vld1.32 {d16, d17}, [%[consts], :128]!\n"
                "vcgt.s32 q15, q15, q9\n"

                /* calculate part of 'joint' variable and save it to d24 */
                /* update and save scale factors to memory */
                " vand.s32 q8, q8, q15\n"
                "vbit.s32 q0, q2, q15\n"
                " vpadd.s32 d16, d16, d17\n"
                "vbit.s32 q1, q3, q15\n"
                "vst1.32 {d0, d1}, [%[out0], :128]\n"
                "vst1.32 {d2, d3}, [%[out1], :128]\n"
                " vpadd.s32 d24, d16, d16\n"

                "update_joint_stereo_samples\n"

                "add %[in0], %[in], #0\n"
                "add %[in1], %[in], #32\n"
                "add %[out0], %[out], #0\n"
                "add %[out1], %[out], #32\n"
                "vmov.s32 q0, %[c1]\n"
                "vadd.s32 q0, q0, q14\n"

                "calc_scalefactors\n"

                /* check whether to use joint stereo for subbands 0, 1, 2, 3 */
                "vadd.s32 q15, q0, q1\n"
                "vadd.s32 q9, q2, q3\n"
                "vld1.32 {d16, d17}, [%[consts], :128]!\n"
                "vcgt.s32 q15, q15, q9\n"

                /* combine last part of 'joint' with d24 and save to memory */
                /* update and save scale factors to memory */
                " vand.s32 q8, q8, q15\n"
                "vbit.s32 q0, q2, q15\n"
                " vpadd.s32 d16, d16, d17\n"
                "vbit.s32 q1, q3, q15\n"
                " vpadd.s32 d16, d16, d16\n"
                "vst1.32 {d0, d1}, [%[out0], :128]\n"
                " vadd.s32 d16, d16, d24\n"
                "vst1.32 {d2, d3}, [%[out1], :128]\n"
                " vst1.32 {d16[0]}, [%[joint]]\n"

                "update_joint_stereo_samples\n"
                "9:\n"
                ".purgem calc_scalefactors\n"
                ".purgem update_joint_stereo_samples\n"
                :
                [i] "+&r" (i),
                [in] "+&r" (in),
                [in0] "=&r" (in0),
                [in1] "=&r" (in1),
                [out] "+&r" (out),
                [out0] "=&r" (out0),
                [out1] "=&r" (out1),
                [consts] "+&r" (consts)
                :
                [inc] "r" ((char *) &sb_sample_f[1][0][0] -
                        (char *) &sb_sample_f[0][0][0]),
                [blocks] "r" (blocks),
                [joint] "r" (&joint),
                [c1] "i" (1 << SCALE_OUT_BITS),
                [c2] "i" (31 - SCALE_OUT_BITS),
                [zero] "r" (0)
                : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
                  "d16", "d17", "d18", "d19", "d20", "d21", "d22",
                  "d23", "d24", "d25", "d26", "d27", "d28", "d29",
                  "d30", "d31", "cc", "memory");

        return joint;
}
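
/*
 * Illustrative scalar outline of the joint-stereo decision above
 * (hypothetical helper; names and structure are for explanation only,
 * and the plain-C shifts stand in for vhadd/vhsub, which compute the
 * halved sum/difference without intermediate overflow). For every
 * subband except the last, the scale factors needed for plain L/R
 * coding are compared against those needed for mid/side coding; when
 * mid/side is strictly cheaper, the samples are rewritten and the
 * subband's bit is set in the returned 'joint' mask.
 */
#if 0
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static uint32_t sf_bits(uint32_t max) /* the vsub/vclz/vsub tail */
{
        return (31 - SCALE_OUT_BITS) - __builtin_clz(max - 1);
}

static int sbc_calc_scalefactors_j_ref(
        int32_t sb_sample_f[16][2][8],
        uint32_t scale_factor[2][8],
        int blocks, int subbands)
{
        int blk, sb, joint = 0;

        for (sb = 0; sb < subbands; sb++) {
                /* seeds match the ((1 << SCALE_OUT_BITS) + 1) above */
                uint32_t l = (1 << SCALE_OUT_BITS) + 1;
                uint32_t r = l, m = l, s = l;

                for (blk = 0; blk < blocks; blk++) {
                        int32_t t0 = sb_sample_f[blk][0][sb];
                        int32_t t1 = sb_sample_f[blk][1][sb];
                        /* the right channel LSB is masked off (vbic)
                         * before the halving add/subtract */
                        int32_t mid = (t0 + (t1 & ~1)) >> 1;
                        int32_t side = (t0 - (t1 & ~1)) >> 1;

                        l = MAX(l, (uint32_t) (t0 < 0 ? -t0 : t0));
                        r = MAX(r, (uint32_t) (t1 < 0 ? -t1 : t1));
                        m = MAX(m, (uint32_t) (mid < 0 ? -mid : mid));
                        s = MAX(s, (uint32_t) (side < 0 ? -side : side));
                }

                if (sb < subbands - 1 &&
                                sf_bits(l) + sf_bits(r) >
                                sf_bits(m) + sf_bits(s)) {
                        /* mid/side wins: rewrite channel 0 as mid and
                         * channel 1 as side, record the subband bit */
                        joint |= 1 << (subbands - 1 - sb);
                        scale_factor[0][sb] = sf_bits(m);
                        scale_factor[1][sb] = sf_bits(s);
                        for (blk = 0; blk < blocks; blk++) {
                                int32_t t0 = sb_sample_f[blk][0][sb];
                                int32_t t1 = sb_sample_f[blk][1][sb] & ~1;
                                sb_sample_f[blk][0][sb] = (t0 + t1) >> 1;
                                sb_sample_f[blk][1][sb] = (t0 - t1) >> 1;
                        }
                } else {
                        scale_factor[0][sb] = sf_bits(l);
                        scale_factor[1][sb] = sf_bits(r);
                }
        }
        return joint;
}
#undef MAX
#endif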

#define PERM_BE(a, b, c, d) { \
                (a * 2) + 1, (a * 2) + 0, \
                (b * 2) + 1, (b * 2) + 0, \
                (c * 2) + 1, (c * 2) + 0, \
                (d * 2) + 1, (d * 2) + 0 \
        }
#define PERM_LE(a, b, c, d) { \
                (a * 2) + 0, (a * 2) + 1, \
                (b * 2) + 0, (b * 2) + 1, \
                (c * 2) + 0, (c * 2) + 1, \
                (d * 2) + 0, (d * 2) + 1 \
        }
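
/*
 * Example expansion: PERM_LE(7, 3, 6, 4) yields the byte indices
 * { 14, 15, 6, 7, 12, 13, 8, 9 }, which vtbl.8 uses to gather the
 * 16-bit samples 7, 3, 6, 4 out of a 16-byte block of little-endian
 * PCM. PERM_BE(7, 3, 6, 4) yields { 15, 14, 7, 6, 13, 12, 9, 8 },
 * the same gather with the two bytes of each sample swapped, so
 * big-endian input is byte-swapped for free during the permutation.
 */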

static SBC_ALWAYS_INLINE int sbc_enc_process_input_4s_neon_internal(
        int position,
        const uint8_t *pcm, int16_t X[2][SBC_X_BUFFER_SIZE],
        int nsamples, int nchannels, int big_endian)
{
        static SBC_ALIGNED uint8_t perm_be[2][8] = {
                PERM_BE(7, 3, 6, 4),
                PERM_BE(0, 2, 1, 5)
        };
        static SBC_ALIGNED uint8_t perm_le[2][8] = {
                PERM_LE(7, 3, 6, 4),
                PERM_LE(0, 2, 1, 5)
        };
        /* handle X buffer wraparound */
        if (position < nsamples) {
                int16_t *dst = &X[0][SBC_X_BUFFER_SIZE - 40];
                int16_t *src = &X[0][position];
                asm volatile (
                        "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                        "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                        "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                        "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                        "vld1.16 {d0}, [%[src], :64]!\n"
                        "vst1.16 {d0}, [%[dst], :64]!\n"
                        :
                        [dst] "+r" (dst),
                        [src] "+r" (src)
                        : : "memory", "d0", "d1", "d2", "d3");
                if (nchannels > 1) {
                        dst = &X[1][SBC_X_BUFFER_SIZE - 40];
                        src = &X[1][position];
                        asm volatile (
                                "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                                "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                                "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                                "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                                "vld1.16 {d0}, [%[src], :64]!\n"
                                "vst1.16 {d0}, [%[dst], :64]!\n"
                                :
                                [dst] "+r" (dst),
                                [src] "+r" (src)
                                : : "memory", "d0", "d1", "d2", "d3");
                }
                position = SBC_X_BUFFER_SIZE - 40;
        }

        if ((nchannels > 1) && ((uintptr_t) pcm & 1)) {
                /* poor 'pcm' alignment */
                int16_t *x = &X[0][position];
                int16_t *y = &X[1][position];
                asm volatile (
                        "vld1.8 {d0, d1}, [%[perm], :128]\n"
                        "1:\n"
                        "sub %[x], %[x], #16\n"
                        "sub %[y], %[y], #16\n"
                        "sub %[position], %[position], #8\n"
                        "vld1.8 {d4, d5}, [%[pcm]]!\n"
                        "vuzp.16 d4, d5\n"
                        "vld1.8 {d20, d21}, [%[pcm]]!\n"
                        "vuzp.16 d20, d21\n"
                        "vswp d5, d20\n"
                        "vtbl.8 d16, {d4, d5}, d0\n"
                        "vtbl.8 d17, {d4, d5}, d1\n"
                        "vtbl.8 d18, {d20, d21}, d0\n"
                        "vtbl.8 d19, {d20, d21}, d1\n"
                        "vst1.16 {d16, d17}, [%[x], :128]\n"
                        "vst1.16 {d18, d19}, [%[y], :128]\n"
                        "subs %[nsamples], %[nsamples], #8\n"
                        "bgt 1b\n"
                        :
                        [x] "+r" (x),
                        [y] "+r" (y),
                        [pcm] "+r" (pcm),
                        [nsamples] "+r" (nsamples),
                        [position] "+r" (position)
                        :
                        [perm] "r" (big_endian ? perm_be : perm_le)
                        : "cc", "memory", "d0", "d1", "d2", "d3", "d4",
                          "d5", "d6", "d7", "d16", "d17", "d18", "d19",
                          "d20", "d21", "d22", "d23");
        } else if (nchannels > 1) {
                /* proper 'pcm' alignment */
                int16_t *x = &X[0][position];
                int16_t *y = &X[1][position];
                asm volatile (
                        "vld1.8 {d0, d1}, [%[perm], :128]\n"
                        "1:\n"
                        "sub %[x], %[x], #16\n"
                        "sub %[y], %[y], #16\n"
                        "sub %[position], %[position], #8\n"
                        "vld2.16 {d4, d5}, [%[pcm]]!\n"
                        "vld2.16 {d20, d21}, [%[pcm]]!\n"
                        "vswp d5, d20\n"
                        "vtbl.8 d16, {d4, d5}, d0\n"
                        "vtbl.8 d17, {d4, d5}, d1\n"
                        "vtbl.8 d18, {d20, d21}, d0\n"
                        "vtbl.8 d19, {d20, d21}, d1\n"
                        "vst1.16 {d16, d17}, [%[x], :128]\n"
                        "vst1.16 {d18, d19}, [%[y], :128]\n"
                        "subs %[nsamples], %[nsamples], #8\n"
                        "bgt 1b\n"
                        :
                        [x] "+r" (x),
                        [y] "+r" (y),
                        [pcm] "+r" (pcm),
                        [nsamples] "+r" (nsamples),
                        [position] "+r" (position)
                        :
                        [perm] "r" (big_endian ? perm_be : perm_le)
                        : "cc", "memory", "d0", "d1", "d2", "d3", "d4",
                          "d5", "d6", "d7", "d16", "d17", "d18", "d19",
                          "d20", "d21", "d22", "d23");
        } else {
                int16_t *x = &X[0][position];
                asm volatile (
                        "vld1.8 {d0, d1}, [%[perm], :128]\n"
                        "1:\n"
                        "sub %[x], %[x], #16\n"
                        "sub %[position], %[position], #8\n"
                        "vld1.8 {d4, d5}, [%[pcm]]!\n"
                        "vtbl.8 d16, {d4, d5}, d0\n"
                        "vtbl.8 d17, {d4, d5}, d1\n"
                        "vst1.16 {d16, d17}, [%[x], :128]\n"
                        "subs %[nsamples], %[nsamples], #8\n"
                        "bgt 1b\n"
                        :
                        [x] "+r" (x),
                        [pcm] "+r" (pcm),
                        [nsamples] "+r" (nsamples),
                        [position] "+r" (position)
                        :
                        [perm] "r" (big_endian ? perm_be : perm_le)
                        : "cc", "memory", "d0", "d1", "d2", "d3", "d4",
                          "d5", "d6", "d7", "d16", "d17", "d18", "d19");
        }
        return position;
}
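
/*
 * Illustrative scalar equivalent of the mono loop above (hypothetical
 * helper; little-endian case only, nsamples assumed to be a multiple
 * of 8 as in the asm). Eight 16-bit samples are consumed per iteration
 * and scattered backwards into the X ring buffer in the
 * 7, 3, 6, 4, 0, 2, 1, 5 order encoded by the perm tables. The stereo
 * paths additionally deinterleave L/R with vld2/vuzp, and the
 * eight-subband variant below is analogous with 16-sample chunks and
 * four permutation rows.
 */
#if 0
static int sbc_enc_process_input_4s_mono_le_ref(int position,
                const uint8_t *pcm, int16_t X[2][SBC_X_BUFFER_SIZE],
                int nsamples)
{
        static const int perm[8] = { 7, 3, 6, 4, 0, 2, 1, 5 };
        int i;

        while (nsamples > 0) {
                /* step the write position back by one 8-sample chunk */
                int16_t *x = &X[0][position - 8];

                /* gather little-endian samples in permuted order */
                for (i = 0; i < 8; i++)
                        x[i] = (int16_t) (pcm[2 * perm[i]] |
                                        (pcm[2 * perm[i] + 1] << 8));
                pcm += 16;
                position -= 8;
                nsamples -= 8;
        }
        return position;
}
#endif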

static SBC_ALWAYS_INLINE int sbc_enc_process_input_8s_neon_internal(
        int position,
        const uint8_t *pcm, int16_t X[2][SBC_X_BUFFER_SIZE],
        int nsamples, int nchannels, int big_endian)
{
        static SBC_ALIGNED uint8_t perm_be[4][8] = {
                PERM_BE(15, 7, 14, 8),
                PERM_BE(13, 9, 12, 10),
                PERM_BE(11, 3, 6, 0),
                PERM_BE(5, 1, 4, 2)
        };
        static SBC_ALIGNED uint8_t perm_le[4][8] = {
                PERM_LE(15, 7, 14, 8),
                PERM_LE(13, 9, 12, 10),
                PERM_LE(11, 3, 6, 0),
                PERM_LE(5, 1, 4, 2)
        };
        /* handle X buffer wraparound */
        if (position < nsamples) {
                int16_t *dst = &X[0][SBC_X_BUFFER_SIZE - 72];
                int16_t *src = &X[0][position];
                asm volatile (
                        "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                        "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                        "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                        "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                        "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                        "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                        "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                        "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                        "vld1.16 {d0, d1}, [%[src], :128]!\n"
                        "vst1.16 {d0, d1}, [%[dst], :128]!\n"
                        :
                        [dst] "+r" (dst),
                        [src] "+r" (src)
                        : : "memory", "d0", "d1", "d2", "d3");
                if (nchannels > 1) {
                        dst = &X[1][SBC_X_BUFFER_SIZE - 72];
                        src = &X[1][position];
                        asm volatile (
                                "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                                "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                                "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                                "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                                "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                                "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                                "vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
                                "vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
                                "vld1.16 {d0, d1}, [%[src], :128]!\n"
                                "vst1.16 {d0, d1}, [%[dst], :128]!\n"
                                :
                                [dst] "+r" (dst),
                                [src] "+r" (src)
                                : : "memory", "d0", "d1", "d2", "d3");
                }
                position = SBC_X_BUFFER_SIZE - 72;
        }

        if ((nchannels > 1) && ((uintptr_t) pcm & 1)) {
                /* poor 'pcm' alignment */
                int16_t *x = &X[0][position];
                int16_t *y = &X[1][position];
                asm volatile (
                        "vld1.8 {d0, d1, d2, d3}, [%[perm], :128]\n"
                        "1:\n"
                        "sub %[x], %[x], #32\n"
                        "sub %[y], %[y], #32\n"
                        "sub %[position], %[position], #16\n"
                        "vld1.8 {d4, d5, d6, d7}, [%[pcm]]!\n"
                        "vuzp.16 q2, q3\n"
                        "vld1.8 {d20, d21, d22, d23}, [%[pcm]]!\n"
                        "vuzp.16 q10, q11\n"
                        "vswp q3, q10\n"
                        "vtbl.8 d16, {d4, d5, d6, d7}, d0\n"
                        "vtbl.8 d17, {d4, d5, d6, d7}, d1\n"
                        "vtbl.8 d18, {d4, d5, d6, d7}, d2\n"
                        "vtbl.8 d19, {d4, d5, d6, d7}, d3\n"
                        "vst1.16 {d16, d17, d18, d19}, [%[x], :128]\n"
                        "vtbl.8 d16, {d20, d21, d22, d23}, d0\n"
                        "vtbl.8 d17, {d20, d21, d22, d23}, d1\n"
                        "vtbl.8 d18, {d20, d21, d22, d23}, d2\n"
                        "vtbl.8 d19, {d20, d21, d22, d23}, d3\n"
                        "vst1.16 {d16, d17, d18, d19}, [%[y], :128]\n"
                        "subs %[nsamples], %[nsamples], #16\n"
                        "bgt 1b\n"
                        :
                        [x] "+r" (x),
                        [y] "+r" (y),
                        [pcm] "+r" (pcm),
                        [nsamples] "+r" (nsamples),
                        [position] "+r" (position)
                        :
                        [perm] "r" (big_endian ? perm_be : perm_le)
                        : "cc", "memory", "d0", "d1", "d2", "d3", "d4",
                          "d5", "d6", "d7", "d16", "d17", "d18", "d19",
                          "d20", "d21", "d22", "d23");
        } else if (nchannels > 1) {
                /* proper 'pcm' alignment */
                int16_t *x = &X[0][position];
                int16_t *y = &X[1][position];
                asm volatile (
                        "vld1.8 {d0, d1, d2, d3}, [%[perm], :128]\n"
                        "1:\n"
                        "sub %[x], %[x], #32\n"
                        "sub %[y], %[y], #32\n"
                        "sub %[position], %[position], #16\n"
                        "vld2.16 {d4, d5, d6, d7}, [%[pcm]]!\n"
                        "vld2.16 {d20, d21, d22, d23}, [%[pcm]]!\n"
                        "vswp q3, q10\n"
                        "vtbl.8 d16, {d4, d5, d6, d7}, d0\n"
                        "vtbl.8 d17, {d4, d5, d6, d7}, d1\n"
                        "vtbl.8 d18, {d4, d5, d6, d7}, d2\n"
                        "vtbl.8 d19, {d4, d5, d6, d7}, d3\n"
                        "vst1.16 {d16, d17, d18, d19}, [%[x], :128]\n"
                        "vtbl.8 d16, {d20, d21, d22, d23}, d0\n"
                        "vtbl.8 d17, {d20, d21, d22, d23}, d1\n"
                        "vtbl.8 d18, {d20, d21, d22, d23}, d2\n"
                        "vtbl.8 d19, {d20, d21, d22, d23}, d3\n"
                        "vst1.16 {d16, d17, d18, d19}, [%[y], :128]\n"
                        "subs %[nsamples], %[nsamples], #16\n"
                        "bgt 1b\n"
                        :
                        [x] "+r" (x),
                        [y] "+r" (y),
                        [pcm] "+r" (pcm),
                        [nsamples] "+r" (nsamples),
                        [position] "+r" (position)
                        :
                        [perm] "r" (big_endian ? perm_be : perm_le)
                        : "cc", "memory", "d0", "d1", "d2", "d3", "d4",
                          "d5", "d6", "d7", "d16", "d17", "d18", "d19",
                          "d20", "d21", "d22", "d23");
        } else {
                int16_t *x = &X[0][position];
                asm volatile (
                        "vld1.8 {d0, d1, d2, d3}, [%[perm], :128]\n"
                        "1:\n"
                        "sub %[x], %[x], #32\n"
                        "sub %[position], %[position], #16\n"
                        "vld1.8 {d4, d5, d6, d7}, [%[pcm]]!\n"
                        "vtbl.8 d16, {d4, d5, d6, d7}, d0\n"
                        "vtbl.8 d17, {d4, d5, d6, d7}, d1\n"
                        "vtbl.8 d18, {d4, d5, d6, d7}, d2\n"
                        "vtbl.8 d19, {d4, d5, d6, d7}, d3\n"
                        "vst1.16 {d16, d17, d18, d19}, [%[x], :128]\n"
                        "subs %[nsamples], %[nsamples], #16\n"
                        "bgt 1b\n"
                        :
                        [x] "+r" (x),
                        [pcm] "+r" (pcm),
                        [nsamples] "+r" (nsamples),
                        [position] "+r" (position)
                        :
                        [perm] "r" (big_endian ? perm_be : perm_le)
                        : "cc", "memory", "d0", "d1", "d2", "d3", "d4",
                          "d5", "d6", "d7", "d16", "d17", "d18", "d19");
        }
        return position;
}

#undef PERM_BE
#undef PERM_LE

static int sbc_enc_process_input_4s_be_neon(int position, const uint8_t *pcm,
                                            int16_t X[2][SBC_X_BUFFER_SIZE],
                                            int nsamples, int nchannels)
{
        return sbc_enc_process_input_4s_neon_internal(
                position, pcm, X, nsamples, nchannels, 1);
}

static int sbc_enc_process_input_4s_le_neon(int position, const uint8_t *pcm,
                                            int16_t X[2][SBC_X_BUFFER_SIZE],
                                            int nsamples, int nchannels)
{
        return sbc_enc_process_input_4s_neon_internal(
                position, pcm, X, nsamples, nchannels, 0);
}

static int sbc_enc_process_input_8s_be_neon(int position, const uint8_t *pcm,
                                            int16_t X[2][SBC_X_BUFFER_SIZE],
                                            int nsamples, int nchannels)
{
        return sbc_enc_process_input_8s_neon_internal(
                position, pcm, X, nsamples, nchannels, 1);
}

static int sbc_enc_process_input_8s_le_neon(int position, const uint8_t *pcm,
                                            int16_t X[2][SBC_X_BUFFER_SIZE],
                                            int nsamples, int nchannels)
{
        return sbc_enc_process_input_8s_neon_internal(
                position, pcm, X, nsamples, nchannels, 0);
}
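
/*
 * Hook the NEON implementations into the encoder's function-pointer
 * table; presumably called from the generic sbc_init_primitives() once
 * NEON availability has been established.
 */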
void sbc_init_primitives_neon(struct sbc_encoder_state *state)
{
        state->sbc_analyze_4b_4s = sbc_analyze_4b_4s_neon;
        state->sbc_analyze_4b_8s = sbc_analyze_4b_8s_neon;
        state->sbc_calc_scalefactors = sbc_calc_scalefactors_neon;
        state->sbc_calc_scalefactors_j = sbc_calc_scalefactors_j_neon;
        state->sbc_enc_process_input_4s_le = sbc_enc_process_input_4s_le_neon;
        state->sbc_enc_process_input_4s_be = sbc_enc_process_input_4s_be_neon;
        state->sbc_enc_process_input_8s_le = sbc_enc_process_input_8s_le_neon;
        state->sbc_enc_process_input_8s_be = sbc_enc_process_input_8s_be_neon;
        state->implementation_info = "NEON";
}

#endif