/*
dct64_x86_64_float: SSE optimized dct64 for x86-64 (float output version)
copyright 1995-2009 by the mpg123 project - free software under the terms of the LGPL 2.1
see COPYING and AUTHORS files in distribution or http://mpg123.org
initially written by Taihei Monma
*/
#include "mangle.h"
#ifdef IS_MSABI
/* real *out0 */
#define ARG0 %r9
/* real *out1 */
#define ARG1 %rdx
/* real *samples */
#define ARG2 %r8
#else
/* real *out0 */
#define ARG0 %rdi
/* real *out1 */
#define ARG1 %rsi
/* real *samples */
#define ARG2 %rdx
#endif
/*
void dct64_real_x86_64(real *out0, real *out1, real *samples);
*/
#ifndef __APPLE__
.section .rodata
#else
.data
#endif
ALIGN32
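/*
The cosine coefficients of the DCT, stored as IEEE-754 single-precision
bit patterns: sixteen values of 1/(2*cos((2k+1)*pi/64)), then eight for
pi/32, four for pi/16, two for pi/8 and one for pi/4 (1/sqrt(2)),
padded with a zero to a 16-byte boundary.
*/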
ASM_NAME(costab_x86_64):
.long 1056974725
.long 1057056395
.long 1057223771
.long 1057485416
.long 1057855544
.long 1058356026
.long 1059019886
.long 1059897405
.long 1061067246
.long 1062657950
.long 1064892987
.long 1066774581
.long 1069414683
.long 1073984175
.long 1079645762
.long 1092815430
.long 1057005197
.long 1057342072
.long 1058087743
.long 1059427869
.long 1061799040
.long 1065862217
.long 1071413542
.long 1084439708
.long 1057128951
.long 1058664893
.long 1063675095
.long 1076102863
.long 1057655764
.long 1067924853
.long 1060439283
.long 0
.text
ALIGN16
.globl ASM_NAME(dct64_real_x86_64)
ASM_NAME(dct64_real_x86_64):
#ifdef IS_MSABI /* should save xmm6-15 */
movq %rcx, ARG0
subq $168, %rsp /* stack alignment + 10 xmm registers */
movaps %xmm6, (%rsp)
movaps %xmm7, 16(%rsp)
movaps %xmm8, 32(%rsp)
movaps %xmm9, 48(%rsp)
movaps %xmm10, 64(%rsp)
movaps %xmm11, 80(%rsp)
movaps %xmm12, 96(%rsp)
movaps %xmm13, 112(%rsp)
movaps %xmm14, 128(%rsp)
movaps %xmm15, 144(%rsp)
#endif
leaq ASM_NAME(costab_x86_64)(%rip), %rcx
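/*
Stage 1: fold the 32 input samples. xmm8-xmm11 collect the sums
samples[k] + samples[31-k]; xmm15, xmm14, xmm13, xmm12 collect the
corresponding differences. shufps $0x1b reverses the four floats of a
vector to realize the index reversal.
*/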
MOVUAPS (ARG2), %xmm15
MOVUAPS 16(ARG2), %xmm14
MOVUAPS 112(ARG2), %xmm0
MOVUAPS 96(ARG2), %xmm1
shufps $0x1b, %xmm0, %xmm0
shufps $0x1b, %xmm1, %xmm1
movaps %xmm15, %xmm8
movaps %xmm14, %xmm9
addps %xmm0, %xmm8
addps %xmm1, %xmm9
subps %xmm0, %xmm15
subps %xmm1, %xmm14
MOVUAPS 32(ARG2), %xmm13
MOVUAPS 48(ARG2), %xmm12
MOVUAPS 80(ARG2), %xmm0
MOVUAPS 64(ARG2), %xmm1
shufps $0x1b, %xmm0, %xmm0
shufps $0x1b, %xmm1, %xmm1
movaps %xmm13, %xmm10
movaps %xmm12, %xmm11
addps %xmm0, %xmm10
addps %xmm1, %xmm11
subps %xmm0, %xmm13
subps %xmm1, %xmm12
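/* Scale the 16 differences by the first-stage coefficients costab[0..15]. */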
movaps (%rcx), %xmm0
movaps 16(%rcx), %xmm1
movaps 32(%rcx), %xmm2
movaps 48(%rcx), %xmm3
mulps %xmm0, %xmm15
mulps %xmm1, %xmm14
mulps %xmm2, %xmm13
mulps %xmm3, %xmm12
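/*
Stage 2: the same fold within each 16-value half, using the eight
coefficients at costab+64. pshufd $0x1b reverses a vector without
clobbering the source register.
*/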
movaps 64(%rcx), %xmm0
movaps 80(%rcx), %xmm1
pshufd $0x1b, %xmm11, %xmm2
pshufd $0x1b, %xmm10, %xmm3
shufps $0x1b, %xmm13, %xmm13
shufps $0x1b, %xmm12, %xmm12
movaps %xmm8, %xmm11
movaps %xmm9, %xmm10
movaps %xmm14, %xmm4
movaps %xmm15, %xmm5
subps %xmm2, %xmm11
subps %xmm3, %xmm10
subps %xmm13, %xmm14
subps %xmm12, %xmm15
addps %xmm2, %xmm8
addps %xmm3, %xmm9
addps %xmm5, %xmm12
addps %xmm4, %xmm13
mulps %xmm0, %xmm11
mulps %xmm1, %xmm10
mulps %xmm1, %xmm14
mulps %xmm0, %xmm15
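/*
Stage 3: fold within each 8-value group; every group uses the same
four coefficients at costab+96, loaded once into xmm0.
*/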
movaps 96(%rcx), %xmm0
pshufd $0x1b, %xmm9, %xmm1
pshufd $0x1b, %xmm13, %xmm2
shufps $0x1b, %xmm10, %xmm10
shufps $0x1b, %xmm14, %xmm14
movaps %xmm8, %xmm9
movaps %xmm12, %xmm13
movaps %xmm11, %xmm3
movaps %xmm15, %xmm4
subps %xmm1, %xmm9
subps %xmm2, %xmm13
subps %xmm10, %xmm11
subps %xmm14, %xmm15
addps %xmm1, %xmm8
addps %xmm2, %xmm12
addps %xmm3, %xmm10
addps %xmm4, %xmm14
mulps %xmm0, %xmm9
mulps %xmm0, %xmm13
mulps %xmm0, %xmm11
mulps %xmm0, %xmm15
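/*
Stage 4: fold within 4-value groups, now inside the registers.
movlhps duplicates the two pi/8 coefficients from costab+112 into both
halves of xmm1; shufps $0x44 gathers the low float pairs of two
registers and $0xbb the reversed high pairs.
*/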
movaps 112(%rcx), %xmm0
movaps %xmm0, %xmm1
movlhps %xmm1, %xmm1
movaps %xmm8, %xmm2
movaps %xmm9, %xmm3
shufps $0x44, %xmm10, %xmm2
shufps $0xbb, %xmm11, %xmm9
shufps $0xbb, %xmm10, %xmm8
shufps $0x44, %xmm11, %xmm3
movaps %xmm2, %xmm4
movaps %xmm3, %xmm5
subps %xmm8, %xmm2
subps %xmm9, %xmm3
addps %xmm4, %xmm8
addps %xmm5, %xmm9
mulps %xmm1, %xmm2
mulps %xmm1, %xmm3
movaps %xmm8, %xmm10
movaps %xmm9, %xmm11
shufps $0x14, %xmm2, %xmm8
shufps $0xbe, %xmm2, %xmm10
shufps $0x14, %xmm3, %xmm9
shufps $0xbe, %xmm3, %xmm11
movaps %xmm12, %xmm2
movaps %xmm13, %xmm3
shufps $0x44, %xmm14, %xmm2
shufps $0xbb, %xmm15, %xmm13
shufps $0xbb, %xmm14, %xmm12
shufps $0x44, %xmm15, %xmm3
movaps %xmm2, %xmm4
movaps %xmm3, %xmm5
subps %xmm12, %xmm2
subps %xmm13, %xmm3
addps %xmm4, %xmm12
addps %xmm5, %xmm13
mulps %xmm1, %xmm2
mulps %xmm1, %xmm3
movaps %xmm12, %xmm14
movaps %xmm13, %xmm15
shufps $0x14, %xmm2, %xmm12
shufps $0xbe, %xmm2, %xmm14
shufps $0x14, %xmm3, %xmm13
shufps $0xbe, %xmm3, %xmm15
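/*
Build the stage-5 constant {1/sqrt(2), -1/sqrt(2), 1/sqrt(2), -1/sqrt(2)}:
shufps $0xaa broadcasts element 2 of the last costab vector (1/sqrt(2)),
and the pcmpeqd/pslld/psllq sequence constructs a sign mask over the odd
lanes.
*/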
shufps $0xaa, %xmm0, %xmm0
pcmpeqd %xmm1, %xmm1
pslld $31, %xmm1
psllq $32, %xmm1
xorps %xmm1, %xmm0
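/*
Stage 5: butterflies between adjacent elements. The unpcklps/unpckhps
cascades interleave the registers so that the sums and sign-alternating
scaled differences come out of plain addps/subps/mulps.
*/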
movaps %xmm8, %xmm1
movaps %xmm10, %xmm2
unpcklps %xmm9, %xmm8
unpckhps %xmm9, %xmm1
unpcklps %xmm11, %xmm10
unpckhps %xmm11, %xmm2
movaps %xmm8, %xmm3
movaps %xmm10, %xmm4
unpcklps %xmm1, %xmm8
unpckhps %xmm1, %xmm3
unpcklps %xmm2, %xmm10
unpckhps %xmm2, %xmm4
movaps %xmm8, %xmm1
movaps %xmm10, %xmm2
subps %xmm3, %xmm1
subps %xmm4, %xmm2
addps %xmm3, %xmm8
addps %xmm4, %xmm10
mulps %xmm0, %xmm1
mulps %xmm0, %xmm2
movaps %xmm8, %xmm9
movaps %xmm10, %xmm11
unpcklps %xmm1, %xmm8
unpckhps %xmm1, %xmm9
unpcklps %xmm2, %xmm10
unpckhps %xmm2, %xmm11
movaps %xmm12, %xmm1
movaps %xmm14, %xmm2
unpcklps %xmm13, %xmm12
unpckhps %xmm13, %xmm1
unpcklps %xmm15, %xmm14
unpckhps %xmm15, %xmm2
movaps %xmm12, %xmm3
movaps %xmm14, %xmm4
unpcklps %xmm1, %xmm12
unpckhps %xmm1, %xmm3
unpcklps %xmm2, %xmm14
unpckhps %xmm2, %xmm4
movaps %xmm12, %xmm1
movaps %xmm14, %xmm2
subps %xmm3, %xmm1
subps %xmm4, %xmm2
addps %xmm3, %xmm12
addps %xmm4, %xmm14
mulps %xmm0, %xmm1
mulps %xmm0, %xmm2
movaps %xmm12, %xmm13
movaps %xmm14, %xmm15
unpcklps %xmm1, %xmm12
unpckhps %xmm1, %xmm13
unpcklps %xmm2, %xmm14
unpckhps %xmm2, %xmm15
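/*
Output pass, part one: in each register, add the top float into the
lane below it (the shifted copy built by shufpd/psrlq holds that top
float in lane 2 and zeros elsewhere).
*/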
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
shufpd $0x2, %xmm8, %xmm0
shufpd $0x2, %xmm9, %xmm1
psrlq $32, %xmm0
psrlq $32, %xmm1
addps %xmm0, %xmm8
addps %xmm1, %xmm9
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
shufpd $0x2, %xmm10, %xmm0
shufpd $0x2, %xmm11, %xmm1
psrlq $32, %xmm0
psrlq $32, %xmm1
addps %xmm0, %xmm10
addps %xmm1, %xmm11
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
shufpd $0x2, %xmm12, %xmm0
shufpd $0x2, %xmm13, %xmm1
psrlq $32, %xmm0
psrlq $32, %xmm1
addps %xmm0, %xmm12
addps %xmm1, %xmm13
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
shufpd $0x2, %xmm14, %xmm0
shufpd $0x2, %xmm15, %xmm1
psrlq $32, %xmm0
psrlq $32, %xmm1
addps %xmm0, %xmm14
addps %xmm1, %xmm15
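/*
Output pass, part two: pshufd $0x78 followed by psrldq $4 rotates the
remaining partial values into place for the cross-lane and
cross-register additions below, much like the final b1[k] += b1[k+1]
pass of the generic C dct64.
*/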
pshufd $0x78, %xmm9, %xmm0
pshufd $0x78, %xmm11, %xmm1
pshufd $0x78, %xmm13, %xmm2
pshufd $0x78, %xmm15, %xmm3
psrldq $4, %xmm0
psrldq $4, %xmm1
psrldq $4, %xmm2
psrldq $4, %xmm3
addps %xmm0, %xmm9
addps %xmm1, %xmm11
addps %xmm2, %xmm13
addps %xmm3, %xmm15
pshufd $0x78, %xmm10, %xmm0
pshufd $0x78, %xmm14, %xmm1
psrldq $4, %xmm0
psrldq $4, %xmm1
addps %xmm11, %xmm10
addps %xmm15, %xmm14
addps %xmm0, %xmm11
addps %xmm1, %xmm15
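/*
Scatter the results: out0 receives 17 values and out1 16 values at a
stride of 16 floats (64 bytes). This group covers every other offset;
the second group below fills in the rest.
*/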
movss %xmm8, 1024(ARG0)
movss %xmm10, 896(ARG0)
movss %xmm9, 768(ARG0)
movss %xmm11, 640(ARG0)
movhlps %xmm8, %xmm0
movhlps %xmm10, %xmm1
movhlps %xmm9, %xmm2
movhlps %xmm11, %xmm3
movss %xmm0, 512(ARG0)
movss %xmm1, 384(ARG0)
movss %xmm2, 256(ARG0)
movss %xmm3, 128(ARG0)
pshuflw $0xee, %xmm8, %xmm4
pshuflw $0xee, %xmm10, %xmm5
pshuflw $0xee, %xmm9, %xmm6
pshuflw $0xee, %xmm11, %xmm7
movss %xmm4, (ARG0)
movss %xmm4, (ARG1)
movss %xmm5, 128(ARG1)
movss %xmm6, 256(ARG1)
movss %xmm7, 384(ARG1)
pshuflw $0xee, %xmm0, %xmm0
pshuflw $0xee, %xmm1, %xmm1
pshuflw $0xee, %xmm2, %xmm2
pshuflw $0xee, %xmm3, %xmm3
movss %xmm0, 512(ARG1)
movss %xmm1, 640(ARG1)
movss %xmm2, 768(ARG1)
movss %xmm3, 896(ARG1)
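/* Finish the additions for xmm12-xmm15 and store to the remaining offsets. */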
pshufd $0x78, %xmm12, %xmm0
movaps %xmm13, %xmm1
psrldq $4, %xmm0
addps %xmm14, %xmm12
addps %xmm15, %xmm13
addps %xmm1, %xmm14
addps %xmm0, %xmm15
movss %xmm12, 960(ARG0)
movss %xmm14, 832(ARG0)
movss %xmm13, 704(ARG0)
movss %xmm15, 576(ARG0)
movhlps %xmm12, %xmm0
movhlps %xmm14, %xmm1
movhlps %xmm13, %xmm2
movhlps %xmm15, %xmm3
movss %xmm0, 448(ARG0)
movss %xmm1, 320(ARG0)
movss %xmm2, 192(ARG0)
movss %xmm3, 64(ARG0)
pshuflw $0xee, %xmm12, %xmm4
pshuflw $0xee, %xmm14, %xmm5
pshuflw $0xee, %xmm13, %xmm6
pshuflw $0xee, %xmm15, %xmm7
movss %xmm4, 64(ARG1)
movss %xmm5, 192(ARG1)
movss %xmm6, 320(ARG1)
movss %xmm7, 448(ARG1)
pshuflw $0xee, %xmm0, %xmm0
pshuflw $0xee, %xmm1, %xmm1
pshuflw $0xee, %xmm2, %xmm2
pshuflw $0xee, %xmm3, %xmm3
movss %xmm0, 576(ARG1)
movss %xmm1, 704(ARG1)
movss %xmm2, 832(ARG1)
movss %xmm3, 960(ARG1)
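/* Win64 epilogue: restore the callee-saved xmm registers saved in the prologue. */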
#ifdef IS_MSABI
movaps (%rsp), %xmm6
movaps 16(%rsp), %xmm7
movaps 32(%rsp), %xmm8
movaps 48(%rsp), %xmm9
movaps 64(%rsp), %xmm10
movaps 80(%rsp), %xmm11
movaps 96(%rsp), %xmm12
movaps 112(%rsp), %xmm13
movaps 128(%rsp), %xmm14
movaps 144(%rsp), %xmm15
addq $168, %rsp
#endif
ret
NONEXEC_STACK