Skip to content

Latest commit

 

History

History
244 lines (216 loc) · 5.01 KB

synth_x86_64.S

File metadata and controls

244 lines (216 loc) · 5.01 KB
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
/*
synth_x86_64: SSE optimized synth for x86-64
copyright 1995-2009 by the mpg123 project - free software under the terms of the LGPL 2.1
see COPYING and AUTHORS files in distribution or http://mpg123.org
initially written by Taihei Monma

GNU as (AT&T) syntax.  One function body serves both the System V AMD64
ABI and the Microsoft x64 ABI: the ARGn macros below map the incoming
argument registers of whichever ABI is selected onto a common set of
names used by the code.
*/
#include "mangle.h"
#ifdef IS_MSABI
/* Microsoft x64: args arrive in rcx, rdx, r8, r9.  The first argument
   is copied from %rcx into the volatile scratch register %r10 at entry
   (see the movq at the top of the function), hence ARG0 = %r10. */
/* short *window; */
#define ARG0 %r10
/* short *b0; */
#define ARG1 %rdx
/* short *samples; */
#define ARG2 %r8
/* int bo1; */
#define ARG3 %r9
#else
/* System V AMD64 (Linux/macOS/BSD): args arrive in rdi, rsi, rdx, rcx. */
/* short *window; */
#define ARG0 %rdi
/* short *b0; */
#define ARG1 %rsi
/* short *samples; */
#define ARG2 %rdx
/* int bo1; */
#define ARG3 %rcx
#endif
/* XMM registers that stay live across the whole function: */
#define XMMREG_CLIP %xmm15 /* running per-lane count of clipped samples */
#define XMMREG_MAX %xmm14 /* {32767, 32767, 32767, 32767} */
#define XMMREG_MIN %xmm13 /* {-32769, -32769, -32769, -32769} : not -32768 because SSE doesn't have "less than" comparison... */
#define XMMREG_FULL %xmm12 /* {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF} */
/*
int synth_1to1_x86_64_asm(short *window, short *b0, short *samples, int bo1);
return value: number of clipped samples
*/
/* Read-only constant table loaded into XMMREG_MAX / XMMREG_MIN at
   function entry.  Apple's Mach-O assembler does not accept the ELF
   ".section .rodata" spelling, so Apple builds place these (read-only)
   constants in .data instead. */
#ifndef __APPLE__
.section .rodata
#else
.data
#endif
ALIGN32
ASM_NAME(maxmin_x86_64):
/* first 16 bytes: four copies of the upper clip bound 32767 */
.long 32767
.long 32767
.long 32767
.long 32767
/* next 16 bytes: four copies of -32769, one below the real lower bound
   -32768, so pcmpgtd (the only SSE2 signed dword compare) can express
   "value >= -32768" as "value > -32769" -- see the XMMREG_MIN note */
.long -32769
.long -32769
.long -32769
.long -32769
.text
ALIGN16
.globl ASM_NAME(synth_1to1_x86_64_asm)
/*
Core 1:1 synthesis step: in two halves of 4 iterations each it computes
4 output samples per iteration as 64-tap dot products of the window
table against the filter state b0 (b0 advancing forward in the first
half, backward in the second), scales and saturates them to 16 bits,
interleaves them into the even 16-bit slots of samples[] while keeping
the odd slots (presumably the other stereo channel -- data already in
the buffer is preserved), and returns the number of clipped samples
in %eax.
In:  ARG0 = window, ARG1 = b0, ARG2 = samples, ARG3 = bo1
Out: %eax = clip count;  clobbers xmm0-7, xmm12-15, rax, rcx + ARGn
*/
ASM_NAME(synth_1to1_x86_64_asm):
#ifdef IS_MSABI /* should save xmm6-15 */
movq %rcx, ARG0 /* move first arg to %r10 so both ABIs share one body */
subq $104, %rsp /* stack alignment + 6 xmm registers */
movaps %xmm6, (%rsp) /* xmm6,7,12-15 are callee-saved on Win64 */
movaps %xmm7, 16(%rsp)
movaps %xmm12, 32(%rsp)
movaps %xmm13, 48(%rsp)
movaps %xmm14, 64(%rsp)
movaps %xmm15, 80(%rsp)
#endif
/* load clip bounds, zero the clip counter, build an all-ones mask */
leaq ASM_NAME(maxmin_x86_64)(%rip), %rax
movaps (%rax), XMMREG_MAX
movaps 16(%rax), XMMREG_MIN
pxor XMMREG_CLIP, XMMREG_CLIP
pcmpeqd XMMREG_FULL, XMMREG_FULL
/* window += 32 - 2*(bo1 & 15): start offset into the window table */
andq $0xf, ARG3
shlq $1, ARG3 /* bo1 indexes 16-bit values -> double it for bytes */
leaq 32(ARG0), ARG0
subq ARG3, ARG0
movl $4, %ecx /* first half: 4 iterations, 4 output samples each */
ALIGN16
1:
/* load 4 window rows (64 bytes apart), 32 coefficients per row */
movups (ARG0), %xmm0
movups 16(ARG0), %xmm1
movups 64(ARG0), %xmm2
movups 80(ARG0), %xmm3
movups 128(ARG0), %xmm4
movups 144(ARG0), %xmm5
movups 192(ARG0), %xmm6
movups 208(ARG0), %xmm7
/* 16-bit multiply, pairwise-add into dwords, against b0 (forward) */
pmaddwd (ARG1), %xmm0
pmaddwd 16(ARG1), %xmm1
pmaddwd 32(ARG1), %xmm2
pmaddwd 48(ARG1), %xmm3
pmaddwd 64(ARG1), %xmm4
pmaddwd 80(ARG1), %xmm5
pmaddwd 96(ARG1), %xmm6
pmaddwd 112(ARG1), %xmm7
paddd %xmm1, %xmm0 /* xmm0 = 4 partial sums of output sample 0 */
paddd %xmm3, %xmm2 /* xmm2 = 4 partial sums of output sample 1 */
paddd %xmm5, %xmm4 /* xmm4 = 4 partial sums of output sample 2 */
paddd %xmm7, %xmm6 /* xmm6 = 4 partial sums of output sample 3 */
/* 4x4 dword transpose so each register holds one addend of every
   sample, then add across registers to finish all 4 dot products */
movaps %xmm0, %xmm1
movaps %xmm4, %xmm3
punpckldq %xmm2, %xmm0
punpckldq %xmm6, %xmm4
punpckhdq %xmm2, %xmm1
punpckhdq %xmm6, %xmm3
movaps %xmm0, %xmm5
movaps %xmm1, %xmm7
movlhps %xmm4, %xmm0
movhlps %xmm5, %xmm4
movlhps %xmm3, %xmm1
movhlps %xmm7, %xmm3
paddd %xmm4, %xmm0
paddd %xmm3, %xmm1
paddd %xmm1, %xmm0 /* xmm0 = the 4 finished 32-bit sums */
psrad $13, %xmm0 /* fixed-point scale down (arithmetic >> 13) */
movups (ARG2), %xmm3 /* existing interleaved data at destination */
movaps %xmm0, %xmm1 /* keep unsaturated sums for clip detection */
movaps %xmm0, %xmm2
packssdw %xmm0, %xmm0 /* saturate the four sums to 16 bits */
pcmpgtd XMMREG_MAX, %xmm1 /* mask: sum > 32767  -> clipped high */
pcmpgtd XMMREG_MIN, %xmm2 /* mask: sum > -32769 -> NOT clipped low */
/* gather the odd 16-bit slots of the existing destination data
   (words 1,3,5,7 of xmm3) into the low half of xmm3 */
movhlps %xmm3, %xmm4
pshuflw $0xdd, %xmm3, %xmm3
pshuflw $0xdd, %xmm4, %xmm4
psrlq $32, %xmm3
psllq $32, %xmm4
por %xmm4, %xmm3
punpcklwd %xmm3, %xmm0 /* interleave new samples with preserved slots */
movups %xmm0, (ARG2)
/* clip accounting: one count per lane for high clip, one for low */
pxor XMMREG_FULL, %xmm2 /* invert -> mask of sums < -32768 (clipped low) */
psrld $31, %xmm1 /* all-ones masks become 0/1 per lane */
psrld $31, %xmm2
paddd %xmm2, %xmm1
paddd %xmm1, XMMREG_CLIP
leaq 256(ARG0), ARG0 /* advance 4 window rows */
leaq 128(ARG1), ARG1 /* b0 moves forward in this half */
leaq 16(ARG2), ARG2 /* wrote 4 interleaved 2x16-bit frames */
decl %ecx
jnz 1b
movl $4, %ecx /* second half: same kernel, b0 walking backward */
ALIGN16
1:
movups (ARG0), %xmm0
movups 16(ARG0), %xmm1
movups 64(ARG0), %xmm2
movups 80(ARG0), %xmm3
movups 128(ARG0), %xmm4
movups 144(ARG0), %xmm5
movups 192(ARG0), %xmm6
movups 208(ARG0), %xmm7
/* identical to the first half except b0 is addressed with negative
   offsets: the mirrored half of the polyphase filter state */
pmaddwd (ARG1), %xmm0
pmaddwd 16(ARG1), %xmm1
pmaddwd -32(ARG1), %xmm2
pmaddwd -16(ARG1), %xmm3
pmaddwd -64(ARG1), %xmm4
pmaddwd -48(ARG1), %xmm5
pmaddwd -96(ARG1), %xmm6
pmaddwd -80(ARG1), %xmm7
paddd %xmm1, %xmm0
paddd %xmm3, %xmm2
paddd %xmm5, %xmm4
paddd %xmm7, %xmm6
/* transpose + horizontal add, exactly as in the first loop */
movaps %xmm0, %xmm1
movaps %xmm4, %xmm3
punpckldq %xmm2, %xmm0
punpckldq %xmm6, %xmm4
punpckhdq %xmm2, %xmm1
punpckhdq %xmm6, %xmm3
movaps %xmm0, %xmm5
movaps %xmm1, %xmm7
movlhps %xmm4, %xmm0
movhlps %xmm5, %xmm4
movlhps %xmm3, %xmm1
movhlps %xmm7, %xmm3
paddd %xmm4, %xmm0
paddd %xmm3, %xmm1
paddd %xmm1, %xmm0
psrad $13, %xmm0 /* fixed-point scale down */
movups (ARG2), %xmm3
movaps %xmm0, %xmm1
movaps %xmm0, %xmm2
packssdw %xmm0, %xmm0 /* saturate to 16 bits */
pcmpgtd XMMREG_MAX, %xmm1 /* clipped high mask */
pcmpgtd XMMREG_MIN, %xmm2 /* NOT-clipped-low mask */
movhlps %xmm3, %xmm4
pshuflw $0xdd, %xmm3, %xmm3
pshuflw $0xdd, %xmm4, %xmm4
psrlq $32, %xmm3
psllq $32, %xmm4
por %xmm4, %xmm3 /* preserved odd 16-bit slots */
punpcklwd %xmm3, %xmm0
movups %xmm0, (ARG2)
pxor XMMREG_FULL, %xmm2 /* invert -> clipped-low mask */
psrld $31, %xmm1
psrld $31, %xmm2
paddd %xmm2, %xmm1
paddd %xmm1, XMMREG_CLIP
leaq 256(ARG0), ARG0
leaq -128(ARG1), ARG1 /* b0 moves backward in this half */
leaq 16(ARG2), ARG2
decl %ecx
jnz 1b
/* horizontal sum of the 4 per-lane clip counters into %eax */
pshuflw $0xee, XMMREG_CLIP, %xmm0
movhlps XMMREG_CLIP, %xmm1
pshuflw $0xee, %xmm1, %xmm2
paddd %xmm0, XMMREG_CLIP
paddd %xmm1, XMMREG_CLIP
paddd %xmm2, XMMREG_CLIP
movd XMMREG_CLIP, %eax
#ifdef IS_MSABI
/* restore the Win64 callee-saved xmm registers and the stack */
movaps (%rsp), %xmm6
movaps 16(%rsp), %xmm7
movaps 32(%rsp), %xmm12
movaps 48(%rsp), %xmm13
movaps 64(%rsp), %xmm14
movaps 80(%rsp), %xmm15
addq $104, %rsp
#endif
ret
NONEXEC_STACK