// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
// X-macro list: invoked with V expanding to e.g. an enum entry or a case
// label; each entry names one x64 backend opcode. Entries ending in a bit
// width (32/16/8) operate on that operand width; the unsuffixed form is the
// 64-bit variant.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(X64Add) \
  V(X64Add32) \
  V(X64And) \
  V(X64And32) \
  V(X64Cmp) \
  V(X64Cmp32) \
  V(X64Cmp16) \
  V(X64Cmp8) \
  V(X64Test) \
  V(X64Test32) \
  V(X64Test16) \
  V(X64Test8) \
  V(X64Or) \
  V(X64Or32) \
  V(X64Xor) \
  V(X64Xor32) \
  V(X64Sub) \
  V(X64Sub32) \
  V(X64Imul) \
  V(X64Imul32) \
  V(X64ImulHigh32) \
  V(X64UmulHigh32) \
  V(X64Idiv) \
  V(X64Idiv32) \
  V(X64Udiv) \
  V(X64Udiv32) \
  V(X64Not) \
  V(X64Not32) \
  V(X64Neg) \
  V(X64Neg32) \
  V(X64Shl) \
  V(X64Shl32) \
  V(X64Shr) \
  V(X64Shr32) \
  V(X64Sar) \
  V(X64Sar32) \
  V(X64Ror) \
  V(X64Ror32) \
  V(X64Lzcnt) \
  V(X64Lzcnt32) \
  V(X64Tzcnt) \
  V(X64Tzcnt32) \
  V(X64Popcnt) \
  V(X64Popcnt32) \
  V(X64Bswap) \
  V(X64Bswap32) \
  V(LFence) \
  V(SSEFloat32Cmp) \
  V(SSEFloat32Add) \
  V(SSEFloat32Sub) \
  V(SSEFloat32Mul) \
  V(SSEFloat32Div) \
  V(SSEFloat32Abs) \
  V(SSEFloat32Neg) \
  V(SSEFloat32Sqrt) \
  V(SSEFloat32ToFloat64) \
  V(SSEFloat32ToInt32) \
  V(SSEFloat32ToUint32) \
  V(SSEFloat32Round) \
  V(SSEFloat64Cmp) \
  V(SSEFloat64Add) \
  V(SSEFloat64Sub) \
  V(SSEFloat64Mul) \
  V(SSEFloat64Div) \
  V(SSEFloat64Mod) \
  V(SSEFloat64Abs) \
  V(SSEFloat64Neg) \
  V(SSEFloat64Sqrt) \
  V(SSEFloat64Round) \
  V(SSEFloat32Max) \
  V(SSEFloat64Max) \
  V(SSEFloat32Min) \
  V(SSEFloat64Min) \
  V(SSEFloat64ToFloat32) \
  V(SSEFloat64ToInt32) \
  V(SSEFloat64ToUint32) \
  V(SSEFloat32ToInt64) \
  V(SSEFloat64ToInt64) \
  V(SSEFloat32ToUint64) \
  V(SSEFloat64ToUint64) \
  V(SSEInt32ToFloat64) \
  V(SSEInt32ToFloat32) \
  V(SSEInt64ToFloat32) \
  V(SSEInt64ToFloat64) \
  V(SSEUint64ToFloat32) \
  V(SSEUint64ToFloat64) \
  V(SSEUint32ToFloat64) \
  V(SSEUint32ToFloat32) \
  V(SSEFloat64ExtractLowWord32) \
  V(SSEFloat64ExtractHighWord32) \
  V(SSEFloat64InsertLowWord32) \
  V(SSEFloat64InsertHighWord32) \
  V(SSEFloat64LoadLowWord32) \
  V(SSEFloat64SilenceNaN) \
  V(AVXFloat32Cmp) \
  V(AVXFloat32Add) \
  V(AVXFloat32Sub) \
  V(AVXFloat32Mul) \
  V(AVXFloat32Div) \
  V(AVXFloat64Cmp) \
  V(AVXFloat64Add) \
  V(AVXFloat64Sub) \
  V(AVXFloat64Mul) \
  V(AVXFloat64Div) \
  V(AVXFloat64Abs) \
  V(AVXFloat64Neg) \
  V(AVXFloat32Abs) \
  V(AVXFloat32Neg) \
  V(X64Movsxbl) \
  V(X64Movzxbl) \
  V(X64Movsxbq) \
  V(X64Movzxbq) \
  V(X64Movb) \
  V(X64Movsxwl) \
  V(X64Movzxwl) \
  V(X64Movsxwq) \
  V(X64Movzxwq) \
  V(X64Movw) \
  V(X64Movl) \
  V(X64Movsxlq) \
  V(X64MovqDecompressTaggedSigned) \
  V(X64MovqDecompressTaggedPointer) \
  V(X64MovqDecompressAnyTagged) \
  V(X64Movq) \
  V(X64Movsd) \
  V(X64Movss) \
  V(X64Movdqu) \
  V(X64BitcastFI) \
  V(X64BitcastDL) \
  V(X64BitcastIF) \
  V(X64BitcastLD) \
  V(X64Lea32) \
  V(X64Lea) \
  V(X64Dec32) \
  V(X64Inc32) \
  V(X64Push) \
  V(X64Poke) \
  V(X64Peek) \
  V(X64StackCheck) \
  V(X64F32x4Splat) \
  V(X64F32x4ExtractLane) \
  V(X64F32x4ReplaceLane) \
  V(X64F32x4SConvertI32x4) \
  V(X64F32x4UConvertI32x4) \
  V(X64F32x4Abs) \
  V(X64F32x4Neg) \
  V(X64F32x4RecipApprox) \
  V(X64F32x4RecipSqrtApprox) \
  V(X64F32x4Add) \
  V(X64F32x4AddHoriz) \
  V(X64F32x4Sub) \
  V(X64F32x4Mul) \
  V(X64F32x4Min) \
  V(X64F32x4Max) \
  V(X64F32x4Eq) \
  V(X64F32x4Ne) \
  V(X64F32x4Lt) \
  V(X64F32x4Le) \
  V(X64I32x4Splat) \
  V(X64I32x4ExtractLane) \
  V(X64I32x4ReplaceLane) \
  V(X64I32x4SConvertF32x4) \
  V(X64I32x4SConvertI16x8Low) \
  V(X64I32x4SConvertI16x8High) \
  V(X64I32x4Neg) \
  V(X64I32x4Shl) \
  V(X64I32x4ShrS) \
  V(X64I32x4Add) \
  V(X64I32x4AddHoriz) \
  V(X64I32x4Sub) \
  V(X64I32x4Mul) \
  V(X64I32x4MinS) \
  V(X64I32x4MaxS) \
  V(X64I32x4Eq) \
  V(X64I32x4Ne) \
  V(X64I32x4GtS) \
  V(X64I32x4GeS) \
  V(X64I32x4UConvertF32x4) \
  V(X64I32x4UConvertI16x8Low) \
  V(X64I32x4UConvertI16x8High) \
  V(X64I32x4ShrU) \
  V(X64I32x4MinU) \
  V(X64I32x4MaxU) \
  V(X64I32x4GtU) \
  V(X64I32x4GeU) \
  V(X64I16x8Splat) \
  V(X64I16x8ExtractLane) \
  V(X64I16x8ReplaceLane) \
  V(X64I16x8SConvertI8x16Low) \
  V(X64I16x8SConvertI8x16High) \
  V(X64I16x8Neg) \
  V(X64I16x8Shl) \
  V(X64I16x8ShrS) \
  V(X64I16x8SConvertI32x4) \
  V(X64I16x8Add) \
  V(X64I16x8AddSaturateS) \
  V(X64I16x8AddHoriz) \
  V(X64I16x8Sub) \
  V(X64I16x8SubSaturateS) \
  V(X64I16x8Mul) \
  V(X64I16x8MinS) \
  V(X64I16x8MaxS) \
  V(X64I16x8Eq) \
  V(X64I16x8Ne) \
  V(X64I16x8GtS) \
  V(X64I16x8GeS) \
  V(X64I16x8UConvertI8x16Low) \
  V(X64I16x8UConvertI8x16High) \
  V(X64I16x8ShrU) \
  V(X64I16x8UConvertI32x4) \
  V(X64I16x8AddSaturateU) \
  V(X64I16x8SubSaturateU) \
  V(X64I16x8MinU) \
  V(X64I16x8MaxU) \
  V(X64I16x8GtU) \
  V(X64I16x8GeU) \
  V(X64I8x16Splat) \
  V(X64I8x16ExtractLane) \
  V(X64I8x16ReplaceLane) \
  V(X64I8x16SConvertI16x8) \
  V(X64I8x16Neg) \
  V(X64I8x16Shl) \
  V(X64I8x16ShrS) \
  V(X64I8x16Add) \
  V(X64I8x16AddSaturateS) \
  V(X64I8x16Sub) \
  V(X64I8x16SubSaturateS) \
  V(X64I8x16Mul) \
  V(X64I8x16MinS) \
  V(X64I8x16MaxS) \
  V(X64I8x16Eq) \
  V(X64I8x16Ne) \
  V(X64I8x16GtS) \
  V(X64I8x16GeS) \
  V(X64I8x16UConvertI16x8) \
  V(X64I8x16AddSaturateU) \
  V(X64I8x16SubSaturateU) \
  V(X64I8x16ShrU) \
  V(X64I8x16MinU) \
  V(X64I8x16MaxU) \
  V(X64I8x16GtU) \
  V(X64I8x16GeU) \
  V(X64S128Zero) \
  V(X64S128Not) \
  V(X64S128And) \
  V(X64S128Or) \
  V(X64S128Xor) \
  V(X64S128Select) \
  V(X64S1x4AnyTrue) \
  V(X64S1x4AllTrue) \
  V(X64S1x8AnyTrue) \
  V(X64S1x8AllTrue) \
  V(X64S1x16AnyTrue) \
  V(X64S1x16AllTrue) \
  V(X64Word64AtomicLoadUint8) \
  V(X64Word64AtomicLoadUint16) \
  V(X64Word64AtomicLoadUint32) \
  V(X64Word64AtomicLoadUint64) \
  V(X64Word64AtomicStoreWord8) \
  V(X64Word64AtomicStoreWord16) \
  V(X64Word64AtomicStoreWord32) \
  V(X64Word64AtomicStoreWord64) \
  V(X64Word64AtomicAddUint8) \
  V(X64Word64AtomicAddUint16) \
  V(X64Word64AtomicAddUint32) \
  V(X64Word64AtomicAddUint64) \
  V(X64Word64AtomicSubUint8) \
  V(X64Word64AtomicSubUint16) \
  V(X64Word64AtomicSubUint32) \
  V(X64Word64AtomicSubUint64) \
  V(X64Word64AtomicAndUint8) \
  V(X64Word64AtomicAndUint16) \
  V(X64Word64AtomicAndUint32) \
  V(X64Word64AtomicAndUint64) \
  V(X64Word64AtomicOrUint8) \
  V(X64Word64AtomicOrUint16) \
  V(X64Word64AtomicOrUint32) \
  V(X64Word64AtomicOrUint64) \
  V(X64Word64AtomicXorUint8) \
  V(X64Word64AtomicXorUint16) \
  V(X64Word64AtomicXorUint32) \
  V(X64Word64AtomicXorUint64) \
  V(X64Word64AtomicExchangeUint8) \
  V(X64Word64AtomicExchangeUint16) \
  V(X64Word64AtomicExchangeUint32) \
  V(X64Word64AtomicExchangeUint64) \
  V(X64Word64AtomicCompareExchangeUint8) \
  V(X64Word64AtomicCompareExchangeUint16) \
  V(X64Word64AtomicCompareExchangeUint32) \
  V(X64Word64AtomicCompareExchangeUint64)

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)
// X-macro list of x64 addressing modes. The trailing digit in a mode name is
// the SIB scale factor applied to the index register %r2 (only 1, 2, 4, 8 are
// encodable on x64); K is a 32-bit signed immediate displacement.
// NOTE(review): the original comments for MR4I and MR8I read "%r2*3" and
// "%r2*4"; corrected here to match the scale implied by the mode names.
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */      \
  V(MRI)  /* [%r1         + K] */      \
  V(MR1)  /* [%r1 + %r2*1    ] */      \
  V(MR2)  /* [%r1 + %r2*2    ] */      \
  V(MR4)  /* [%r1 + %r2*4    ] */      \
  V(MR8)  /* [%r1 + %r2*8    ] */      \
  V(MR1I) /* [%r1 + %r2*1 + K] */      \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */      \
  V(M1)   /* [      %r2*1    ] */      \
  V(M2)   /* [      %r2*2    ] */      \
  V(M4)   /* [      %r2*4    ] */      \
  V(M8)   /* [      %r2*8    ] */      \
  V(M1I)  /* [      %r2*1 + K] */      \
  V(M2I)  /* [      %r2*2 + K] */      \
  V(M4I)  /* [      %r2*4 + K] */      \
  V(M8I)  /* [      %r2*8 + K] */      \
  V(Root) /* [%root       + K] */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_X64_INSTRUCTION_CODES_X64_H_