// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// File: instruction-codes-arm64.h
// Copyright 2014 the V8 project authors. All rights reserved.
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
#ifndef V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_
6
#define V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_
7
8
namespace
v8
{
9
namespace
internal {
10
namespace
compiler {
11
12
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
// X-macro list: instantiate with a macro V(name) to generate one entry per
// ARM64 backend opcode (enum values, switch cases, name tables, ...).
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(Arm64Add)                      \
  V(Arm64Add32)                    \
  V(Arm64And)                      \
  V(Arm64And32)                    \
  V(Arm64Bic)                      \
  V(Arm64Bic32)                    \
  V(Arm64Clz)                      \
  V(Arm64Clz32)                    \
  V(Arm64Cmp)                      \
  V(Arm64Cmp32)                    \
  V(Arm64Cmn)                      \
  V(Arm64Cmn32)                    \
  V(Arm64Tst)                      \
  V(Arm64Tst32)                    \
  V(Arm64Or)                       \
  V(Arm64Or32)                     \
  V(Arm64Orn)                      \
  V(Arm64Orn32)                    \
  V(Arm64Eor)                      \
  V(Arm64Eor32)                    \
  V(Arm64Eon)                      \
  V(Arm64Eon32)                    \
  V(Arm64Sub)                      \
  V(Arm64Sub32)                    \
  V(Arm64Mul)                      \
  V(Arm64Mul32)                    \
  V(Arm64Smull)                    \
  V(Arm64Umull)                    \
  V(Arm64Madd)                     \
  V(Arm64Madd32)                   \
  V(Arm64Msub)                     \
  V(Arm64Msub32)                   \
  V(Arm64Mneg)                     \
  V(Arm64Mneg32)                   \
  V(Arm64Idiv)                     \
  V(Arm64Idiv32)                   \
  V(Arm64Udiv)                     \
  V(Arm64Udiv32)                   \
  V(Arm64Imod)                     \
  V(Arm64Imod32)                   \
  V(Arm64Umod)                     \
  V(Arm64Umod32)                   \
  V(Arm64Not)                      \
  V(Arm64Not32)                    \
  V(Arm64Lsl)                      \
  V(Arm64Lsl32)                    \
  V(Arm64Lsr)                      \
  V(Arm64Lsr32)                    \
  V(Arm64Asr)                      \
  V(Arm64Asr32)                    \
  V(Arm64Ror)                      \
  V(Arm64Ror32)                    \
  V(Arm64Mov32)                    \
  V(Arm64Sxtb32)                   \
  V(Arm64Sxth32)                   \
  V(Arm64Sxtb)                     \
  V(Arm64Sxth)                     \
  V(Arm64Sxtw)                     \
  V(Arm64Sbfx32)                   \
  V(Arm64Ubfx)                     \
  V(Arm64Ubfx32)                   \
  V(Arm64Ubfiz32)                  \
  V(Arm64Bfi)                      \
  V(Arm64Rbit)                     \
  V(Arm64Rbit32)                   \
  V(Arm64Rev)                      \
  V(Arm64Rev32)                    \
  V(Arm64TestAndBranch32)          \
  V(Arm64TestAndBranch)            \
  V(Arm64CompareAndBranch32)       \
  V(Arm64CompareAndBranch)         \
  V(Arm64Claim)                    \
  V(Arm64Poke)                     \
  V(Arm64PokePair)                 \
  V(Arm64Peek)                     \
  V(Arm64Float32Cmp)               \
  V(Arm64Float32Add)               \
  V(Arm64Float32Sub)               \
  V(Arm64Float32Mul)               \
  V(Arm64Float32Div)               \
  V(Arm64Float32Abs)               \
  V(Arm64Float32Neg)               \
  V(Arm64Float32Sqrt)              \
  V(Arm64Float32RoundDown)         \
  V(Arm64Float32Max)               \
  V(Arm64Float32Min)               \
  V(Arm64Float64Cmp)               \
  V(Arm64Float64Add)               \
  V(Arm64Float64Sub)               \
  V(Arm64Float64Mul)               \
  V(Arm64Float64Div)               \
  V(Arm64Float64Mod)               \
  V(Arm64Float64Max)               \
  V(Arm64Float64Min)               \
  V(Arm64Float64Abs)               \
  V(Arm64Float64Neg)               \
  V(Arm64Float64Sqrt)              \
  V(Arm64Float64RoundDown)         \
  V(Arm64Float32RoundUp)           \
  V(Arm64Float64RoundUp)           \
  V(Arm64Float64RoundTiesAway)     \
  V(Arm64Float32RoundTruncate)     \
  V(Arm64Float64RoundTruncate)     \
  V(Arm64Float32RoundTiesEven)     \
  V(Arm64Float64RoundTiesEven)     \
  V(Arm64Float64SilenceNaN)        \
  V(Arm64Float32ToFloat64)         \
  V(Arm64Float64ToFloat32)         \
  V(Arm64Float32ToInt32)           \
  V(Arm64Float64ToInt32)           \
  V(Arm64Float32ToUint32)          \
  V(Arm64Float64ToUint32)          \
  V(Arm64Float32ToInt64)           \
  V(Arm64Float64ToInt64)           \
  V(Arm64Float32ToUint64)          \
  V(Arm64Float64ToUint64)          \
  V(Arm64Int32ToFloat32)           \
  V(Arm64Int32ToFloat64)           \
  V(Arm64Int64ToFloat32)           \
  V(Arm64Int64ToFloat64)           \
  V(Arm64Uint32ToFloat32)          \
  V(Arm64Uint32ToFloat64)          \
  V(Arm64Uint64ToFloat32)          \
  V(Arm64Uint64ToFloat64)          \
  V(Arm64Float64ExtractLowWord32)  \
  V(Arm64Float64ExtractHighWord32) \
  V(Arm64Float64InsertLowWord32)   \
  V(Arm64Float64InsertHighWord32)  \
  V(Arm64Float64MoveU64)           \
  V(Arm64U64MoveFloat64)           \
  V(Arm64LdrS)                     \
  V(Arm64StrS)                     \
  V(Arm64LdrD)                     \
  V(Arm64StrD)                     \
  V(Arm64LdrQ)                     \
  V(Arm64StrQ)                     \
  V(Arm64Ldrb)                     \
  V(Arm64Ldrsb)                    \
  V(Arm64Strb)                     \
  V(Arm64Ldrh)                     \
  V(Arm64Ldrsh)                    \
  V(Arm64Strh)                     \
  V(Arm64Ldrsw)                    \
  V(Arm64LdrW)                     \
  V(Arm64StrW)                     \
  V(Arm64Ldr)                      \
  V(Arm64Str)                      \
  V(Arm64DsbIsb)                   \
  V(Arm64F32x4Splat)               \
  V(Arm64F32x4ExtractLane)         \
  V(Arm64F32x4ReplaceLane)         \
  V(Arm64F32x4SConvertI32x4)       \
  V(Arm64F32x4UConvertI32x4)       \
  V(Arm64F32x4Abs)                 \
  V(Arm64F32x4Neg)                 \
  V(Arm64F32x4RecipApprox)         \
  V(Arm64F32x4RecipSqrtApprox)     \
  V(Arm64F32x4Add)                 \
  V(Arm64F32x4AddHoriz)            \
  V(Arm64F32x4Sub)                 \
  V(Arm64F32x4Mul)                 \
  V(Arm64F32x4Min)                 \
  V(Arm64F32x4Max)                 \
  V(Arm64F32x4Eq)                  \
  V(Arm64F32x4Ne)                  \
  V(Arm64F32x4Lt)                  \
  V(Arm64F32x4Le)                  \
  V(Arm64I32x4Splat)               \
  V(Arm64I32x4ExtractLane)         \
  V(Arm64I32x4ReplaceLane)         \
  V(Arm64I32x4SConvertF32x4)       \
  V(Arm64I32x4SConvertI16x8Low)    \
  V(Arm64I32x4SConvertI16x8High)   \
  V(Arm64I32x4Neg)                 \
  V(Arm64I32x4Shl)                 \
  V(Arm64I32x4ShrS)                \
  V(Arm64I32x4Add)                 \
  V(Arm64I32x4AddHoriz)            \
  V(Arm64I32x4Sub)                 \
  V(Arm64I32x4Mul)                 \
  V(Arm64I32x4MinS)                \
  V(Arm64I32x4MaxS)                \
  V(Arm64I32x4Eq)                  \
  V(Arm64I32x4Ne)                  \
  V(Arm64I32x4GtS)                 \
  V(Arm64I32x4GeS)                 \
  V(Arm64I32x4UConvertF32x4)       \
  V(Arm64I32x4UConvertI16x8Low)    \
  V(Arm64I32x4UConvertI16x8High)   \
  V(Arm64I32x4ShrU)                \
  V(Arm64I32x4MinU)                \
  V(Arm64I32x4MaxU)                \
  V(Arm64I32x4GtU)                 \
  V(Arm64I32x4GeU)                 \
  V(Arm64I16x8Splat)               \
  V(Arm64I16x8ExtractLane)         \
  V(Arm64I16x8ReplaceLane)         \
  V(Arm64I16x8SConvertI8x16Low)    \
  V(Arm64I16x8SConvertI8x16High)   \
  V(Arm64I16x8Neg)                 \
  V(Arm64I16x8Shl)                 \
  V(Arm64I16x8ShrS)                \
  V(Arm64I16x8SConvertI32x4)       \
  V(Arm64I16x8Add)                 \
  V(Arm64I16x8AddSaturateS)        \
  V(Arm64I16x8AddHoriz)            \
  V(Arm64I16x8Sub)                 \
  V(Arm64I16x8SubSaturateS)        \
  V(Arm64I16x8Mul)                 \
  V(Arm64I16x8MinS)                \
  V(Arm64I16x8MaxS)                \
  V(Arm64I16x8Eq)                  \
  V(Arm64I16x8Ne)                  \
  V(Arm64I16x8GtS)                 \
  V(Arm64I16x8GeS)                 \
  V(Arm64I16x8UConvertI8x16Low)    \
  V(Arm64I16x8UConvertI8x16High)   \
  V(Arm64I16x8ShrU)                \
  V(Arm64I16x8UConvertI32x4)       \
  V(Arm64I16x8AddSaturateU)        \
  V(Arm64I16x8SubSaturateU)        \
  V(Arm64I16x8MinU)                \
  V(Arm64I16x8MaxU)                \
  V(Arm64I16x8GtU)                 \
  V(Arm64I16x8GeU)                 \
  V(Arm64I8x16Splat)               \
  V(Arm64I8x16ExtractLane)         \
  V(Arm64I8x16ReplaceLane)         \
  V(Arm64I8x16Neg)                 \
  V(Arm64I8x16Shl)                 \
  V(Arm64I8x16ShrS)                \
  V(Arm64I8x16SConvertI16x8)       \
  V(Arm64I8x16Add)                 \
  V(Arm64I8x16AddSaturateS)        \
  V(Arm64I8x16Sub)                 \
  V(Arm64I8x16SubSaturateS)        \
  V(Arm64I8x16Mul)                 \
  V(Arm64I8x16MinS)                \
  V(Arm64I8x16MaxS)                \
  V(Arm64I8x16Eq)                  \
  V(Arm64I8x16Ne)                  \
  V(Arm64I8x16GtS)                 \
  V(Arm64I8x16GeS)                 \
  V(Arm64I8x16ShrU)                \
  V(Arm64I8x16UConvertI16x8)       \
  V(Arm64I8x16AddSaturateU)        \
  V(Arm64I8x16SubSaturateU)        \
  V(Arm64I8x16MinU)                \
  V(Arm64I8x16MaxU)                \
  V(Arm64I8x16GtU)                 \
  V(Arm64I8x16GeU)                 \
  V(Arm64S128Zero)                 \
  V(Arm64S128Dup)                  \
  V(Arm64S128And)                  \
  V(Arm64S128Or)                   \
  V(Arm64S128Xor)                  \
  V(Arm64S128Not)                  \
  V(Arm64S128Select)               \
  V(Arm64S32x4ZipLeft)             \
  V(Arm64S32x4ZipRight)            \
  V(Arm64S32x4UnzipLeft)           \
  V(Arm64S32x4UnzipRight)          \
  V(Arm64S32x4TransposeLeft)       \
  V(Arm64S32x4TransposeRight)      \
  V(Arm64S32x4Shuffle)             \
  V(Arm64S16x8ZipLeft)             \
  V(Arm64S16x8ZipRight)            \
  V(Arm64S16x8UnzipLeft)           \
  V(Arm64S16x8UnzipRight)          \
  V(Arm64S16x8TransposeLeft)       \
  V(Arm64S16x8TransposeRight)      \
  V(Arm64S8x16ZipLeft)             \
  V(Arm64S8x16ZipRight)            \
  V(Arm64S8x16UnzipLeft)           \
  V(Arm64S8x16UnzipRight)          \
  V(Arm64S8x16TransposeLeft)       \
  V(Arm64S8x16TransposeRight)      \
  V(Arm64S8x16Concat)              \
  V(Arm64S8x16Shuffle)             \
  V(Arm64S32x2Reverse)             \
  V(Arm64S16x4Reverse)             \
  V(Arm64S16x2Reverse)             \
  V(Arm64S8x8Reverse)              \
  V(Arm64S8x4Reverse)              \
  V(Arm64S8x2Reverse)              \
  V(Arm64S1x4AnyTrue)              \
  V(Arm64S1x4AllTrue)              \
  V(Arm64S1x8AnyTrue)              \
  V(Arm64S1x8AllTrue)              \
  V(Arm64S1x16AnyTrue)             \
  V(Arm64S1x16AllTrue)             \
  V(Arm64Word64AtomicLoadUint8)    \
  V(Arm64Word64AtomicLoadUint16)   \
  V(Arm64Word64AtomicLoadUint32)   \
  V(Arm64Word64AtomicLoadUint64)   \
  V(Arm64Word64AtomicStoreWord8)   \
  V(Arm64Word64AtomicStoreWord16)  \
  V(Arm64Word64AtomicStoreWord32)  \
  V(Arm64Word64AtomicStoreWord64)  \
  V(Arm64Word64AtomicAddUint8)     \
  V(Arm64Word64AtomicAddUint16)    \
  V(Arm64Word64AtomicAddUint32)    \
  V(Arm64Word64AtomicAddUint64)    \
  V(Arm64Word64AtomicSubUint8)     \
  V(Arm64Word64AtomicSubUint16)    \
  V(Arm64Word64AtomicSubUint32)    \
  V(Arm64Word64AtomicSubUint64)    \
  V(Arm64Word64AtomicAndUint8)     \
  V(Arm64Word64AtomicAndUint16)    \
  V(Arm64Word64AtomicAndUint32)    \
  V(Arm64Word64AtomicAndUint64)    \
  V(Arm64Word64AtomicOrUint8)      \
  V(Arm64Word64AtomicOrUint16)     \
  V(Arm64Word64AtomicOrUint32)     \
  V(Arm64Word64AtomicOrUint64)     \
  V(Arm64Word64AtomicXorUint8)     \
  V(Arm64Word64AtomicXorUint16)    \
  V(Arm64Word64AtomicXorUint32)    \
  V(Arm64Word64AtomicXorUint64)    \
  V(Arm64Word64AtomicExchangeUint8)         \
  V(Arm64Word64AtomicExchangeUint16)        \
  V(Arm64Word64AtomicExchangeUint32)        \
  V(Arm64Word64AtomicExchangeUint64)        \
  V(Arm64Word64AtomicCompareExchangeUint8)  \
  V(Arm64Word64AtomicCompareExchangeUint16) \
  V(Arm64Word64AtomicCompareExchangeUint32) \
  V(Arm64Word64AtomicCompareExchangeUint64)

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V)                          \
  V(MRI)              /* [%r0 + K] */                           \
  V(MRR)              /* [%r0 + %r1] */                         \
  V(Operand2_R_LSL_I) /* %r0 LSL K */                           \
  V(Operand2_R_LSR_I) /* %r0 LSR K */                           \
  V(Operand2_R_ASR_I) /* %r0 ASR K */                           \
  V(Operand2_R_ROR_I) /* %r0 ROR K */                           \
  V(Operand2_R_UXTB)  /* %r0 UXTB (unsigned extend byte) */     \
  V(Operand2_R_UXTH)  /* %r0 UXTH (unsigned extend halfword) */ \
  V(Operand2_R_SXTB)  /* %r0 SXTB (signed extend byte) */       \
  V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */   \
  V(Operand2_R_SXTW)  /* %r0 SXTW (signed extend word) */       \
  V(Root)             /* [%rr + K] */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_ARM64_INSTRUCTION_CODES_ARM64_H_
// Doxygen cross-reference: v8 — Definition: libplatform.h:13
// Path: v8/src/compiler/backend/arm64/instruction-codes-arm64.h
// Generated on Tue Dec 25 2018 14:38:27 by Doxygen 1.8.14