V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
constants-ppc.h
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PPC_CONSTANTS_PPC_H_
#define V8_PPC_CONSTANTS_PPC_H_

#include <stdint.h>

#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/globals.h"

// UNIMPLEMENTED_ macro for PPC.
#ifdef DEBUG
#define UNIMPLEMENTED_PPC() \
  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
                       __FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_PPC()
#endif
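
// For illustration only (not part of the original header): in DEBUG builds
// a not-yet-ported code path can report itself via the macro; in release
// builds the call compiles away entirely. The stub name is hypothetical.
//
//   void DecodeUnsupportedInstruction() {
//     UNIMPLEMENTED_PPC();  // prints file, line and enclosing function
//   }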

namespace v8 {
namespace internal {

// TODO(sigurds): Change this value once we use relative jumps.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;

// Number of registers
const int kNumRegisters = 32;

// FP support.
const int kNumDoubleRegisters = 32;

const int kNoRegister = -1;

// Used in embedded constant pool builder - max reach in bits for
// various load instructions (one less due to unsigned)
const int kLoadPtrMaxReachBits = 15;
const int kLoadDoubleMaxReachBits = 15;

// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 128;
// sign-extend the least significant 16 bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)

// sign-extend the least significant 22 bits of value <imm>
#define SIGN_EXT_IMM22(imm) ((static_cast<int>(imm) << 10) >> 10)

// sign-extend the least significant 26 bits of value <imm>
#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)

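// A minimal illustration (not from the original header) of why the shift
// pair works: the left shift moves the immediate's sign bit into the int's
// sign position, and the arithmetic right shift replicates it back down:
//
//   SIGN_EXT_IMM16(0x7FFF)  // == 32767: bit 15 clear, value unchanged
//   SIGN_EXT_IMM16(0x8000)  // == -32768: bit 15 set, upper bits filled
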
// -----------------------------------------------------------------------------
// Conditions.

// Defines constants and accessor classes to assemble, disassemble and
// simulate PPC instructions.
//
// Section references in the code refer to the "PowerPC Microprocessor
// Family: The Programmer's Reference Guide" from 10/95
// https://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600741775/$file/prg.pdf
//
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
enum Condition {
  kNoCondition = -1,
  eq = 0,         // Equal.
  ne = 1,         // Not equal.
  ge = 2,         // Greater or equal.
  lt = 3,         // Less than.
  gt = 4,         // Greater than.
  le = 5,         // Less than or equal.
  unordered = 6,  // Floating-point unordered.
  ordered = 7,
  overflow = 8,   // Summary overflow.
  nooverflow = 9,
  al = 10         // Always.
};


inline Condition NegateCondition(Condition cond) {
  DCHECK(cond != al);
  return static_cast<Condition>(cond ^ ne);
}
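
// For illustration (not in the original header): each condition above and
// its negation differ only in the low bit of their encodings, so XOR-ing
// with ne (== 1) flips between the members of each pair:
//
//   NegateCondition(eq)  // -> ne  (0 ^ 1 == 1)
//   NegateCondition(ge)  // -> lt  (2 ^ 1 == 3)
//   NegateCondition(gt)  // -> le  (4 ^ 1 == 5)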


// -----------------------------------------------------------------------------
// Instructions encoding.

// Instr is merely used by the Assembler to distinguish 32-bit integers
// representing instructions from usual 32-bit values.
// Instruction objects are pointers to 32-bit values, and provide methods to
// access the various ISA fields.
typedef uint32_t Instr;

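// A minimal sketch (illustrative; not part of the original header): PPC
// places the primary opcode in the six most significant bits of every
// 32-bit instruction, so it can be recovered from an Instr with a shift:
//
//   Instr instr = 0x7C000278;               // xor rA, rS, rB (XORX below)
//   uint32_t primary_opcode = instr >> 26;  // == 31, the X-form group
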
#define PPC_XX3_OPCODE_LIST(V) \
  /* VSX Scalar Add Double-Precision */ \
  V(xsadddp, XSADDDP, 0xF0000100) \
  /* VSX Scalar Add Single-Precision */ \
  V(xsaddsp, XSADDSP, 0xF0000000) \
  /* VSX Scalar Compare Ordered Double-Precision */ \
  V(xscmpodp, XSCMPODP, 0xF0000158) \
  /* VSX Scalar Compare Unordered Double-Precision */ \
  V(xscmpudp, XSCMPUDP, 0xF0000118) \
  /* VSX Scalar Copy Sign Double-Precision */ \
  V(xscpsgndp, XSCPSGNDP, 0xF0000580) \
  /* VSX Scalar Divide Double-Precision */ \
  V(xsdivdp, XSDIVDP, 0xF00001C0) \
  /* VSX Scalar Divide Single-Precision */ \
  V(xsdivsp, XSDIVSP, 0xF00000C0) \
  /* VSX Scalar Multiply-Add Type-A Double-Precision */ \
  V(xsmaddadp, XSMADDADP, 0xF0000108) \
  /* VSX Scalar Multiply-Add Type-A Single-Precision */ \
  V(xsmaddasp, XSMADDASP, 0xF0000008) \
  /* VSX Scalar Multiply-Add Type-M Double-Precision */ \
  V(xsmaddmdp, XSMADDMDP, 0xF0000148) \
  /* VSX Scalar Multiply-Add Type-M Single-Precision */ \
  V(xsmaddmsp, XSMADDMSP, 0xF0000048) \
  /* VSX Scalar Maximum Double-Precision */ \
  V(xsmaxdp, XSMAXDP, 0xF0000500) \
  /* VSX Scalar Minimum Double-Precision */ \
  V(xsmindp, XSMINDP, 0xF0000540) \
  /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \
  V(xsmsubadp, XSMSUBADP, 0xF0000188) \
  /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \
  V(xsmsubasp, XSMSUBASP, 0xF0000088) \
  /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \
  V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \
  /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \
  V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \
  /* VSX Scalar Multiply Double-Precision */ \
  V(xsmuldp, XSMULDP, 0xF0000180) \
  /* VSX Scalar Multiply Single-Precision */ \
  V(xsmulsp, XSMULSP, 0xF0000080) \
  /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \
  V(xsnmaddadp, XSNMADDADP, 0xF0000508) \
  /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \
  V(xsnmaddasp, XSNMADDASP, 0xF0000408) \
  /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \
  V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \
  /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \
  V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \
  /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \
  V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \
  /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \
  V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \
  /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \
  V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \
  /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \
  V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \
  /* VSX Scalar Reciprocal Estimate Double-Precision */ \
  V(xsredp, XSREDP, 0xF0000168) \
  /* VSX Scalar Reciprocal Estimate Single-Precision */ \
  V(xsresp, XSRESP, 0xF0000068) \
  /* VSX Scalar Subtract Double-Precision */ \
  V(xssubdp, XSSUBDP, 0xF0000140) \
  /* VSX Scalar Subtract Single-Precision */ \
  V(xssubsp, XSSUBSP, 0xF0000040) \
  /* VSX Scalar Test for software Divide Double-Precision */ \
  V(xstdivdp, XSTDIVDP, 0xF00001E8) \
  /* VSX Vector Add Double-Precision */ \
  V(xvadddp, XVADDDP, 0xF0000300) \
  /* VSX Vector Add Single-Precision */ \
  V(xvaddsp, XVADDSP, 0xF0000200) \
  /* VSX Vector Compare Equal To Double-Precision */ \
  V(xvcmpeqdp, XVCMPEQDP, 0xF0000318) \
  /* VSX Vector Compare Equal To Double-Precision & record CR6 */ \
  V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718) \
  /* VSX Vector Compare Equal To Single-Precision */ \
  V(xvcmpeqsp, XVCMPEQSP, 0xF0000218) \
  /* VSX Vector Compare Equal To Single-Precision & record CR6 */ \
  V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618) \
  /* VSX Vector Compare Greater Than or Equal To Double-Precision */ \
  V(xvcmpgedp, XVCMPGEDP, 0xF0000398) \
  /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \
  /* CR6 */ \
  V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798) \
  /* VSX Vector Compare Greater Than or Equal To Single-Precision */ \
  V(xvcmpgesp, XVCMPGESP, 0xF0000298) \
  /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \
  /* CR6 */ \
  V(xvcmpgespx, XVCMPGESPx, 0xF0000698) \
  /* VSX Vector Compare Greater Than Double-Precision */ \
  V(xvcmpgtdp, XVCMPGTDP, 0xF0000358) \
  /* VSX Vector Compare Greater Than Double-Precision & record CR6 */ \
  V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758) \
  /* VSX Vector Compare Greater Than Single-Precision */ \
  V(xvcmpgtsp, XVCMPGTSP, 0xF0000258) \
  /* VSX Vector Compare Greater Than Single-Precision & record CR6 */ \
  V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658) \
  /* VSX Vector Copy Sign Double-Precision */ \
  V(xvcpsgndp, XVCPSGNDP, 0xF0000780) \
  /* VSX Vector Copy Sign Single-Precision */ \
  V(xvcpsgnsp, XVCPSGNSP, 0xF0000680) \
  /* VSX Vector Divide Double-Precision */ \
  V(xvdivdp, XVDIVDP, 0xF00003C0) \
  /* VSX Vector Divide Single-Precision */ \
  V(xvdivsp, XVDIVSP, 0xF00002C0) \
  /* VSX Vector Multiply-Add Type-A Double-Precision */ \
  V(xvmaddadp, XVMADDADP, 0xF0000308) \
  /* VSX Vector Multiply-Add Type-A Single-Precision */ \
  V(xvmaddasp, XVMADDASP, 0xF0000208) \
  /* VSX Vector Multiply-Add Type-M Double-Precision */ \
  V(xvmaddmdp, XVMADDMDP, 0xF0000348) \
  /* VSX Vector Multiply-Add Type-M Single-Precision */ \
  V(xvmaddmsp, XVMADDMSP, 0xF0000248) \
  /* VSX Vector Maximum Double-Precision */ \
  V(xvmaxdp, XVMAXDP, 0xF0000700) \
  /* VSX Vector Maximum Single-Precision */ \
  V(xvmaxsp, XVMAXSP, 0xF0000600) \
  /* VSX Vector Minimum Double-Precision */ \
  V(xvmindp, XVMINDP, 0xF0000740) \
  /* VSX Vector Minimum Single-Precision */ \
  V(xvminsp, XVMINSP, 0xF0000640) \
  /* VSX Vector Multiply-Subtract Type-A Double-Precision */ \
  V(xvmsubadp, XVMSUBADP, 0xF0000388) \
  /* VSX Vector Multiply-Subtract Type-A Single-Precision */ \
  V(xvmsubasp, XVMSUBASP, 0xF0000288) \
  /* VSX Vector Multiply-Subtract Type-M Double-Precision */ \
  V(xvmsubmdp, XVMSUBMDP, 0xF00003C8) \
  /* VSX Vector Multiply-Subtract Type-M Single-Precision */ \
  V(xvmsubmsp, XVMSUBMSP, 0xF00002C8) \
  /* VSX Vector Multiply Double-Precision */ \
  V(xvmuldp, XVMULDP, 0xF0000380) \
  /* VSX Vector Multiply Single-Precision */ \
  V(xvmulsp, XVMULSP, 0xF0000280) \
  /* VSX Vector Negative Multiply-Add Type-A Double-Precision */ \
  V(xvnmaddadp, XVNMADDADP, 0xF0000708) \
  /* VSX Vector Negative Multiply-Add Type-A Single-Precision */ \
  V(xvnmaddasp, XVNMADDASP, 0xF0000608) \
  /* VSX Vector Negative Multiply-Add Type-M Double-Precision */ \
  V(xvnmaddmdp, XVNMADDMDP, 0xF0000748) \
  /* VSX Vector Negative Multiply-Add Type-M Single-Precision */ \
  V(xvnmaddmsp, XVNMADDMSP, 0xF0000648) \
  /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */ \
  V(xvnmsubadp, XVNMSUBADP, 0xF0000788) \
  /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */ \
  V(xvnmsubasp, XVNMSUBASP, 0xF0000688) \
  /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */ \
  V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8) \
  /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */ \
  V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8) \
  /* VSX Vector Reciprocal Estimate Double-Precision */ \
  V(xvredp, XVREDP, 0xF0000368) \
  /* VSX Vector Reciprocal Estimate Single-Precision */ \
  V(xvresp, XVRESP, 0xF0000268) \
  /* VSX Vector Subtract Double-Precision */ \
  V(xvsubdp, XVSUBDP, 0xF0000340) \
  /* VSX Vector Subtract Single-Precision */ \
  V(xvsubsp, XVSUBSP, 0xF0000240) \
  /* VSX Vector Test for software Divide Double-Precision */ \
  V(xvtdivdp, XVTDIVDP, 0xF00003E8) \
  /* VSX Vector Test for software Divide Single-Precision */ \
  V(xvtdivsp, XVTDIVSP, 0xF00002E8) \
  /* VSX Logical AND */ \
  V(xxland, XXLAND, 0xF0000410) \
  /* VSX Logical AND with Complement */ \
  V(xxlandc, XXLANDC, 0xF0000450) \
  /* VSX Logical Equivalence */ \
  V(xxleqv, XXLEQV, 0xF00005D0) \
  /* VSX Logical NAND */ \
  V(xxlnand, XXLNAND, 0xF0000590) \
  /* VSX Logical NOR */ \
  V(xxlnor, XXLNOR, 0xF0000510) \
  /* VSX Logical OR */ \
  V(xxlor, XXLOR, 0xF0000490) \
  /* VSX Logical OR with Complement */ \
  V(xxlorc, XXLORC, 0xF0000550) \
  /* VSX Logical XOR */ \
  V(xxlxor, XXLXOR, 0xF00004D0) \
  /* VSX Merge High Word */ \
  V(xxmrghw, XXMRGHW, 0xF0000090) \
  /* VSX Merge Low Word */ \
  V(xxmrglw, XXMRGLW, 0xF0000190) \
  /* VSX Permute Doubleword Immediate */ \
  V(xxpermdi, XXPERMDI, 0xF0000050) \
  /* VSX Shift Left Double by Word Immediate */ \
  V(xxsldwi, XXSLDWI, 0xF0000010) \
  /* VSX Splat Word */ \
  V(xxspltw, XXSPLTW, 0xF0000290)

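// A sketch of how these opcode lists are consumed (assumed usage; the
// expansion site lies outside this excerpt): each PPC_*_OPCODE_LIST is
// instantiated with a callback macro that picks out the fields it needs,
// e.g. to declare one enumerator per instruction:
//
//   #define DECLARE_OPCODE(name, opcode_name, opcode_value) \
//     opcode_name = opcode_value,
//   enum XX3Opcode : uint32_t { PPC_XX3_OPCODE_LIST(DECLARE_OPCODE) };
//   #undef DECLARE_OPCODE
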
#define PPC_Z23_OPCODE_LIST(V) \
  /* Decimal Quantize */ \
  V(dqua, DQUA, 0xEC000006) \
  /* Decimal Quantize Immediate */ \
  V(dquai, DQUAI, 0xEC000086) \
  /* Decimal Quantize Immediate Quad */ \
  V(dquaiq, DQUAIQ, 0xFC000086) \
  /* Decimal Quantize Quad */ \
  V(dquaq, DQUAQ, 0xFC000006) \
  /* Decimal Floating Round To FP Integer Without Inexact */ \
  V(drintn, DRINTN, 0xEC0001C6) \
  /* Decimal Floating Round To FP Integer Without Inexact Quad */ \
  V(drintnq, DRINTNQ, 0xFC0001C6) \
  /* Decimal Floating Round To FP Integer With Inexact */ \
  V(drintx, DRINTX, 0xEC0000C6) \
  /* Decimal Floating Round To FP Integer With Inexact Quad */ \
  V(drintxq, DRINTXQ, 0xFC0000C6) \
  /* Decimal Floating Reround */ \
  V(drrnd, DRRND, 0xEC000046) \
  /* Decimal Floating Reround Quad */ \
  V(drrndq, DRRNDQ, 0xFC000046)

#define PPC_Z22_OPCODE_LIST(V) \
  /* Decimal Floating Shift Coefficient Left Immediate */ \
  V(dscli, DSCLI, 0xEC000084) \
  /* Decimal Floating Shift Coefficient Left Immediate Quad */ \
  V(dscliq, DSCLIQ, 0xFC000084) \
  /* Decimal Floating Shift Coefficient Right Immediate */ \
  V(dscri, DSCRI, 0xEC0000C4) \
  /* Decimal Floating Shift Coefficient Right Immediate Quad */ \
  V(dscriq, DSCRIQ, 0xFC0000C4) \
  /* Decimal Floating Test Data Class */ \
  V(dtstdc, DTSTDC, 0xEC000184) \
  /* Decimal Floating Test Data Class Quad */ \
  V(dtstdcq, DTSTDCQ, 0xFC000184) \
  /* Decimal Floating Test Data Group */ \
  V(dtstdg, DTSTDG, 0xEC0001C4) \
  /* Decimal Floating Test Data Group Quad */ \
  V(dtstdgq, DTSTDGQ, 0xFC0001C4)

#define PPC_XX2_OPCODE_LIST(V) \
  /* Move To VSR Doubleword */ \
  V(mtvsrd, MTVSRD, 0x7C000166) \
  /* Move To VSR Word Algebraic */ \
  V(mtvsrwa, MTVSRWA, 0x7C0001A6) \
  /* Move To VSR Word and Zero */ \
  V(mtvsrwz, MTVSRWZ, 0x7C0001E6) \
  /* VSX Scalar Absolute Value Double-Precision */ \
  V(xsabsdp, XSABSDP, 0xF0000564) \
  /* VSX Scalar Convert Double-Precision to Single-Precision */ \
  V(xscvdpsp, XSCVDPSP, 0xF0000424) \
  /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
  /* signalling */ \
  V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
  /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */ \
  V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \
  /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */ \
  /* Saturate */ \
  V(xscvdpsxws, XSCVDPSXWS, 0xF0000160) \
  /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */ \
  /* Doubleword Saturate */ \
  V(xscvdpuxds, XSCVDPUXDS, 0xF0000520) \
  /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */ \
  /* Saturate */ \
  V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \
  /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \
  V(xscvspdp, XSCVSPDP, 0xF0000524) \
  /* Scalar Convert Single-Precision to Double-Precision format Non- */ \
  /* signalling */ \
  V(xscvspdpn, XSCVSPDPN, 0xF000052C) \
  /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
  V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \
  /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
  V(xscvsxdsp, XSCVSXDSP, 0xF00004E0) \
  /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */ \
  /* Precision */ \
  V(xscvuxddp, XSCVUXDDP, 0xF00005A0) \
  /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */ \
  /* Precision */ \
  V(xscvuxdsp, XSCVUXDSP, 0xF00004A0) \
  /* VSX Scalar Negative Absolute Value Double-Precision */ \
  V(xsnabsdp, XSNABSDP, 0xF00005A4) \
  /* VSX Scalar Negate Double-Precision */ \
  V(xsnegdp, XSNEGDP, 0xF00005E4) \
  /* VSX Scalar Round to Double-Precision Integer */ \
  V(xsrdpi, XSRDPI, 0xF0000124) \
  /* VSX Scalar Round to Double-Precision Integer using Current rounding */ \
  /* mode */ \
  V(xsrdpic, XSRDPIC, 0xF00001AC) \
  /* VSX Scalar Round to Double-Precision Integer toward -Infinity */ \
  V(xsrdpim, XSRDPIM, 0xF00001E4) \
  /* VSX Scalar Round to Double-Precision Integer toward +Infinity */ \
  V(xsrdpip, XSRDPIP, 0xF00001A4) \
  /* VSX Scalar Round to Double-Precision Integer toward Zero */ \
  V(xsrdpiz, XSRDPIZ, 0xF0000164) \
  /* VSX Scalar Round to Single-Precision */ \
  V(xsrsp, XSRSP, 0xF0000464) \
  /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */ \
  V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \
  /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \
  V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \
  /* VSX Scalar Square Root Double-Precision */ \
  V(xssqrtdp, XSSQRTDP, 0xF000012C) \
  /* VSX Scalar Square Root Single-Precision */ \
  V(xssqrtsp, XSSQRTSP, 0xF000002C) \
  /* VSX Scalar Test for software Square Root Double-Precision */ \
  V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \
  /* VSX Vector Absolute Value Double-Precision */ \
  V(xvabsdp, XVABSDP, 0xF0000764) \
  /* VSX Vector Absolute Value Single-Precision */ \
  V(xvabssp, XVABSSP, 0xF0000664) \
  /* VSX Vector Convert Double-Precision to Single-Precision */ \
  V(xvcvdpsp, XVCVDPSP, 0xF0000624) \
  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */ \
  V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760) \
  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \
  /* Saturate */ \
  V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \
  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */ \
  /* Doubleword Saturate */ \
  V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720) \
  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \
  /* Saturate */ \
  V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320) \
  /* VSX Vector Convert Single-Precision to Double-Precision */ \
  V(xvcvspdp, XVCVSPDP, 0xF0000724) \
  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */ \
  V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \
  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
  /* Saturate */ \
  V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \
  /* Doubleword Saturate */ \
  V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \
  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
  /* Saturate */ \
  V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
  /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
  V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \
  /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
  V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \
  /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \
  V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \
  /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
  V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \
  /* Precision */ \
  V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \
  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */ \
  /* Precision */ \
  V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \
  /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */ \
  V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0) \
  /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
  V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \
  /* VSX Vector Negative Absolute Value Double-Precision */ \
  V(xvnabsdp, XVNABSDP, 0xF00007A4) \
  /* VSX Vector Negative Absolute Value Single-Precision */ \
  V(xvnabssp, XVNABSSP, 0xF00006A4) \
  /* VSX Vector Negate Double-Precision */ \
  V(xvnegdp, XVNEGDP, 0xF00007E4) \
  /* VSX Vector Negate Single-Precision */ \
  V(xvnegsp, XVNEGSP, 0xF00006E4) \
  /* VSX Vector Round to Double-Precision Integer */ \
  V(xvrdpi, XVRDPI, 0xF0000324) \
  /* VSX Vector Round to Double-Precision Integer using Current rounding */ \
  /* mode */ \
  V(xvrdpic, XVRDPIC, 0xF00003AC) \
  /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \
  V(xvrdpim, XVRDPIM, 0xF00003E4) \
  /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \
  V(xvrdpip, XVRDPIP, 0xF00003A4) \
  /* VSX Vector Round to Double-Precision Integer toward Zero */ \
  V(xvrdpiz, XVRDPIZ, 0xF0000364) \
  /* VSX Vector Round to Single-Precision Integer */ \
  V(xvrspi, XVRSPI, 0xF0000224) \
  /* VSX Vector Round to Single-Precision Integer using Current rounding */ \
  /* mode */ \
  V(xvrspic, XVRSPIC, 0xF00002AC) \
  /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \
  V(xvrspim, XVRSPIM, 0xF00002E4) \
  /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \
  V(xvrspip, XVRSPIP, 0xF00002A4) \
  /* VSX Vector Round to Single-Precision Integer toward Zero */ \
  V(xvrspiz, XVRSPIZ, 0xF0000264) \
  /* VSX Vector Reciprocal Square Root Estimate Double-Precision */ \
  V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328) \
  /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
  V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
  /* VSX Vector Square Root Double-Precision */ \
  V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
  /* VSX Vector Square Root Single-Precision */ \
  V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
  /* VSX Vector Test for software Square Root Double-Precision */ \
  V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8) \
  /* VSX Vector Test for software Square Root Single-Precision */ \
  V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8)

#define PPC_EVX_OPCODE_LIST(V) \
  /* Vector Load Double Word into Double Word by External PID Indexed */ \
  V(evlddepx, EVLDDEPX, 0x7C00063E) \
  /* Vector Store Double of Double by External PID Indexed */ \
  V(evstddepx, EVSTDDEPX, 0x7C00073E) \
  /* Bit Reversed Increment */ \
  V(brinc, BRINC, 0x1000020F) \
  /* Vector Absolute Value */ \
  V(evabs, EVABS, 0x10000208) \
  /* Vector Add Immediate Word */ \
  V(evaddiw, EVADDIW, 0x10000202) \
  /* Vector Add Signed, Modulo, Integer to Accumulator Word */ \
  V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9) \
  /* Vector Add Signed, Saturate, Integer to Accumulator Word */ \
  V(evaddssiaaw, EVADDSSIAAW, 0x100004C1) \
  /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */ \
  V(evaddumiaaw, EVADDUMIAAW, 0x100004C8) \
  /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */ \
  V(evaddusiaaw, EVADDUSIAAW, 0x100004C0) \
  /* Vector Add Word */ \
  V(evaddw, EVADDW, 0x10000200) \
  /* Vector AND */ \
  V(evand, EVAND, 0x10000211) \
  /* Vector AND with Complement */ \
  V(evandc, EVANDC, 0x10000212) \
  /* Vector Compare Equal */ \
  V(evcmpeq, EVCMPEQ, 0x10000234) \
  /* Vector Compare Greater Than Signed */ \
  V(evcmpgts, EVCMPGTS, 0x10000231) \
  /* Vector Compare Greater Than Unsigned */ \
  V(evcmpgtu, EVCMPGTU, 0x10000230) \
  /* Vector Compare Less Than Signed */ \
  V(evcmplts, EVCMPLTS, 0x10000233) \
  /* Vector Compare Less Than Unsigned */ \
  V(evcmpltu, EVCMPLTU, 0x10000232) \
  /* Vector Count Leading Signed Bits Word */ \
  V(evcntlsw, EVCNTLSW, 0x1000020E) \
  /* Vector Count Leading Zeros Word */ \
  V(evcntlzw, EVCNTLZW, 0x1000020D) \
  /* Vector Divide Word Signed */ \
  V(evdivws, EVDIVWS, 0x100004C6) \
  /* Vector Divide Word Unsigned */ \
  V(evdivwu, EVDIVWU, 0x100004C7) \
  /* Vector Equivalent */ \
  V(eveqv, EVEQV, 0x10000219) \
  /* Vector Extend Sign Byte */ \
  V(evextsb, EVEXTSB, 0x1000020A) \
  /* Vector Extend Sign Half Word */ \
  V(evextsh, EVEXTSH, 0x1000020B) \
  /* Vector Load Double Word into Double Word */ \
  V(evldd, EVLDD, 0x10000301) \
  /* Vector Load Double Word into Double Word Indexed */ \
  V(evlddx, EVLDDX, 0x10000300) \
  /* Vector Load Double into Four Half Words */ \
  V(evldh, EVLDH, 0x10000305) \
  /* Vector Load Double into Four Half Words Indexed */ \
  V(evldhx, EVLDHX, 0x10000304) \
  /* Vector Load Double into Two Words */ \
  V(evldw, EVLDW, 0x10000303) \
  /* Vector Load Double into Two Words Indexed */ \
  V(evldwx, EVLDWX, 0x10000302) \
  /* Vector Load Half Word into Half Words Even and Splat */ \
  V(evlhhesplat, EVLHHESPLAT, 0x10000309) \
  /* Vector Load Half Word into Half Words Even and Splat Indexed */ \
  V(evlhhesplatx, EVLHHESPLATX, 0x10000308) \
  /* Vector Load Half Word into Half Word Odd Signed and Splat */ \
  V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F) \
  /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */ \
  V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E) \
  /* Vector Load Half Word into Half Word Odd Unsigned and Splat */ \
  V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D) \
  /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */ \
  V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \
  /* Vector Load Word into Two Half Words Even */ \
  V(evlwhe, EVLWHE, 0x10000311) \
  /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \
  V(evlwhos, EVLWHOS, 0x10000317) \
  /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \
  /* extension) */ \
  V(evlwhosx, EVLWHOSX, 0x10000316) \
  /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */ \
  V(evlwhou, EVLWHOU, 0x10000315) \
  /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */ \
  /* extended) */ \
  V(evlwhoux, EVLWHOUX, 0x10000314) \
  /* Vector Load Word into Two Half Words and Splat */ \
  V(evlwhsplat, EVLWHSPLAT, 0x1000031D) \
  /* Vector Load Word into Two Half Words and Splat Indexed */ \
  V(evlwhsplatx, EVLWHSPLATX, 0x1000031C) \
  /* Vector Load Word into Word and Splat */ \
  V(evlwwsplat, EVLWWSPLAT, 0x10000319) \
  /* Vector Load Word into Word and Splat Indexed */ \
  V(evlwwsplatx, EVLWWSPLATX, 0x10000318) \
  /* Vector Merge High */ \
  V(evmergehi, EVMERGEHI, 0x1000022C) \
  /* Vector Merge High/Low */ \
  V(evmergehilo, EVMERGEHILO, 0x1000022E) \
  /* Vector Merge Low */ \
  V(evmergelo, EVMERGELO, 0x1000022D) \
  /* Vector Merge Low/High */ \
  V(evmergelohi, EVMERGELOHI, 0x1000022F) \
  /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
  /* and Accumulate */ \
  V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B) \
  /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
  /* and Accumulate Negative */ \
  V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB) \
  /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
  /* and Accumulate */ \
  V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529) \
  /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
  /* and Accumulate Negative */ \
  V(evmhegsmian, EVMHEGSMIAN, 0x100005A9) \
  /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
  /* and Accumulate */ \
  V(evmhegumiaa, EVMHEGUMIAA, 0x10000528) \
  /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
  /* and Accumulate Negative */ \
  V(evmhegumian, EVMHEGUMIAN, 0x100005A8) \
  /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */ \
  V(evmhesmf, EVMHESMF, 0x1000040B) \
  /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */ \
  /* Accumulator */ \
  V(evmhesmfa, EVMHESMFA, 0x1000042B) \
  /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
  /* Accumulate into Words */ \
  V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B) \
  /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
  /* Accumulate Negative into Words */ \
  V(evmhesmfanw, EVMHESMFANW, 0x1000058B) \
  /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */ \
  V(evmhesmi, EVMHESMI, 0x10000409) \
  /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */ \
  /* Accumulator */ \
  V(evmhesmia, EVMHESMIA, 0x10000429) \
  /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
  /* Accumulate into Words */ \
  V(evmhesmiaaw, EVMHESMIAAW, 0x10000509) \
  /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
  /* Accumulate Negative into Words */ \
  V(evmhesmianw, EVMHESMIANW, 0x10000589) \
  /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */ \
  V(evmhessf, EVMHESSF, 0x10000403) \
  /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */ \
  /* Accumulator */ \
  V(evmhessfa, EVMHESSFA, 0x10000423) \
  /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
  /* Accumulate into Words */ \
  V(evmhessfaaw, EVMHESSFAAW, 0x10000503) \
  /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
  /* Accumulate Negative into Words */ \
  V(evmhessfanw, EVMHESSFANW, 0x10000583) \
  /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
  /* Accumulate into Words */ \
  V(evmhessiaaw, EVMHESSIAAW, 0x10000501) \
  /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
  /* Accumulate Negative into Words */ \
  V(evmhessianw, EVMHESSIANW, 0x10000581) \
  /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */ \
  V(evmheumi, EVMHEUMI, 0x10000408) \
  /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */ \
  /* Accumulator */ \
  V(evmheumia, EVMHEUMIA, 0x10000428) \
  /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
  /* Accumulate into Words */ \
  V(evmheumiaaw, EVMHEUMIAAW, 0x10000508) \
  /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
  /* Accumulate Negative into Words */ \
  V(evmheumianw, EVMHEUMIANW, 0x10000588) \
  /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
  /* Accumulate into Words */ \
  V(evmheusiaaw, EVMHEUSIAAW, 0x10000500) \
  /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
  /* Accumulate Negative into Words */ \
  V(evmheusianw, EVMHEUSIANW, 0x10000580) \
  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
  /* and Accumulate */ \
  V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F) \
  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
  /* and Accumulate Negative */ \
  V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF) \
  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */ \
  /* and Accumulate */ \
  V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D) \
  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \
  /* Accumulate Negative */ \
  V(evmhogsmian, EVMHOGSMIAN, 0x100005AD) \
  /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
  /* and Accumulate */ \
  V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C) \
  /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
  /* and Accumulate Negative */ \
  V(evmhogumian, EVMHOGUMIAN, 0x100005AC) \
  /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */ \
  V(evmhosmf, EVMHOSMF, 0x1000040F) \
  /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */ \
  /* Accumulator */ \
  V(evmhosmfa, EVMHOSMFA, 0x1000042F) \
  /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
  /* Accumulate into Words */ \
  V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F) \
  /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
  /* Accumulate Negative into Words */ \
  V(evmhosmfanw, EVMHOSMFANW, 0x1000058F) \
  /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */ \
  V(evmhosmi, EVMHOSMI, 0x1000040D) \
  /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */ \
  /* Accumulator */ \
  V(evmhosmia, EVMHOSMIA, 0x1000042D) \
  /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
  /* Accumulate into Words */ \
  V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D) \
  /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
  /* Accumulate Negative into Words */ \
  V(evmhosmianw, EVMHOSMIANW, 0x1000058D) \
  /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */ \
  V(evmhossf, EVMHOSSF, 0x10000407) \
  /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */ \
  /* Accumulator */ \
  V(evmhossfa, EVMHOSSFA, 0x10000427) \
  /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
  /* Accumulate into Words */ \
  V(evmhossfaaw, EVMHOSSFAAW, 0x10000507) \
  /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
  /* Accumulate Negative into Words */ \
  V(evmhossfanw, EVMHOSSFANW, 0x10000587) \
  /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
  /* Accumulate into Words */ \
  V(evmhossiaaw, EVMHOSSIAAW, 0x10000505) \
  /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
  /* Accumulate Negative into Words */ \
  V(evmhossianw, EVMHOSSIANW, 0x10000585) \
  /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */ \
  V(evmhoumi, EVMHOUMI, 0x1000040C) \
  /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */ \
  /* Accumulator */ \
  V(evmhoumia, EVMHOUMIA, 0x1000042C) \
  /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
  /* Accumulate into Words */ \
  V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C) \
  /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
  /* Accumulate Negative into Words */ \
  V(evmhoumianw, EVMHOUMIANW, 0x1000058C) \
  /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
  /* Accumulate into Words */ \
  V(evmhousiaaw, EVMHOUSIAAW, 0x10000504) \
  /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
  /* Accumulate Negative into Words */ \
  V(evmhousianw, EVMHOUSIANW, 0x10000584) \
  /* Initialize Accumulator */ \
  V(evmra, EVMRA, 0x100004C4) \
  /* Vector Multiply Word High Signed, Modulo, Fractional */ \
  V(evmwhsmf, EVMWHSMF, 0x1000044F) \
  /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */ \
  V(evmwhsmfa, EVMWHSMFA, 0x1000046F) \
  /* Vector Multiply Word High Signed, Modulo, Integer */ \
  V(evmwhsmi, EVMWHSMI, 0x1000044D) \
  /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */ \
  V(evmwhsmia, EVMWHSMIA, 0x1000046D) \
  /* Vector Multiply Word High Signed, Saturate, Fractional */ \
  V(evmwhssf, EVMWHSSF, 0x10000447) \
  /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \
  V(evmwhssfa, EVMWHSSFA, 0x10000467) \
  /* Vector Multiply Word High Unsigned, Modulo, Integer */ \
  V(evmwhumi, EVMWHUMI, 0x1000044C) \
  /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */ \
  V(evmwhumia, EVMWHUMIA, 0x1000046C) \
  /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */ \
  /* Words */ \
  V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549) \
  /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */ \
  /* Negative in Words */ \
  V(evmwlsmianw, EVMWLSMIANW, 0x100005C9) \
  /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */ \
  /* Words */ \
  V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541) \
  /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */ \
  /* Negative in Words */ \
  V(evmwlssianw, EVMWLSSIANW, 0x100005C1) \
  /* Vector Multiply Word Low Unsigned, Modulo, Integer */ \
  V(evmwlumi, EVMWLUMI, 0x10000448) \
  /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */ \
  V(evmwlumia, EVMWLUMIA, 0x10000468) \
  /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */ \
  /* Words */ \
  V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548) \
  /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */ \
  /* Negative in Words */ \
  V(evmwlumianw, EVMWLUMIANW, 0x100005C8) \
  /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
  /* in Words */ \
  V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540) \
  /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
  /* Negative in Words */ \
  V(evmwlusianw, EVMWLUSIANW, 0x100005C0) \
  /* Vector Multiply Word Signed, Modulo, Fractional */ \
  V(evmwsmf, EVMWSMF, 0x1000045B) \
  /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */ \
  V(evmwsmfa, EVMWSMFA, 0x1000047B) \
  /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
  V(evmwsmfaa, EVMWSMFAA, 0x1000055B) \
  /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
  /* Negative */ \
  V(evmwsmfan, EVMWSMFAN, 0x100005DB) \
  /* Vector Multiply Word Signed, Modulo, Integer */ \
  V(evmwsmi, EVMWSMI, 0x10000459) \
  /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */ \
  V(evmwsmia, EVMWSMIA, 0x10000479) \
  /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */ \
  V(evmwsmiaa, EVMWSMIAA, 0x10000559) \
  /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */ \
  V(evmwsmian, EVMWSMIAN, 0x100005D9) \
  /* Vector Multiply Word Signed, Saturate, Fractional */ \
  V(evmwssf, EVMWSSF, 0x10000453) \
  /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */ \
  V(evmwssfa, EVMWSSFA, 0x10000473) \
  /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
  V(evmwssfaa, EVMWSSFAA, 0x10000553) \
  /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
  /* Negative */ \
  V(evmwssfan, EVMWSSFAN, 0x100005D3) \
  /* Vector Multiply Word Unsigned, Modulo, Integer */ \
  V(evmwumi, EVMWUMI, 0x10000458) \
  /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */ \
  V(evmwumia, EVMWUMIA, 0x10000478) \
  /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
  V(evmwumiaa, EVMWUMIAA, 0x10000558) \
  /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
  /* Negative */ \
  V(evmwumian, EVMWUMIAN, 0x100005D8) \
  /* Vector NAND */ \
  V(evnand, EVNAND, 0x1000021E) \
  /* Vector Negate */ \
  V(evneg, EVNEG, 0x10000209) \
  /* Vector NOR */ \
  V(evnor, EVNOR, 0x10000218) \
  /* Vector OR */ \
  V(evor, EVOR, 0x10000217) \
  /* Vector OR with Complement */ \
  V(evorc, EVORC, 0x1000021B) \
  /* Vector Rotate Left Word */ \
  V(evrlw, EVRLW, 0x10000228) \
  /* Vector Rotate Left Word Immediate */ \
  V(evrlwi, EVRLWI, 0x1000022A) \
  /* Vector Round Word */ \
  V(evrndw, EVRNDW, 0x1000020C) \
  /* Vector Shift Left Word */ \
  V(evslw, EVSLW, 0x10000224) \
  /* Vector Shift Left Word Immediate */ \
  V(evslwi, EVSLWI, 0x10000226) \
  /* Vector Splat Fractional Immediate */ \
  V(evsplatfi, EVSPLATFI, 0x1000022B) \
  /* Vector Splat Immediate */ \
  V(evsplati, EVSPLATI, 0x10000229) \
  /* Vector Shift Right Word Immediate Signed */ \
  V(evsrwis, EVSRWIS, 0x10000223) \
  /* Vector Shift Right Word Immediate Unsigned */ \
  V(evsrwiu, EVSRWIU, 0x10000222) \
  /* Vector Shift Right Word Signed */ \
  V(evsrws, EVSRWS, 0x10000221) \
  /* Vector Shift Right Word Unsigned */ \
  V(evsrwu, EVSRWU, 0x10000220) \
  /* Vector Store Double of Double */ \
  V(evstdd, EVSTDD, 0x10000321) \
  /* Vector Store Double of Double Indexed */ \
  V(evstddx, EVSTDDX, 0x10000320) \
  /* Vector Store Double of Four Half Words */ \
  V(evstdh, EVSTDH, 0x10000325) \
  /* Vector Store Double of Four Half Words Indexed */ \
  V(evstdhx, EVSTDHX, 0x10000324) \
  /* Vector Store Double of Two Words */ \
  V(evstdw, EVSTDW, 0x10000323) \
  /* Vector Store Double of Two Words Indexed */ \
  V(evstdwx, EVSTDWX, 0x10000322) \
  /* Vector Store Word of Two Half Words from Even */ \
  V(evstwhe, EVSTWHE, 0x10000331) \
  /* Vector Store Word of Two Half Words from Even Indexed */ \
  V(evstwhex, EVSTWHEX, 0x10000330) \
  /* Vector Store Word of Two Half Words from Odd */ \
  V(evstwho, EVSTWHO, 0x10000335) \
  /* Vector Store Word of Two Half Words from Odd Indexed */ \
  V(evstwhox, EVSTWHOX, 0x10000334) \
  /* Vector Store Word of Word from Even */ \
  V(evstwwe, EVSTWWE, 0x10000339) \
  /* Vector Store Word of Word from Even Indexed */ \
  V(evstwwex, EVSTWWEX, 0x10000338) \
  /* Vector Store Word of Word from Odd */ \
  V(evstwwo, EVSTWWO, 0x1000033D) \
  /* Vector Store Word of Word from Odd Indexed */ \
  V(evstwwox, EVSTWWOX, 0x1000033C) \
  /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */ \
  V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB) \
  /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */ \
  V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3) \
  /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */ \
  V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA) \
  /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */ \
  V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2) \
  /* Vector Subtract from Word */ \
  V(evsubfw, EVSUBFW, 0x10000204) \
  /* Vector Subtract Immediate from Word */ \
  V(evsubifw, EVSUBIFW, 0x10000206) \
  /* Vector XOR */ \
  V(evxor, EVXOR, 0x10000216) \
  /* Floating-Point Double-Precision Absolute Value */ \
  V(efdabs, EFDABS, 0x100002E4) \
  /* Floating-Point Double-Precision Add */ \
  V(efdadd, EFDADD, 0x100002E0) \
  /* Floating-Point Double-Precision Convert from Single-Precision */ \
  V(efdcfs, EFDCFS, 0x100002EF) \
  /* Convert Floating-Point Double-Precision from Signed Fraction */ \
  V(efdcfsf, EFDCFSF, 0x100002F3) \
  /* Convert Floating-Point Double-Precision from Signed Integer */ \
  V(efdcfsi, EFDCFSI, 0x100002F1) \
  /* Convert Floating-Point Double-Precision from Signed Integer */ \
  /* Doubleword */ \
  V(efdcfsid, EFDCFSID, 0x100002E3) \
  /* Convert Floating-Point Double-Precision from Unsigned Fraction */ \
  V(efdcfuf, EFDCFUF, 0x100002F2) \
  /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
  V(efdcfui, EFDCFUI, 0x100002F0) \
  /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
  /* Doubleword */ \
  V(efdcfuid, EFDCFUID, 0x100002E2) \
  /* Floating-Point Double-Precision Compare Equal */ \
  V(efdcmpeq, EFDCMPEQ, 0x100002EE) \
  /* Floating-Point Double-Precision Compare Greater Than */ \
  V(efdcmpgt, EFDCMPGT, 0x100002EC) \
  /* Floating-Point Double-Precision Compare Less Than */ \
  V(efdcmplt, EFDCMPLT, 0x100002ED) \
  /* Convert Floating-Point Double-Precision to Signed Fraction */ \
  V(efdctsf, EFDCTSF, 0x100002F7) \
  /* Convert Floating-Point Double-Precision to Signed Integer */ \
  V(efdctsi, EFDCTSI, 0x100002F5) \
  /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */ \
  /* with Round toward Zero */ \
  V(efdctsidz, EFDCTSIDZ, 0x100002EB) \
  /* Convert Floating-Point Double-Precision to Signed Integer with Round */ \
  /* toward Zero */ \
  V(efdctsiz, EFDCTSIZ, 0x100002FA) \
  /* Convert Floating-Point Double-Precision to Unsigned Fraction */ \
  V(efdctuf, EFDCTUF, 0x100002F6) \
  /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
  V(efdctui, EFDCTUI, 0x100002F4) \
  /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
  /* Doubleword with Round toward Zero */ \
  V(efdctuidz, EFDCTUIDZ, 0x100002EA) \
  /* Convert Floating-Point Double-Precision to Unsigned Integer with */ \
  /* Round toward Zero */ \
  V(efdctuiz, EFDCTUIZ, 0x100002F8) \
  /* Floating-Point Double-Precision Divide */ \
  V(efddiv, EFDDIV, 0x100002E9) \
  /* Floating-Point Double-Precision Multiply */ \
  V(efdmul, EFDMUL, 0x100002E8) \
  /* Floating-Point Double-Precision Negative Absolute Value */ \
  V(efdnabs, EFDNABS, 0x100002E5) \
  /* Floating-Point Double-Precision Negate */ \
  V(efdneg, EFDNEG, 0x100002E6) \
  /* Floating-Point Double-Precision Subtract */ \
  V(efdsub, EFDSUB, 0x100002E1) \
  /* Floating-Point Double-Precision Test Equal */ \
  V(efdtsteq, EFDTSTEQ, 0x100002FE) \
  /* Floating-Point Double-Precision Test Greater Than */ \
  V(efdtstgt, EFDTSTGT, 0x100002FC) \
  /* Floating-Point Double-Precision Test Less Than */ \
  V(efdtstlt, EFDTSTLT, 0x100002FD) \
  /* Floating-Point Single-Precision Convert from Double-Precision */ \
  V(efscfd, EFSCFD, 0x100002CF) \
  /* Floating-Point Absolute Value */ \
  V(efsabs, EFSABS, 0x100002C4) \
  /* Floating-Point Add */ \
  V(efsadd, EFSADD, 0x100002C0) \
  /* Convert Floating-Point from Signed Fraction */ \
  V(efscfsf, EFSCFSF, 0x100002D3) \
  /* Convert Floating-Point from Signed Integer */ \
  V(efscfsi, EFSCFSI, 0x100002D1) \
  /* Convert Floating-Point from Unsigned Fraction */ \
  V(efscfuf, EFSCFUF, 0x100002D2) \
  /* Convert Floating-Point from Unsigned Integer */ \
  V(efscfui, EFSCFUI, 0x100002D0) \
  /* Floating-Point Compare Equal */ \
  V(efscmpeq, EFSCMPEQ, 0x100002CE) \
  /* Floating-Point Compare Greater Than */ \
  V(efscmpgt, EFSCMPGT, 0x100002CC) \
  /* Floating-Point Compare Less Than */ \
  V(efscmplt, EFSCMPLT, 0x100002CD) \
  /* Convert Floating-Point to Signed Fraction */ \
  V(efsctsf, EFSCTSF, 0x100002D7) \
  /* Convert Floating-Point to Signed Integer */ \
  V(efsctsi, EFSCTSI, 0x100002D5) \
  /* Convert Floating-Point to Signed Integer with Round toward Zero */ \
  V(efsctsiz, EFSCTSIZ, 0x100002DA) \
  /* Convert Floating-Point to Unsigned Fraction */ \
  V(efsctuf, EFSCTUF, 0x100002D6) \
  /* Convert Floating-Point to Unsigned Integer */ \
  V(efsctui, EFSCTUI, 0x100002D4) \
  /* Convert Floating-Point to Unsigned Integer with Round toward Zero */ \
  V(efsctuiz, EFSCTUIZ, 0x100002D8) \
  /* Floating-Point Divide */ \
  V(efsdiv, EFSDIV, 0x100002C9) \
  /* Floating-Point Multiply */ \
  V(efsmul, EFSMUL, 0x100002C8) \
  /* Floating-Point Negative Absolute Value */ \
  V(efsnabs, EFSNABS, 0x100002C5) \
  /* Floating-Point Negate */ \
  V(efsneg, EFSNEG, 0x100002C6) \
  /* Floating-Point Subtract */ \
  V(efssub, EFSSUB, 0x100002C1) \
  /* Floating-Point Test Equal */ \
  V(efststeq, EFSTSTEQ, 0x100002DE) \
  /* Floating-Point Test Greater Than */ \
  V(efststgt, EFSTSTGT, 0x100002DC) \
  /* Floating-Point Test Less Than */ \
  V(efststlt, EFSTSTLT, 0x100002DD) \
  /* Vector Floating-Point Absolute Value */ \
  V(evfsabs, EVFSABS, 0x10000284) \
  /* Vector Floating-Point Add */ \
  V(evfsadd, EVFSADD, 0x10000280) \
  /* Vector Convert Floating-Point from Signed Fraction */ \
  V(evfscfsf, EVFSCFSF, 0x10000293) \
  /* Vector Convert Floating-Point from Signed Integer */ \
  V(evfscfsi, EVFSCFSI, 0x10000291) \
  /* Vector Convert Floating-Point from Unsigned Fraction */ \
  V(evfscfuf, EVFSCFUF, 0x10000292) \
  /* Vector Convert Floating-Point from Unsigned Integer */ \
  V(evfscfui, EVFSCFUI, 0x10000290) \
  /* Vector Floating-Point Compare Equal */ \
  V(evfscmpeq, EVFSCMPEQ, 0x1000028E) \
  /* Vector Floating-Point Compare Greater Than */ \
  V(evfscmpgt, EVFSCMPGT, 0x1000028C) \
  /* Vector Floating-Point Compare Less Than */ \
  V(evfscmplt, EVFSCMPLT, 0x1000028D) \
  /* Vector Convert Floating-Point to Signed Fraction */ \
  V(evfsctsf, EVFSCTSF, 0x10000297) \
  /* Vector Convert Floating-Point to Signed Integer */ \
  V(evfsctsi, EVFSCTSI, 0x10000295) \
  /* Vector Convert Floating-Point to Signed Integer with Round toward */ \
  /* Zero */ \
  V(evfsctsiz, EVFSCTSIZ, 0x1000029A) \
  /* Vector Convert Floating-Point to Unsigned Fraction */ \
  V(evfsctuf, EVFSCTUF, 0x10000296) \
  /* Vector Convert Floating-Point to Unsigned Integer */ \
  V(evfsctui, EVFSCTUI, 0x10000294) \
  /* Vector Convert Floating-Point to Unsigned Integer with Round toward */ \
  /* Zero */ \
  V(evfsctuiz, EVFSCTUIZ, 0x10000298) \
  /* Vector Floating-Point Divide */ \
  V(evfsdiv, EVFSDIV, 0x10000289) \
  /* Vector Floating-Point Multiply */ \
  V(evfsmul, EVFSMUL, 0x10000288) \
  /* Vector Floating-Point Negative Absolute Value */ \
  V(evfsnabs, EVFSNABS, 0x10000285) \
  /* Vector Floating-Point Negate */ \
  V(evfsneg, EVFSNEG, 0x10000286) \
  /* Vector Floating-Point Subtract */ \
  V(evfssub, EVFSSUB, 0x10000281) \
  /* Vector Floating-Point Test Equal */ \
  V(evfststeq, EVFSTSTEQ, 0x1000029E) \
  /* Vector Floating-Point Test Greater Than */ \
  V(evfststgt, EVFSTSTGT, 0x1000029C) \
  /* Vector Floating-Point Test Less Than */ \
  V(evfststlt, EVFSTSTLT, 0x1000029D)

#define PPC_VC_OPCODE_LIST(V) \
  /* Vector Compare Bounds Single-Precision */ \
  V(vcmpbfp, VCMPBFP, 0x100003C6) \
  /* Vector Compare Equal To Single-Precision */ \
  V(vcmpeqfp, VCMPEQFP, 0x100000C6) \
  /* Vector Compare Equal To Unsigned Byte */ \
  V(vcmpequb, VCMPEQUB, 0x10000006) \
  /* Vector Compare Equal To Unsigned Doubleword */ \
  V(vcmpequd, VCMPEQUD, 0x100000C7) \
  /* Vector Compare Equal To Unsigned Halfword */ \
  V(vcmpequh, VCMPEQUH, 0x10000046) \
  /* Vector Compare Equal To Unsigned Word */ \
  V(vcmpequw, VCMPEQUW, 0x10000086) \
  /* Vector Compare Greater Than or Equal To Single-Precision */ \
  V(vcmpgefp, VCMPGEFP, 0x100001C6) \
  /* Vector Compare Greater Than Single-Precision */ \
  V(vcmpgtfp, VCMPGTFP, 0x100002C6) \
  /* Vector Compare Greater Than Signed Byte */ \
  V(vcmpgtsb, VCMPGTSB, 0x10000306) \
  /* Vector Compare Greater Than Signed Doubleword */ \
  V(vcmpgtsd, VCMPGTSD, 0x100003C7) \
  /* Vector Compare Greater Than Signed Halfword */ \
  V(vcmpgtsh, VCMPGTSH, 0x10000346) \
  /* Vector Compare Greater Than Signed Word */ \
  V(vcmpgtsw, VCMPGTSW, 0x10000386) \
  /* Vector Compare Greater Than Unsigned Byte */ \
  V(vcmpgtub, VCMPGTUB, 0x10000206) \
  /* Vector Compare Greater Than Unsigned Doubleword */ \
  V(vcmpgtud, VCMPGTUD, 0x100002C7) \
  /* Vector Compare Greater Than Unsigned Halfword */ \
  V(vcmpgtuh, VCMPGTUH, 0x10000246) \
  /* Vector Compare Greater Than Unsigned Word */ \
  V(vcmpgtuw, VCMPGTUW, 0x10000286)

#define PPC_X_OPCODE_A_FORM_LIST(V) \
  /* Modulo Signed Dword */ \
  V(modsd, MODSD, 0x7C000612) \
  /* Modulo Unsigned Dword */ \
  V(modud, MODUD, 0x7C000212) \
  /* Modulo Signed Word */ \
  V(modsw, MODSW, 0x7C000616) \
  /* Modulo Unsigned Word */ \
  V(moduw, MODUW, 0x7C000216)

#define PPC_X_OPCODE_B_FORM_LIST(V) \
  /* XOR */ \
  V(xor_, XORX, 0x7C000278) \
  /* AND */ \
  V(and_, ANDX, 0x7C000038) \
  /* AND with Complement */ \
  V(andc, ANDCX, 0x7C000078) \
  /* OR */ \
  V(orx, ORX, 0x7C000378) \
  /* OR with Complement */ \
  V(orc, ORC, 0x7C000338) \
  /* NOR */ \
  V(nor, NORX, 0x7C0000F8) \
  /* Shift Right Word */ \
  V(srw, SRWX, 0x7C000430) \
  /* Shift Left Word */ \
  V(slw, SLWX, 0x7C000030) \
  /* Shift Right Algebraic Word */ \
  V(sraw, SRAW, 0x7C000630) \
  /* Shift Left Doubleword */ \
  V(sld, SLDX, 0x7C000036) \
  /* Shift Right Algebraic Doubleword */ \
  V(srad, SRAD, 0x7C000634) \
  /* Shift Right Doubleword */ \
  V(srd, SRDX, 0x7C000436)

#define PPC_X_OPCODE_C_FORM_LIST(V) \
  /* Count Leading Zeros Word */ \
  V(cntlzw, CNTLZWX, 0x7C000034) \
  /* Count Leading Zeros Doubleword */ \
  V(cntlzd, CNTLZDX, 0x7C000074) \
  /* Population Count Byte-wise */ \
  V(popcntb, POPCNTB, 0x7C0000F4) \
  /* Population Count Words */ \
  V(popcntw, POPCNTW, 0x7C0002F4) \
  /* Population Count Doubleword */ \
  V(popcntd, POPCNTD, 0x7C0003F4) \
  /* Extend Sign Byte */ \
  V(extsb, EXTSB, 0x7C000774) \
  /* Extend Sign Halfword */ \
  V(extsh, EXTSH, 0x7C000734)

#define PPC_X_OPCODE_D_FORM_LIST(V) \
  /* Load Halfword Byte-Reverse Indexed */ \
  V(lhbrx, LHBRX, 0x7C00062C) \
  /* Load Word Byte-Reverse Indexed */ \
  V(lwbrx, LWBRX, 0x7C00042C) \
  /* Load Doubleword Byte-Reverse Indexed */ \
  V(ldbrx, LDBRX, 0x7C000428) \
  /* Load Byte and Zero Indexed */ \
  V(lbzx, LBZX, 0x7C0000AE) \
  /* Load Byte and Zero with Update Indexed */ \
  V(lbzux, LBZUX, 0x7C0000EE) \
  /* Load Halfword and Zero Indexed */ \
  V(lhzx, LHZX, 0x7C00022E) \
  /* Load Halfword and Zero with Update Indexed */ \
  V(lhzux, LHZUX, 0x7C00026E) \
  /* Load Halfword Algebraic Indexed */ \
  V(lhax, LHAX, 0x7C0002AE) \
  /* Load Word and Zero Indexed */ \
  V(lwzx, LWZX, 0x7C00002E) \
  /* Load Word and Zero with Update Indexed */ \
  V(lwzux, LWZUX, 0x7C00006E) \
  /* Load Doubleword Indexed */ \
  V(ldx, LDX, 0x7C00002A) \
  /* Load Doubleword with Update Indexed */ \
  V(ldux, LDUX, 0x7C00006A) \
  /* Load Floating-Point Double Indexed */ \
  V(lfdx, LFDX, 0x7C0004AE) \
  /* Load Floating-Point Single Indexed */ \
  V(lfsx, LFSX, 0x7C00042E) \
  /* Load Floating-Point Double with Update Indexed */ \
  V(lfdux, LFDUX, 0x7C0004EE) \
  /* Load Floating-Point Single with Update Indexed */ \
  V(lfsux, LFSUX, 0x7C00046E) \
  /* Store Byte with Update Indexed */ \
  V(stbux, STBUX, 0x7C0001EE) \
  /* Store Byte Indexed */ \
  V(stbx, STBX, 0x7C0001AE) \
  /* Store Halfword with Update Indexed */ \
  V(sthux, STHUX, 0x7C00036E) \
  /* Store Halfword Indexed */ \
  V(sthx, STHX, 0x7C00032E) \
  /* Store Word with Update Indexed */ \
  V(stwux, STWUX, 0x7C00016E) \
  /* Store Word Indexed */ \
  V(stwx, STWX, 0x7C00012E) \
  /* Store Doubleword with Update Indexed */ \
  V(stdux, STDUX, 0x7C00016A) \
  /* Store Doubleword Indexed */ \
  V(stdx, STDX, 0x7C00012A) \
  /* Store Floating-Point Double with Update Indexed */ \
  V(stfdux, STFDUX, 0x7C0005EE) \
  /* Store Floating-Point Double Indexed */ \
  V(stfdx, STFDX, 0x7C0005AE) \
  /* Store Floating-Point Single with Update Indexed */ \
  V(stfsux, STFSUX, 0x7C00056E) \
  /* Store Floating-Point Single Indexed */ \
  V(stfsx, STFSX, 0x7C00052E)

#define PPC_X_OPCODE_E_FORM_LIST(V) \
  /* Shift Right Algebraic Word Immediate */ \
  V(srawi, SRAWIX, 0x7C000670)

#define PPC_X_OPCODE_F_FORM_LIST(V) \
  /* Compare */ \
  V(cmp, CMP, 0x7C000000) \
  /* Compare Logical */ \
  V(cmpl, CMPL, 0x7C000040)

#define PPC_X_OPCODE_EH_S_FORM_LIST(V) \
  /* Store Byte Conditional Indexed */ \
  V(stbcx, STBCX, 0x7C00056D) \
  /* Store Halfword Conditional Indexed Xform */ \
  V(sthcx, STHCX, 0x7C0005AD) \
  /* Store Word Conditional Indexed & record CR0 */ \
  V(stwcx, STWCX, 0x7C00012D) \
  /* Store Doubleword Conditional Indexed & record CR0 */ \
  V(stdcx, STDCX, 0x7C0001AD)

#define PPC_X_OPCODE_EH_L_FORM_LIST(V) \
  /* Load Byte And Reserve Indexed */ \
  V(lbarx, LBARX, 0x7C000068) \
  /* Load Halfword And Reserve Indexed Xform */ \
  V(lharx, LHARX, 0x7C0000E8) \
  /* Load Word and Reserve Indexed */ \
  V(lwarx, LWARX, 0x7C000028) \
  /* Load Doubleword And Reserve Indexed */ \
  V(ldarx, LDARX, 0x7C0000A8)

1224 #define PPC_X_OPCODE_UNUSED_LIST(V) \
1225  /* Bit Permute Doubleword */ \
1226  V(bpermd, BPERMD, 0x7C0001F8) \
1227  /* Extend Sign Word */ \
1228  V(extsw, EXTSW, 0x7C0007B4) \
1229  /* Load Word Algebraic with Update Indexed */ \
1230  V(lwaux, LWAUX, 0x7C0002EA) \
1231  /* Load Word Algebraic Indexed */ \
1232  V(lwax, LWAX, 0x7C0002AA) \
1233  /* Parity Doubleword */ \
1234  V(prtyd, PRTYD, 0x7C000174) \
1235  /* Store Doubleword Byte-Reverse Indexed */ \
1236  V(stdbrx, STDBRX, 0x7C000528) \
1237  /* Trap Doubleword */ \
1238  V(td, TD, 0x7C000088) \
1239  /* Branch Conditional to Branch Target Address Register */ \
1240  V(bctar, BCTAR, 0x4C000460) \
1241  /* Compare Byte */ \
1242  V(cmpb, CMPB, 0x7C0003F8) \
1243  /* Data Cache Block Flush */ \
1244  V(dcbf, DCBF, 0x7C0000AC) \
1245  /* Data Cache Block Store */ \
1246  V(dcbst, DCBST, 0x7C00006C) \
1247  /* Data Cache Block Touch */ \
1248  V(dcbt, DCBT, 0x7C00022C) \
1249  /* Data Cache Block Touch for Store */ \
1250  V(dcbtst, DCBTST, 0x7C0001EC) \
1251  /* Data Cache Block Zero */ \
1252  V(dcbz, DCBZ, 0x7C0007EC) \
1253  /* Equivalent */ \
1254  V(eqv, EQV, 0x7C000238) \
1255  /* Instruction Cache Block Invalidate */ \
1256  V(icbi, ICBI, 0x7C0007AC) \
1257  /* NAND */ \
1258  V(nand, NAND, 0x7C0003B8) \
1259  /* Parity Word */ \
1260  V(prtyw, PRTYW, 0x7C000134) \
1261  /* Store Halfword Byte-Reverse Indexed */ \
1262  V(sthbrx, STHBRX, 0x7C00072C) \
1263  /* Store Word Byte-Reverse Indexed */ \
1264  V(stwbrx, STWBRX, 0x7C00052C) \
1265  /* Synchronize */ \
1266  V(sync, SYNC, 0x7C0004AC) \
1267  /* Trap Word */ \
1268  V(tw, TW, 0x7C000008) \
1269  /* Executed No Operation */ \
1270  V(xnop, XNOP, 0x68000000) \
1271  /* Convert Binary Coded Decimal To Declets */ \
1272  V(cbcdtd, CBCDTD, 0x7C000274) \
1273  /* Convert Declets To Binary Coded Decimal */ \
1274  V(cdtbcd, CDTBCD, 0x7C000234) \
1275  /* Decimal Floating Add */ \
1276  V(dadd, DADD, 0xEC000004) \
1277  /* Decimal Floating Add Quad */ \
1278  V(daddq, DADDQ, 0xFC000004) \
1279  /* Decimal Floating Convert From Fixed */ \
1280  V(dcffix, DCFFIX, 0xEC000644) \
1281  /* Decimal Floating Convert From Fixed Quad */ \
1282  V(dcffixq, DCFFIXQ, 0xFC000644) \
1283  /* Decimal Floating Compare Ordered */ \
1284  V(dcmpo, DCMPO, 0xEC000104) \
1285  /* Decimal Floating Compare Ordered Quad */ \
1286  V(dcmpoq, DCMPOQ, 0xFC000104) \
1287  /* Decimal Floating Compare Unordered */ \
1288  V(dcmpu, DCMPU, 0xEC000504) \
1289  /* Decimal Floating Compare Unordered Quad */ \
1290  V(dcmpuq, DCMPUQ, 0xFC000504) \
1291  /* Decimal Floating Convert To DFP Long */ \
1292  V(dctdp, DCTDP, 0xEC000204) \
1293  /* Decimal Floating Convert To Fixed */ \
1294  V(dctfix, DCTFIX, 0xEC000244) \
1295  /* Decimal Floating Convert To Fixed Quad */ \
1296  V(dctfixq, DCTFIXQ, 0xFC000244) \
1297  /* Decimal Floating Convert To DFP Extended */ \
1298  V(dctqpq, DCTQPQ, 0xFC000204) \
1299  /* Decimal Floating Decode DPD To BCD */ \
1300  V(ddedpd, DDEDPD, 0xEC000284) \
1301  /* Decimal Floating Decode DPD To BCD Quad */ \
1302  V(ddedpdq, DDEDPDQ, 0xFC000284) \
1303  /* Decimal Floating Divide */ \
1304  V(ddiv, DDIV, 0xEC000444) \
1305  /* Decimal Floating Divide Quad */ \
1306  V(ddivq, DDIVQ, 0xFC000444) \
1307  /* Decimal Floating Encode BCD To DPD */ \
1308  V(denbcd, DENBCD, 0xEC000684) \
1309  /* Decimal Floating Encode BCD To DPD Quad */ \
1310  V(denbcdq, DENBCDQ, 0xFC000684) \
1311  /* Decimal Floating Insert Exponent */ \
1312  V(diex, DIEX, 0xEC0006C4) \
1313  /* Decimal Floating Insert Exponent Quad */ \
1314  V(diexq, DIEXQ, 0xFC0006C4) \
1315  /* Decimal Floating Multiply */ \
1316  V(dmul, DMUL, 0xEC000044) \
1317  /* Decimal Floating Multiply Quad */ \
1318  V(dmulq, DMULQ, 0xFC000044) \
1319  /* Decimal Floating Round To DFP Long */ \
1320  V(drdpq, DRDPQ, 0xFC000604) \
1321  /* Decimal Floating Round To DFP Short */ \
1322  V(drsp, DRSP, 0xEC000604) \
1323  /* Decimal Floating Subtract */ \
1324  V(dsub, DSUB, 0xEC000404) \
1325  /* Decimal Floating Subtract Quad */ \
1326  V(dsubq, DSUBQ, 0xFC000404) \
1327  /* Decimal Floating Test Exponent */ \
1328  V(dtstex, DTSTEX, 0xEC000144) \
1329  /* Decimal Floating Test Exponent Quad */ \
1330  V(dtstexq, DTSTEXQ, 0xFC000144) \
1331  /* Decimal Floating Test Significance */ \
1332  V(dtstsf, DTSTSF, 0xEC000544) \
1333  /* Decimal Floating Test Significance Quad */ \
1334  V(dtstsfq, DTSTSFQ, 0xFC000544) \
1335  /* Decimal Floating Extract Exponent */ \
1336  V(dxex, DXEX, 0xEC0002C4) \
1337  /* Decimal Floating Extract Exponent Quad */ \
1338  V(dxexq, DXEXQ, 0xFC0002C4) \
1339  /* Decorated Storage Notify */ \
1340  V(dsn, DSN, 0x7C0003C6) \
1341  /* Load Byte with Decoration Indexed */ \
1342  V(lbdx, LBDX, 0x7C000406) \
1343  /* Load Doubleword with Decoration Indexed */ \
1344  V(lddx, LDDX, 0x7C0004C6) \
1345  /* Load Floating Doubleword with Decoration Indexed */ \
1346  V(lfddx, LFDDX, 0x7C000646) \
1347  /* Load Halfword with Decoration Indexed */ \
1348  V(lhdx, LHDX, 0x7C000446) \
1349  /* Load Word with Decoration Indexed */ \
1350  V(lwdx, LWDX, 0x7C000486) \
1351  /* Store Byte with Decoration Indexed */ \
1352  V(stbdx, STBDX, 0x7C000506) \
1353  /* Store Doubleword with Decoration Indexed */ \
1354  V(stddx, STDDX, 0x7C0005C6) \
1355  /* Store Floating Doubleword with Decoration Indexed */ \
1356  V(stfddx, STFDDX, 0x7C000746) \
1357  /* Store Halfword with Decoration Indexed */ \
1358  V(sthdx, STHDX, 0x7C000546) \
1359  /* Store Word with Decoration Indexed */ \
1360  V(stwdx, STWDX, 0x7C000586) \
1361  /* Data Cache Block Allocate */ \
1362  V(dcba, DCBA, 0x7C0005EC) \
1363  /* Data Cache Block Invalidate */ \
1364  V(dcbi, DCBI, 0x7C0003AC) \
1365  /* Instruction Cache Block Touch */ \
1366  V(icbt, ICBT, 0x7C00002C) \
1367  /* Move to Condition Register from XER */ \
1368  V(mcrxr, MCRXR, 0x7C000400) \
1369  /* TLB Invalidate Local Indexed */ \
1370  V(tlbilx, TLBILX, 0x7C000024) \
1371  /* TLB Invalidate Virtual Address Indexed */ \
1372  V(tlbivax, TLBIVAX, 0x7C000624) \
1373  /* TLB Read Entry */ \
1374  V(tlbre, TLBRE, 0x7C000764) \
1375  /* TLB Search Indexed */ \
1376  V(tlbsx, TLBSX, 0x7C000724) \
1377  /* TLB Write Entry */ \
1378  V(tlbwe, TLBWE, 0x7C0007A4) \
1379  /* Write External Enable */ \
1380  V(wrtee, WRTEE, 0x7C000106) \
1381  /* Write External Enable Immediate */ \
1382  V(wrteei, WRTEEI, 0x7C000146) \
1383  /* Data Cache Read */ \
1384  V(dcread, DCREAD, 0x7C00028C) \
1385  /* Instruction Cache Read */ \
1386  V(icread, ICREAD, 0x7C0007CC) \
1387  /* Data Cache Invalidate */ \
1388  V(dci, DCI, 0x7C00038C) \
1389  /* Instruction Cache Invalidate */ \
1390  V(ici, ICI, 0x7C00078C) \
1391  /* Move From Device Control Register User Mode Indexed */ \
1392  V(mfdcrux, MFDCRUX, 0x7C000246) \
1393  /* Move From Device Control Register Indexed */ \
1394  V(mfdcrx, MFDCRX, 0x7C000206) \
1395  /* Move To Device Control Register User Mode Indexed */ \
1396  V(mtdcrux, MTDCRUX, 0x7C000346) \
1397  /* Move To Device Control Register Indexed */ \
1398  V(mtdcrx, MTDCRX, 0x7C000306) \
1399  /* Return From Debug Interrupt */ \
1400  V(rfdi, RFDI, 0x4C00004E) \
1401  /* Data Cache Block Flush by External PID */ \
1402  V(dcbfep, DCBFEP, 0x7C0000FE) \
1403  /* Data Cache Block Store by External PID */ \
1404  V(dcbstep, DCBSTEP, 0x7C00007E) \
1405  /* Data Cache Block Touch by External PID */ \
1406  V(dcbtep, DCBTEP, 0x7C00027E) \
1407  /* Data Cache Block Touch for Store by External PID */ \
1408  V(dcbtstep, DCBTSTEP, 0x7C0001FE) \
1409  /* Data Cache Block Zero by External PID */ \
1410  V(dcbzep, DCBZEP, 0x7C0007FE) \
1411  /* Instruction Cache Block Invalidate by External PID */ \
1412  V(icbiep, ICBIEP, 0x7C0007BE) \
1413  /* Load Byte and Zero by External PID Indexed */ \
1414  V(lbepx, LBEPX, 0x7C0000BE) \
1415  /* Load Floating-Point Double by External PID Indexed */ \
1416  V(lfdepx, LFDEPX, 0x7C0004BE) \
1417  /* Load Halfword and Zero by External PID Indexed */ \
1418  V(lhepx, LHEPX, 0x7C00023E) \
1419  /* Load Vector by External PID Indexed */ \
1420  V(lvepx, LVEPX, 0x7C00024E) \
1421  /* Load Vector by External PID Indexed Last */ \
1422  V(lvepxl, LVEPXL, 0x7C00020E) \
1423  /* Load Word and Zero by External PID Indexed */ \
1424  V(lwepx, LWEPX, 0x7C00003E) \
1425  /* Store Byte by External PID Indexed */ \
1426  V(stbepx, STBEPX, 0x7C0001BE) \
1427  /* Store Floating-Point Double by External PID Indexed */ \
1428  V(stfdepx, STFDEPX, 0x7C0005BE) \
1429  /* Store Halfword by External PID Indexed */ \
1430  V(sthepx, STHEPX, 0x7C00033E) \
1431  /* Store Vector by External PID Indexed */ \
1432  V(stvepx, STVEPX, 0x7C00064E) \
1433  /* Store Vector by External PID Indexed Last */ \
1434  V(stvepxl, STVEPXL, 0x7C00060E) \
1435  /* Store Word by External PID Indexed */ \
1436  V(stwepx, STWEPX, 0x7C00013E) \
1437  /* Load Doubleword by External PID Indexed */ \
1438  V(ldepx, LDEPX, 0x7C00003A) \
1439  /* Store Doubleword by External PID Indexed */ \
1440  V(stdepx, STDEPX, 0x7C00013A) \
1441  /* TLB Search and Reserve Indexed */ \
1442  V(tlbsrx, TLBSRX, 0x7C0006A5) \
1443  /* External Control In Word Indexed */ \
1444  V(eciwx, ECIWX, 0x7C00026C) \
1445  /* External Control Out Word Indexed */ \
1446  V(ecowx, ECOWX, 0x7C00036C) \
1447  /* Data Cache Block Lock Clear */ \
1448  V(dcblc, DCBLC, 0x7C00030C) \
1449  /* Data Cache Block Lock Query */ \
1450  V(dcblq, DCBLQ, 0x7C00034D) \
1451  /* Data Cache Block Touch and Lock Set */ \
1452  V(dcbtls, DCBTLS, 0x7C00014C) \
1453  /* Data Cache Block Touch for Store and Lock Set */ \
1454  V(dcbtstls, DCBTSTLS, 0x7C00010C) \
1455  /* Instruction Cache Block Lock Clear */ \
1456  V(icblc, ICBLC, 0x7C0001CC) \
1457  /* Instruction Cache Block Lock Query */ \
1458  V(icblq, ICBLQ, 0x7C00018D) \
1459  /* Instruction Cache Block Touch and Lock Set */ \
1460  V(icbtls, ICBTLS, 0x7C0003CC) \
1461  /* Floating Compare Ordered */ \
1462  V(fcmpo, FCMPO, 0xFC000040) \
1463  /* Floating Compare Unordered */ \
1464  V(fcmpu, FCMPU, 0xFC000000) \
1465  /* Floating Test for software Divide */ \
1466  V(ftdiv, FTDIV, 0xFC000100) \
1467  /* Floating Test for software Square Root */ \
1468  V(ftsqrt, FTSQRT, 0xFC000140) \
1469  /* Load Floating-Point as Integer Word Algebraic Indexed */ \
1470  V(lfiwax, LFIWAX, 0x7C0006AE) \
1471  /* Load Floating-Point as Integer Word and Zero Indexed */ \
1472  V(lfiwzx, LFIWZX, 0x7C0006EE) \
1473  /* Move To Condition Register from FPSCR */ \
1474  V(mcrfs, MCRFS, 0xFC000080) \
1475  /* Store Floating-Point as Integer Word Indexed */ \
1476  V(stfiwx, STFIWX, 0x7C0007AE) \
1477  /* Load Floating-Point Double Pair Indexed */ \
1478  V(lfdpx, LFDPX, 0x7C00062E) \
1479  /* Store Floating-Point Double Pair Indexed */ \
1480  V(stfdpx, STFDPX, 0x7C00072E) \
1481  /* Floating Absolute Value */ \
1482  V(fabs, FABS, 0xFC000210) \
1483  /* Floating Convert From Integer Doubleword */ \
1484  V(fcfid, FCFID, 0xFC00069C) \
1485  /* Floating Convert From Integer Doubleword Single */ \
1486  V(fcfids, FCFIDS, 0xEC00069C) \
1487  /* Floating Convert From Integer Doubleword Unsigned */ \
1488  V(fcfidu, FCFIDU, 0xFC00079C) \
1489  /* Floating Convert From Integer Doubleword Unsigned Single */ \
1490  V(fcfidus, FCFIDUS, 0xEC00079C) \
1491  /* Floating Copy Sign */ \
1492  V(fcpsgn, FCPSGN, 0xFC000010) \
1493  /* Floating Convert To Integer Doubleword */ \
1494  V(fctid, FCTID, 0xFC00065C) \
1495  /* Floating Convert To Integer Doubleword Unsigned */ \
1496  V(fctidu, FCTIDU, 0xFC00075C) \
1497  /* Floating Convert To Integer Doubleword Unsigned with round toward */ \
1498  /* Zero */ \
1499  V(fctiduz, FCTIDUZ, 0xFC00075E) \
1500  /* Floating Convert To Integer Doubleword with round toward Zero */ \
1501  V(fctidz, FCTIDZ, 0xFC00065E) \
1502  /* Floating Convert To Integer Word */ \
1503  V(fctiw, FCTIW, 0xFC00001C) \
1504  /* Floating Convert To Integer Word Unsigned */ \
1505  V(fctiwu, FCTIWU, 0xFC00011C) \
1506  /* Floating Convert To Integer Word Unsigned with round toward Zero */ \
1507  V(fctiwuz, FCTIWUZ, 0xFC00011E) \
1508  /* Floating Convert To Integer Word with round to Zero */ \
1509  V(fctiwz, FCTIWZ, 0xFC00001E) \
1510  /* Floating Move Register */ \
1511  V(fmr, FMR, 0xFC000090) \
1512  /* Floating Negative Absolute Value */ \
1513  V(fnabs, FNABS, 0xFC000110) \
1514  /* Floating Negate */ \
1515  V(fneg, FNEG, 0xFC000050) \
1516  /* Floating Round to Single-Precision */ \
1517  V(frsp, FRSP, 0xFC000018) \
1518  /* Move From FPSCR */ \
1519  V(mffs, MFFS, 0xFC00048E) \
1520  /* Move To FPSCR Bit 0 */ \
1521  V(mtfsb0, MTFSB0, 0xFC00008C) \
1522  /* Move To FPSCR Bit 1 */ \
1523  V(mtfsb1, MTFSB1, 0xFC00004C) \
1524  /* Move To FPSCR Field Immediate */ \
1525  V(mtfsfi, MTFSFI, 0xFC00010C) \
1526  /* Floating Round To Integer Minus */ \
1527  V(frim, FRIM, 0xFC0003D0) \
1528  /* Floating Round To Integer Nearest */ \
1529  V(frin, FRIN, 0xFC000310) \
1530  /* Floating Round To Integer Plus */ \
1531  V(frip, FRIP, 0xFC000390) \
1532  /* Floating Round To Integer toward Zero */ \
1533  V(friz, FRIZ, 0xFC000350) \
1534  /* Multiply Cross Halfword to Word Signed */ \
1535  V(mulchw, MULCHW, 0x10000150) \
1536  /* Multiply Cross Halfword to Word Unsigned */ \
1537  V(mulchwu, MULCHWU, 0x10000110) \
1538  /* Multiply High Halfword to Word Signed */ \
1539  V(mulhhw, MULHHW, 0x10000050) \
1540  /* Multiply High Halfword to Word Unsigned */ \
1541  V(mulhhwu, MULHHWU, 0x10000010) \
1542  /* Multiply Low Halfword to Word Signed */ \
1543  V(mullhw, MULLHW, 0x10000350) \
1544  /* Multiply Low Halfword to Word Unsigned */ \
1545  V(mullhwu, MULLHWU, 0x10000310) \
1546  /* Determine Leftmost Zero Byte */ \
1547  V(dlmzb, DLMZB, 0x7C00009C) \
1548  /* Load Quadword And Reserve Indexed */ \
1549  V(lqarx, LQARX, 0x7C000228) \
1550  /* Store Quadword Conditional Indexed and record CR0 */ \
1551  V(stqcx, STQCX, 0x7C00016D) \
1552  /* Load String Word Immediate */ \
1553  V(lswi, LSWI, 0x7C0004AA) \
1554  /* Load String Word Indexed */ \
1555  V(lswx, LSWX, 0x7C00042A) \
1556  /* Store String Word Immediate */ \
1557  V(stswi, STSWI, 0x7C0005AA) \
1558  /* Store String Word Indexed */ \
1559  V(stswx, STSWX, 0x7C00052A) \
1560  /* Clear BHRB */ \
1561  V(clrbhrb, CLRBHRB, 0x7C00035C) \
1562  /* Enforce In-order Execution of I/O */ \
1563  V(eieio, EIEIO, 0x7C0006AC) \
1564  /* Load Byte and Zero Caching Inhibited Indexed */ \
1565  V(lbzcix, LBZCIX, 0x7C0006AA) \
1566  /* Load Doubleword Caching Inhibited Indexed */ \
1567  V(ldcix, LDCIX, 0x7C0006EA) \
1568  /* Load Halfword and Zero Caching Inhibited Indexed */ \
1569  V(lhzcix, LHZCIX, 0x7C00066A) \
1570  /* Load Word and Zero Caching Inhibited Indexed */ \
1571  V(lwzcix, LWZCIX, 0x7C00062A) \
1572  /* Move From Segment Register */ \
1573  V(mfsr, MFSR, 0x7C0004A6) \
1574  /* Move From Segment Register Indirect */ \
1575  V(mfsrin, MFSRIN, 0x7C000526) \
1576  /* Move To Machine State Register Doubleword */ \
1577  V(mtmsrd, MTMSRD, 0x7C000164) \
1578  /* Move To Split Little Endian */ \
1579  V(mtsle, MTSLE, 0x7C000126) \
1580  /* Move To Segment Register */ \
1581  V(mtsr, MTSR, 0x7C0001A4) \
1582  /* Move To Segment Register Indirect */ \
1583  V(mtsrin, MTSRIN, 0x7C0001E4) \
1584  /* SLB Find Entry ESID */ \
1585  V(slbfee, SLBFEE, 0x7C0007A7) \
1586  /* SLB Invalidate All */ \
1587  V(slbia, SLBIA, 0x7C0003E4) \
1588  /* SLB Invalidate Entry */ \
1589  V(slbie, SLBIE, 0x7C000364) \
1590  /* SLB Move From Entry ESID */ \
1591  V(slbmfee, SLBMFEE, 0x7C000726) \
1592  /* SLB Move From Entry VSID */ \
1593  V(slbmfev, SLBMFEV, 0x7C0006A6) \
1594  /* SLB Move To Entry */ \
1595  V(slbmte, SLBMTE, 0x7C000324) \
1596  /* Store Byte Caching Inhibited Indexed */ \
1597  V(stbcix, STBCIX, 0x7C0007AA) \
1598  /* Store Doubleword Caching Inhibited Indexed */ \
1599  V(stdcix, STDCIX, 0x7C0007EA) \
1600  /* Store Halfword and Zero Caching Inhibited Indexed */ \
1601  V(sthcix, STHCIX, 0x7C00076A) \
1602  /* Store Word and Zero Caching Inhibited Indexed */ \
1603  V(stwcix, STWCIX, 0x7C00072A) \
1604  /* TLB Invalidate All */ \
1605  V(tlbia, TLBIA, 0x7C0002E4) \
1606  /* TLB Invalidate Entry */ \
1607  V(tlbie, TLBIE, 0x7C000264) \
1608  /* TLB Invalidate Entry Local */ \
1609  V(tlbiel, TLBIEL, 0x7C000224) \
1610  /* Message Clear Privileged */ \
1611  V(msgclrp, MSGCLRP, 0x7C00015C) \
1612  /* Message Send Privileged */ \
1613  V(msgsndp, MSGSNDP, 0x7C00011C) \
1614  /* Message Clear */ \
1615  V(msgclr, MSGCLR, 0x7C0001DC) \
1616  /* Message Send */ \
1617  V(msgsnd, MSGSND, 0x7C00019C) \
1618  /* Move From Machine State Register */ \
1619  V(mfmsr, MFMSR, 0x7C0000A6) \
1620  /* Move To Machine State Register */ \
1621  V(mtmsr, MTMSR, 0x7C000124) \
1622  /* TLB Synchronize */ \
1623  V(tlbsync, TLBSYNC, 0x7C00046C) \
1624  /* Transaction Abort */ \
1625  V(tabort, TABORT, 0x7C00071D) \
1626  /* Transaction Abort Doubleword Conditional */ \
1627  V(tabortdc, TABORTDC, 0x7C00065D) \
1628  /* Transaction Abort Doubleword Conditional Immediate */ \
1629  V(tabortdci, TABORTDCI, 0x7C0006DD) \
1630  /* Transaction Abort Word Conditional */ \
1631  V(tabortwc, TABORTWC, 0x7C00061D) \
1632  /* Transaction Abort Word Conditional Immediate */ \
1633  V(tabortwci, TABORTWCI, 0x7C00069D) \
1634  /* Transaction Begin */ \
1635  V(tbegin, TBEGIN, 0x7C00051D) \
1636  /* Transaction Check */ \
1637  V(tcheck, TCHECK, 0x7C00059C) \
1638  /* Transaction End */ \
1639  V(tend, TEND, 0x7C00055C) \
1640  /* Transaction Recheckpoint */ \
1641  V(trechkpt, TRECHKPT, 0x7C0007DD) \
1642  /* Transaction Reclaim */ \
1643  V(treclaim, TRECLAIM, 0x7C00075D) \
1644  /* Transaction Suspend or Resume */ \
1645  V(tsr, TSR, 0x7C0005DC) \
1646  /* Load Vector Element Byte Indexed */ \
1647  V(lvebx, LVEBX, 0x7C00000E) \
1648  /* Load Vector Element Halfword Indexed */ \
1649  V(lvehx, LVEHX, 0x7C00004E) \
1650  /* Load Vector Element Word Indexed */ \
1651  V(lvewx, LVEWX, 0x7C00008E) \
1652  /* Load Vector for Shift Left */ \
1653  V(lvsl, LVSL, 0x7C00000C) \
1654  /* Load Vector for Shift Right */ \
1655  V(lvsr, LVSR, 0x7C00004C) \
1656  /* Load Vector Indexed */ \
1657  V(lvx, LVX, 0x7C0000CE) \
1658  /* Load Vector Indexed Last */ \
1659  V(lvxl, LVXL, 0x7C0002CE) \
1660  /* Store Vector Element Byte Indexed */ \
1661  V(stvebx, STVEBX, 0x7C00010E) \
1662  /* Store Vector Element Halfword Indexed */ \
1663  V(stvehx, STVEHX, 0x7C00014E) \
1664  /* Store Vector Element Word Indexed */ \
1665  V(stvewx, STVEWX, 0x7C00018E) \
1666  /* Store Vector Indexed */ \
1667  V(stvx, STVX, 0x7C0001CE) \
1668  /* Store Vector Indexed Last */ \
1669  V(stvxl, STVXL, 0x7C0003CE) \
1670  /* Vector Minimum Signed Doubleword */ \
1671  V(vminsd, VMINSD, 0x100003C2) \
1672  /* Floating Merge Even Word */ \
1673  V(fmrgew, FMRGEW, 0xFC00078C) \
1674  /* Floating Merge Odd Word */ \
1675  V(fmrgow, FMRGOW, 0xFC00068C) \
1676  /* Wait for Interrupt */ \
1677  V(wait, WAIT, 0x7C00007C)
1678 
1679 #define PPC_X_OPCODE_LIST(V) \
1680  PPC_X_OPCODE_A_FORM_LIST(V) \
1681  PPC_X_OPCODE_B_FORM_LIST(V) \
1682  PPC_X_OPCODE_C_FORM_LIST(V) \
1683  PPC_X_OPCODE_D_FORM_LIST(V) \
1684  PPC_X_OPCODE_E_FORM_LIST(V) \
1685  PPC_X_OPCODE_F_FORM_LIST(V) \
1686  PPC_X_OPCODE_EH_L_FORM_LIST(V) \
1687  PPC_X_OPCODE_UNUSED_LIST(V)
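// Note: PPC_X_OPCODE_EH_S_FORM_LIST is not folded into PPC_X_OPCODE_LIST;
// PPC_OPCODE_LIST below pulls it in separately.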
1688 
1689 #define PPC_EVS_OPCODE_LIST(V) \
1690  /* Vector Select */ \
1691  V(evsel, EVSEL, 0x10000278)
1692 
1693 #define PPC_DS_OPCODE_LIST(V) \
1694  /* Load Doubleword */ \
1695  V(ld, LD, 0xE8000000) \
1696  /* Load Doubleword with Update */ \
1697  V(ldu, LDU, 0xE8000001) \
1698  /* Load Word Algebraic */ \
1699  V(lwa, LWA, 0xE8000002) \
1700  /* Store Doubleword */ \
1701  V(std, STD, 0xF8000000) \
1702  /* Store Doubleword with Update */ \
1703  V(stdu, STDU, 0xF8000001) \
1704  /* Load Floating-Point Double Pair */ \
1705  V(lfdp, LFDP, 0xE4000000) \
1706  /* Store Floating-Point Double Pair */ \
1707  V(stfdp, STFDP, 0xF4000000) \
1708  /* Store Quadword */ \
1709  V(stq, STQ, 0xF8000002)
1710 
1711 #define PPC_DQ_OPCODE_LIST(V) \
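  /* Load Quadword */ \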
1712  V(lsq, LSQ, 0xE0000000)
1713 
1714 #define PPC_D_OPCODE_LIST(V) \
1715  /* Trap Doubleword Immediate */ \
1716  V(tdi, TDI, 0x08000000) \
1717  /* Add Immediate */ \
1718  V(addi, ADDI, 0x38000000) \
1719  /* Add Immediate Carrying */ \
1720  V(addic, ADDIC, 0x30000000) \
1721  /* Add Immediate Carrying & record CR0 */ \
1722  V(addicx, ADDICx, 0x34000000) \
1723  /* Add Immediate Shifted */ \
1724  V(addis, ADDIS, 0x3C000000) \
1725  /* AND Immediate & record CR0 */ \
1726  V(andix, ANDIx, 0x70000000) \
1727  /* AND Immediate Shifted & record CR0 */ \
1728  V(andisx, ANDISx, 0x74000000) \
1729  /* Compare Immediate */ \
1730  V(cmpi, CMPI, 0x2C000000) \
1731  /* Compare Logical Immediate */ \
1732  V(cmpli, CMPLI, 0x28000000) \
1733  /* Load Byte and Zero */ \
1734  V(lbz, LBZ, 0x88000000) \
1735  /* Load Byte and Zero with Update */ \
1736  V(lbzu, LBZU, 0x8C000000) \
1737  /* Load Halfword Algebraic */ \
1738  V(lha, LHA, 0xA8000000) \
1739  /* Load Halfword Algebraic with Update */ \
1740  V(lhau, LHAU, 0xAC000000) \
1741  /* Load Halfword and Zero */ \
1742  V(lhz, LHZ, 0xA0000000) \
1743  /* Load Halfword and Zero with Update */ \
1744  V(lhzu, LHZU, 0xA4000000) \
1745  /* Load Multiple Word */ \
1746  V(lmw, LMW, 0xB8000000) \
1747  /* Load Word and Zero */ \
1748  V(lwz, LWZ, 0x80000000) \
1749  /* Load Word and Zero with Update */ \
1750  V(lwzu, LWZU, 0x84000000) \
1751  /* Multiply Low Immediate */ \
1752  V(mulli, MULLI, 0x1C000000) \
1753  /* OR Immediate */ \
1754  V(ori, ORI, 0x60000000) \
1755  /* OR Immediate Shifted */ \
1756  V(oris, ORIS, 0x64000000) \
1757  /* Store Byte */ \
1758  V(stb, STB, 0x98000000) \
1759  /* Store Byte with Update */ \
1760  V(stbu, STBU, 0x9C000000) \
1761  /* Store Halfword */ \
1762  V(sth, STH, 0xB0000000) \
1763  /* Store Halfword with Update */ \
1764  V(sthu, STHU, 0xB4000000) \
1765  /* Store Multiple Word */ \
1766  V(stmw, STMW, 0xBC000000) \
1767  /* Store Word */ \
1768  V(stw, STW, 0x90000000) \
1769  /* Store Word with Update */ \
1770  V(stwu, STWU, 0x94000000) \
1771  /* Subtract From Immediate Carrying */ \
1772  V(subfic, SUBFIC, 0x20000000) \
1773  /* Trap Word Immediate */ \
1774  V(twi, TWI, 0x0C000000) \
1775  /* XOR Immediate */ \
1776  V(xori, XORI, 0x68000000) \
1777  /* XOR Immediate Shifted */ \
1778  V(xoris, XORIS, 0x6C000000) \
1779  /* Load Floating-Point Double */ \
1780  V(lfd, LFD, 0xC8000000) \
1781  /* Load Floating-Point Double with Update */ \
1782  V(lfdu, LFDU, 0xCC000000) \
1783  /* Load Floating-Point Single */ \
1784  V(lfs, LFS, 0xC0000000) \
1785  /* Load Floating-Point Single with Update */ \
1786  V(lfsu, LFSU, 0xC4000000) \
1787  /* Store Floating-Point Double */ \
1788  V(stfd, STFD, 0xD8000000) \
1789  /* Store Floating-Point Double with Update */ \
1790  V(stfdu, STFDU, 0xDC000000) \
1791  /* Store Floating-Point Single */ \
1792  V(stfs, STFS, 0xD0000000) \
1793  /* Store Floating-Point Single with Update */ \
1794  V(stfsu, STFSU, 0xD4000000)
1795 
1796 #define PPC_XFL_OPCODE_LIST(V) \
1797  /* Move To FPSCR Fields */ \
1798  V(mtfsf, MTFSF, 0xFC00058E)
1799 
1800 #define PPC_XFX_OPCODE_LIST(V) \
1801  /* Move From Condition Register */ \
1802  V(mfcr, MFCR, 0x7C000026) \
1803  /* Move From One Condition Register Field */ \
1804  V(mfocrf, MFOCRF, 0x7C100026) \
1805  /* Move From Special Purpose Register */ \
1806  V(mfspr, MFSPR, 0x7C0002A6) \
1807  /* Move To Condition Register Fields */ \
1808  V(mtcrf, MTCRF, 0x7C000120) \
1809  /* Move To One Condition Register Field */ \
1810  V(mtocrf, MTOCRF, 0x7C100120) \
1811  /* Move To Special Purpose Register */ \
1812  V(mtspr, MTSPR, 0x7C0003A6) \
1813  /* Debugger Notify Halt */ \
1814  V(dnh, DNH, 0x4C00018C) \
1815  /* Move From Device Control Register */ \
1816  V(mfdcr, MFDCR, 0x7C000286) \
1817  /* Move To Device Control Register */ \
1818  V(mtdcr, MTDCR, 0x7C000386) \
1819  /* Move from Performance Monitor Register */ \
1820  V(mfpmr, MFPMR, 0x7C00029C) \
1821  /* Move To Performance Monitor Register */ \
1822  V(mtpmr, MTPMR, 0x7C00039C) \
1823  /* Move From Branch History Rolling Buffer */ \
1824  V(mfbhrbe, MFBHRBE, 0x7C00025C) \
1825  /* Move From Time Base */ \
1826  V(mftb, MFTB, 0x7C0002E6)
1827 
1828 #define PPC_MDS_OPCODE_LIST(V) \
1829  /* Rotate Left Doubleword then Clear Left */ \
1830  V(rldcl, RLDCL, 0x78000010) \
1831  /* Rotate Left Doubleword then Clear Right */ \
1832  V(rldcr, RLDCR, 0x78000012)
1833 
1834 #define PPC_A_OPCODE_LIST(V) \
1835  /* Integer Select */ \
1836  V(isel, ISEL, 0x7C00001E) \
1837  /* Floating Add */ \
1838  V(fadd, FADD, 0xFC00002A) \
1839  /* Floating Add Single */ \
1840  V(fadds, FADDS, 0xEC00002A) \
1841  /* Floating Divide */ \
1842  V(fdiv, FDIV, 0xFC000024) \
1843  /* Floating Divide Single */ \
1844  V(fdivs, FDIVS, 0xEC000024) \
1845  /* Floating Multiply-Add */ \
1846  V(fmadd, FMADD, 0xFC00003A) \
1847  /* Floating Multiply-Add Single */ \
1848  V(fmadds, FMADDS, 0xEC00003A) \
1849  /* Floating Multiply-Subtract */ \
1850  V(fmsub, FMSUB, 0xFC000038) \
1851  /* Floating Multiply-Subtract Single */ \
1852  V(fmsubs, FMSUBS, 0xEC000038) \
1853  /* Floating Multiply */ \
1854  V(fmul, FMUL, 0xFC000032) \
1855  /* Floating Multiply Single */ \
1856  V(fmuls, FMULS, 0xEC000032) \
1857  /* Floating Negative Multiply-Add */ \
1858  V(fnmadd, FNMADD, 0xFC00003E) \
1859  /* Floating Negative Multiply-Add Single */ \
1860  V(fnmadds, FNMADDS, 0xEC00003E) \
1861  /* Floating Negative Multiply-Subtract */ \
1862  V(fnmsub, FNMSUB, 0xFC00003C) \
1863  /* Floating Negative Multiply-Subtract Single */ \
1864  V(fnmsubs, FNMSUBS, 0xEC00003C) \
1865  /* Floating Reciprocal Estimate Single */ \
1866  V(fres, FRES, 0xEC000030) \
1867  /* Floating Reciprocal Square Root Estimate */ \
1868  V(frsqrte, FRSQRTE, 0xFC000034) \
1869  /* Floating Select */ \
1870  V(fsel, FSEL, 0xFC00002E) \
1871  /* Floating Square Root */ \
1872  V(fsqrt, FSQRT, 0xFC00002C) \
1873  /* Floating Square Root Single */ \
1874  V(fsqrts, FSQRTS, 0xEC00002C) \
1875  /* Floating Subtract */ \
1876  V(fsub, FSUB, 0xFC000028) \
1877  /* Floating Subtract Single */ \
1878  V(fsubs, FSUBS, 0xEC000028) \
1879  /* Floating Reciprocal Estimate */ \
1880  V(fre, FRE, 0xFC000030) \
1881  /* Floating Reciprocal Square Root Estimate Single */ \
1882  V(frsqrtes, FRSQRTES, 0xEC000034)
1883 
1884 #define PPC_VA_OPCODE_LIST(V) \
1885  /* Vector Add Extended & write Carry Unsigned Quadword */ \
1886  V(vaddecuq, VADDECUQ, 0x1000003D) \
1887  /* Vector Add Extended Unsigned Quadword Modulo */ \
1888  V(vaddeuqm, VADDEUQM, 0x1000003C) \
1889  /* Vector Multiply-Add Single-Precision */ \
1890  V(vmaddfp, VMADDFP, 0x1000002E) \
1891  /* Vector Multiply-High-Add Signed Halfword Saturate */ \
1892  V(vmhaddshs, VMHADDSHS, 0x10000020) \
1893  /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
1894  V(vmhraddshs, VMHRADDSHS, 0x10000021) \
1895  /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
1896  V(vmladduhm, VMLADDUHM, 0x10000022) \
1897  /* Vector Multiply-Sum Mixed Byte Modulo */ \
1898  V(vmsummbm, VMSUMMBM, 0x10000025) \
1899  /* Vector Multiply-Sum Signed Halfword Modulo */ \
1900  V(vmsumshm, VMSUMSHM, 0x10000028) \
1901  /* Vector Multiply-Sum Signed Halfword Saturate */ \
1902  V(vmsumshs, VMSUMSHS, 0x10000029) \
1903  /* Vector Multiply-Sum Unsigned Byte Modulo */ \
1904  V(vmsumubm, VMSUMUBM, 0x10000024) \
1905  /* Vector Multiply-Sum Unsigned Halfword Modulo */ \
1906  V(vmsumuhm, VMSUMUHM, 0x10000026) \
1907  /* Vector Multiply-Sum Unsigned Halfword Saturate */ \
1908  V(vmsumuhs, VMSUMUHS, 0x10000027) \
1909  /* Vector Negative Multiply-Subtract Single-Precision */ \
1910  V(vnmsubfp, VNMSUBFP, 0x1000002F) \
1911  /* Vector Permute */ \
1912  V(vperm, VPERM, 0x1000002B) \
1913  /* Vector Select */ \
1914  V(vsel, VSEL, 0x1000002A) \
1915  /* Vector Shift Left Double by Octet Immediate */ \
1916  V(vsldoi, VSLDOI, 0x1000002C) \
1917  /* Vector Subtract Extended & write Carry Unsigned Quadword */ \
1918  V(vsubecuq, VSUBECUQ, 0x1000003F) \
1919  /* Vector Subtract Extended Unsigned Quadword Modulo */ \
1920  V(vsubeuqm, VSUBEUQM, 0x1000003E) \
1921  /* Vector Permute and Exclusive-OR */ \
1922  V(vpermxor, VPERMXOR, 0x1000002D)
1923 
1924 #define PPC_XX1_OPCODE_LIST(V) \
1925  /* Load VSR Scalar Doubleword Indexed */ \
1926  V(lxsdx, LXSDX, 0x7C000498) \
1927  /* Load VSX Scalar as Integer Word Algebraic Indexed */ \
1928  V(lxsiwax, LXSIWAX, 0x7C000098) \
1929  /* Load VSX Scalar as Integer Word and Zero Indexed */ \
1930  V(lxsiwzx, LXSIWZX, 0x7C000018) \
1931  /* Load VSX Scalar Single-Precision Indexed */ \
1932  V(lxsspx, LXSSPX, 0x7C000418) \
1933  /* Load VSR Vector Doubleword*2 Indexed */ \
1934  V(lxvd, LXVD, 0x7C000698) \
1935  /* Load VSR Vector Doubleword & Splat Indexed */ \
1936  V(lxvdsx, LXVDSX, 0x7C000298) \
1937  /* Load VSR Vector Word*4 Indexed */ \
1938  V(lxvw, LXVW, 0x7C000618) \
1939  /* Move From VSR Doubleword */ \
1940  V(mfvsrd, MFVSRD, 0x7C000066) \
1941  /* Move From VSR Word and Zero */ \
1942  V(mfvsrwz, MFVSRWZ, 0x7C0000E6) \
1943  /* Store VSR Scalar Doubleword Indexed */ \
1944  V(stxsdx, STXSDX, 0x7C000598) \
1945  /* Store VSX Scalar as Integer Word Indexed */ \
1946  V(stxsiwx, STXSIWX, 0x7C000118) \
1947  /* Store VSR Scalar Word Indexed */ \
1948  V(stxsspx, STXSSPX, 0x7C000518) \
1949  /* Store VSR Vector Doubleword*2 Indexed */ \
1950  V(stxvd, STXVD, 0x7C000798) \
1951  /* Store VSR Vector Word*4 Indexed */ \
1952  V(stxvw, STXVW, 0x7C000718)
1953 
1954 #define PPC_B_OPCODE_LIST(V) \
1955  /* Branch Conditional */ \
1956  V(bc, BCX, 0x40000000)
1957 
1958 #define PPC_XO_OPCODE_LIST(V) \
1959  /* Divide Doubleword */ \
1960  V(divd, DIVD, 0x7C0003D2) \
1961  /* Divide Doubleword Extended */ \
1962  V(divde, DIVDE, 0x7C000352) \
1963  /* Divide Doubleword Extended & record OV */ \
1964  V(divdeo, DIVDEO, 0x7C000752) \
1965  /* Divide Doubleword Extended Unsigned */ \
1966  V(divdeu, DIVDEU, 0x7C000312) \
1967  /* Divide Doubleword Extended Unsigned & record OV */ \
1968  V(divdeuo, DIVDEUO, 0x7C000712) \
1969  /* Divide Doubleword & record OV */ \
1970  V(divdo, DIVDO, 0x7C0007D2) \
1971  /* Divide Doubleword Unsigned */ \
1972  V(divdu, DIVDU, 0x7C000392) \
1973  /* Divide Doubleword Unsigned & record OV */ \
1974  V(divduo, DIVDUO, 0x7C000792) \
1975  /* Multiply High Doubleword */ \
1976  V(mulhd, MULHD, 0x7C000092) \
1977  /* Multiply High Doubleword Unsigned */ \
1978  V(mulhdu, MULHDU, 0x7C000012) \
1979  /* Multiply Low Doubleword */ \
1980  V(mulld, MULLD, 0x7C0001D2) \
1981  /* Multiply Low Doubleword & record OV */ \
1982  V(mulldo, MULLDO, 0x7C0005D2) \
1983  /* Add */ \
1984  V(add, ADDX, 0x7C000214) \
1985  /* Add Carrying */ \
1986  V(addc, ADDCX, 0x7C000014) \
1987  /* Add Carrying & record OV */ \
1988  V(addco, ADDCO, 0x7C000414) \
1989  /* Add Extended */ \
1990  V(adde, ADDEX, 0x7C000114) \
1991  /* Add Extended & record OV */ \
1992  V(addeo, ADDEO, 0x7C000514) \
1993  /* Add to Minus One Extended */ \
1994  V(addme, ADDME, 0x7C0001D4) \
1995  /* Add to Minus One Extended & record OV */ \
1996  V(addmeo, ADDMEO, 0x7C0005D4) \
1997  /* Add & record OV */ \
1998  V(addo, ADDO, 0x7C000614) \
1999  /* Add to Zero Extended */ \
2000  V(addze, ADDZEX, 0x7C000194) \
2001  /* Add to Zero Extended & record OV */ \
2002  V(addzeo, ADDZEO, 0x7C000594) \
2003  /* Divide Word */ \
2004  V(divw, DIVW, 0x7C0003D6) \
2005  /* Divide Word Extended */ \
2006  V(divwe, DIVWE, 0x7C000356) \
2007  /* Divide Word Extended & record OV */ \
2008  V(divweo, DIVWEO, 0x7C000756) \
2009  /* Divide Word Extended Unsigned */ \
2010  V(divweu, DIVWEU, 0x7C000316) \
2011  /* Divide Word Extended Unsigned & record OV */ \
2012  V(divweuo, DIVWEUO, 0x7C000716) \
2013  /* Divide Word & record OV */ \
2014  V(divwo, DIVWO, 0x7C0007D6) \
2015  /* Divide Word Unsigned */ \
2016  V(divwu, DIVWU, 0x7C000396) \
2017  /* Divide Word Unsigned & record OV */ \
2018  V(divwuo, DIVWUO, 0x7C000796) \
2019  /* Multiply High Word */ \
2020  V(mulhw, MULHWX, 0x7C000096) \
2021  /* Multiply High Word Unsigned */ \
2022  V(mulhwu, MULHWUX, 0x7C000016) \
2023  /* Multiply Low Word */ \
2024  V(mullw, MULLW, 0x7C0001D6) \
2025  /* Multiply Low Word & record OV */ \
2026  V(mullwo, MULLWO, 0x7C0005D6) \
2027  /* Negate */ \
2028  V(neg, NEGX, 0x7C0000D0) \
2029  /* Negate & record OV */ \
2030  V(nego, NEGO, 0x7C0004D0) \
2031  /* Subtract From */ \
2032  V(subf, SUBFX, 0x7C000050) \
2033  /* Subtract From Carrying */ \
2034  V(subfc, SUBFCX, 0x7C000010) \
2035  /* Subtract From Carrying & record OV */ \
2036  V(subfco, SUBFCO, 0x7C000410) \
2037  /* Subtract From Extended */ \
2038  V(subfe, SUBFEX, 0x7C000110) \
2039  /* Subtract From Extended & record OV */ \
2040  V(subfeo, SUBFEO, 0x7C000510) \
2041  /* Subtract From Minus One Extended */ \
2042  V(subfme, SUBFME, 0x7C0001D0) \
2043  /* Subtract From Minus One Extended & record OV */ \
2044  V(subfmeo, SUBFMEO, 0x7C0005D0) \
2045  /* Subtract From & record OV */ \
2046  V(subfo, SUBFO, 0x7C000450) \
2047  /* Subtract From Zero Extended */ \
2048  V(subfze, SUBFZE, 0x7C000190) \
2049  /* Subtract From Zero Extended & record OV */ \
2050  V(subfzeo, SUBFZEO, 0x7C000590) \
2051  /* Add and Generate Sixes */ \
2052  V(addg, ADDG, 0x7C000094) \
2053  /* Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
2054  V(macchw, MACCHW, 0x10000158) \
2055  /* Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
2056  V(macchws, MACCHWS, 0x100001D8) \
2057  /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */ \
2058  V(macchwsu, MACCHWSU, 0x10000198) \
2059  /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */ \
2060  V(macchwu, MACCHWU, 0x10000118) \
2061  /* Multiply Accumulate High Halfword to Word Modulo Signed */ \
2062  V(machhw, MACHHW, 0x10000058) \
2063  /* Multiply Accumulate High Halfword to Word Saturate Signed */ \
2064  V(machhws, MACHHWS, 0x100000D8) \
2065  /* Multiply Accumulate High Halfword to Word Saturate Unsigned */ \
2066  V(machhwsu, MACHHWSU, 0x10000098) \
2067  /* Multiply Accumulate High Halfword to Word Modulo Unsigned */ \
2068  V(machhwu, MACHHWU, 0x10000018) \
2069  /* Multiply Accumulate Low Halfword to Word Modulo Signed */ \
2070  V(maclhw, MACLHW, 0x10000358) \
2071  /* Multiply Accumulate Low Halfword to Word Saturate Signed */ \
2072  V(maclhws, MACLHWS, 0x100003D8) \
2073  /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */ \
2074  V(maclhwsu, MACLHWSU, 0x10000398) \
2075  /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */ \
2076  V(maclhwu, MACLHWU, 0x10000318) \
2077  /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
2078  V(nmacchw, NMACCHW, 0x1000015C) \
2079  /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
2080  V(nmacchws, NMACCHWS, 0x100001DC) \
2081  /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */ \
2082  V(nmachhw, NMACHHW, 0x1000005C) \
2083  /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */ \
2084  V(nmachhws, NMACHHWS, 0x100000DC) \
2085  /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */ \
2086  V(nmaclhw, NMACLHW, 0x1000035C) \
2087  /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */ \
2088  V(nmaclhws, NMACLHWS, 0x100003DC)
2089 
2090 #define PPC_XL_OPCODE_LIST(V) \
2091  /* Branch Conditional to Count Register */ \
2092  V(bcctr, BCCTRX, 0x4C000420) \
2093  /* Branch Conditional to Link Register */ \
2094  V(bclr, BCLRX, 0x4C000020) \
2095  /* Condition Register AND */ \
2096  V(crand, CRAND, 0x4C000202) \
2097  /* Condition Register AND with Complement */ \
2098  V(crandc, CRANDC, 0x4C000102) \
2099  /* Condition Register Equivalent */ \
2100  V(creqv, CREQV, 0x4C000242) \
2101  /* Condition Register NAND */ \
2102  V(crnand, CRNAND, 0x4C0001C2) \
2103  /* Condition Register NOR */ \
2104  V(crnor, CRNOR, 0x4C000042) \
2105  /* Condition Register OR */ \
2106  V(cror, CROR, 0x4C000382) \
2107  /* Condition Register OR with Complement */ \
2108  V(crorc, CRORC, 0x4C000342) \
2109  /* Condition Register XOR */ \
2110  V(crxor, CRXOR, 0x4C000182) \
2111  /* Instruction Synchronize */ \
2112  V(isync, ISYNC, 0x4C00012C) \
2113  /* Move Condition Register Field */ \
2114  V(mcrf, MCRF, 0x4C000000) \
2115  /* Return From Critical Interrupt */ \
2116  V(rfci, RFCI, 0x4C000066) \
2117  /* Return From Interrupt */ \
2118  V(rfi, RFI, 0x4C000064) \
2119  /* Return From Machine Check Interrupt */ \
2120  V(rfmci, RFMCI, 0x4C00004C) \
2121  /* Embedded Hypervisor Privilege */ \
2122  V(ehpriv, EHPRIV, 0x7C00021C) \
2123  /* Return From Guest Interrupt */ \
2124  V(rfgi, RFGI, 0x4C0000CC) \
2125  /* Doze */ \
2126  V(doze, DOZE, 0x4C000324) \
2127  /* Return From Interrupt Doubleword Hypervisor */ \
2128  V(hrfid, HRFID, 0x4C000224) \
2129  /* Nap */ \
2130  V(nap, NAP, 0x4C000364) \
2131  /* Return from Event Based Branch */ \
2132  V(rfebb, RFEBB, 0x4C000124) \
2133  /* Return from Interrupt Doubleword */ \
2134  V(rfid, RFID, 0x4C000024) \
2135  /* Rip Van Winkle */ \
2136  V(rvwinkle, RVWINKLE, 0x4C0003E4) \
2137  /* Sleep */ \
2138  V(sleep, SLEEP, 0x4C0003A4)
2139 
2140 #define PPC_XX4_OPCODE_LIST(V) \
2141  /* VSX Select */ \
2142  V(xxsel, XXSEL, 0xF0000030)
2143 
2144 #define PPC_I_OPCODE_LIST(V) \
2145  /* Branch */ \
2146  V(b, BX, 0x48000000)
2147 
2148 #define PPC_M_OPCODE_LIST(V) \
2149  /* Rotate Left Word Immediate then Mask Insert */ \
2150  V(rlwimi, RLWIMIX, 0x50000000) \
2151  /* Rotate Left Word Immediate then AND with Mask */ \
2152  V(rlwinm, RLWINMX, 0x54000000) \
2153  /* Rotate Left Word then AND with Mask */ \
2154  V(rlwnm, RLWNMX, 0x5C000000)
2155 
2156 #define PPC_VX_OPCODE_LIST(V) \
2157  /* Decimal Add Modulo */ \
2158  V(bcdadd, BCDADD, 0xF0000400) \
2159  /* Decimal Subtract Modulo */ \
2160  V(bcdsub, BCDSUB, 0xF0000440) \
2161  /* Move From Vector Status and Control Register */ \
2162  V(mfvscr, MFVSCR, 0x10000604) \
2163  /* Move To Vector Status and Control Register */ \
2164  V(mtvscr, MTVSCR, 0x10000644) \
2165  /* Vector Add & write Carry Unsigned Quadword */ \
2166  V(vaddcuq, VADDCUQ, 0x10000140) \
2167  /* Vector Add and Write Carry-Out Unsigned Word */ \
2168  V(vaddcuw, VADDCUW, 0x10000180) \
2169  /* Vector Add Single-Precision */ \
2170  V(vaddfp, VADDFP, 0x1000000A) \
2171  /* Vector Add Signed Byte Saturate */ \
2172  V(vaddsbs, VADDSBS, 0x10000300) \
2173  /* Vector Add Signed Halfword Saturate */ \
2174  V(vaddshs, VADDSHS, 0x10000340) \
2175  /* Vector Add Signed Word Saturate */ \
2176  V(vaddsws, VADDSWS, 0x10000380) \
2177  /* Vector Add Unsigned Byte Modulo */ \
2178  V(vaddubm, VADDUBM, 0x10000000) \
2179  /* Vector Add Unsigned Byte Saturate */ \
2180  V(vaddubs, VADDUBS, 0x10000200) \
2181  /* Vector Add Unsigned Doubleword Modulo */ \
2182  V(vaddudm, VADDUDM, 0x100000C0) \
2183  /* Vector Add Unsigned Halfword Modulo */ \
2184  V(vadduhm, VADDUHM, 0x10000040) \
2185  /* Vector Add Unsigned Halfword Saturate */ \
2186  V(vadduhs, VADDUHS, 0x10000240) \
2187  /* Vector Add Unsigned Quadword Modulo */ \
2188  V(vadduqm, VADDUQM, 0x10000100) \
2189  /* Vector Add Unsigned Word Modulo */ \
2190  V(vadduwm, VADDUWM, 0x10000080) \
2191  /* Vector Add Unsigned Word Saturate */ \
2192  V(vadduws, VADDUWS, 0x10000280) \
2193  /* Vector Logical AND */ \
2194  V(vand, VAND, 0x10000404) \
2195  /* Vector Logical AND with Complement */ \
2196  V(vandc, VANDC, 0x10000444) \
2197  /* Vector Average Signed Byte */ \
2198  V(vavgsb, VAVGSB, 0x10000502) \
2199  /* Vector Average Signed Halfword */ \
2200  V(vavgsh, VAVGSH, 0x10000542) \
2201  /* Vector Average Signed Word */ \
2202  V(vavgsw, VAVGSW, 0x10000582) \
2203  /* Vector Average Unsigned Byte */ \
2204  V(vavgub, VAVGUB, 0x10000402) \
2205  /* Vector Average Unsigned Halfword */ \
2206  V(vavguh, VAVGUH, 0x10000442) \
2207  /* Vector Average Unsigned Word */ \
2208  V(vavguw, VAVGUW, 0x10000482) \
2209  /* Vector Bit Permute Quadword */ \
2210  V(vbpermq, VBPERMQ, 0x1000054C) \
2211  /* Vector Convert From Signed Fixed-Point Word To Single-Precision */ \
2212  V(vcfsx, VCFSX, 0x1000034A) \
2213  /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
2214  V(vcfux, VCFUX, 0x1000030A) \
2215  /* Vector Count Leading Zeros Byte */ \
2216  V(vclzb, VCLZB, 0x10000702) \
2217  /* Vector Count Leading Zeros Doubleword */ \
2218  V(vclzd, VCLZD, 0x100007C2) \
2219  /* Vector Count Leading Zeros Halfword */ \
2220  V(vclzh, VCLZH, 0x10000742) \
2221  /* Vector Count Leading Zeros Word */ \
2222  V(vclzw, VCLZW, 0x10000782) \
2223  /* Vector Convert From Single-Precision To Signed Fixed-Point Word */ \
2224  /* Saturate */ \
2225  V(vctsxs, VCTSXS, 0x100003CA) \
2226  /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */ \
2227  /* Saturate */ \
2228  V(vctuxs, VCTUXS, 0x1000038A) \
2229  /* Vector Equivalence */ \
2230  V(veqv, VEQV, 0x10000684) \
2231  /* Vector 2 Raised to the Exponent Estimate Single-Precision */ \
2232  V(vexptefp, VEXPTEFP, 0x1000018A) \
2233  /* Vector Gather Bits by Byte by Doubleword */ \
2234  V(vgbbd, VGBBD, 0x1000050C) \
2235  /* Vector Log Base 2 Estimate Single-Precision */ \
2236  V(vlogefp, VLOGEFP, 0x100001CA) \
2237  /* Vector Maximum Single-Precision */ \
2238  V(vmaxfp, VMAXFP, 0x1000040A) \
2239  /* Vector Maximum Signed Byte */ \
2240  V(vmaxsb, VMAXSB, 0x10000102) \
2241  /* Vector Maximum Signed Doubleword */ \
2242  V(vmaxsd, VMAXSD, 0x100001C2) \
2243  /* Vector Maximum Signed Halfword */ \
2244  V(vmaxsh, VMAXSH, 0x10000142) \
2245  /* Vector Maximum Signed Word */ \
2246  V(vmaxsw, VMAXSW, 0x10000182) \
2247  /* Vector Maximum Unsigned Byte */ \
2248  V(vmaxub, VMAXUB, 0x10000002) \
2249  /* Vector Maximum Unsigned Doubleword */ \
2250  V(vmaxud, VMAXUD, 0x100000C2) \
2251  /* Vector Maximum Unsigned Halfword */ \
2252  V(vmaxuh, VMAXUH, 0x10000042) \
2253  /* Vector Maximum Unsigned Word */ \
2254  V(vmaxuw, VMAXUW, 0x10000082) \
2255  /* Vector Minimum Single-Precision */ \
2256  V(vminfp, VMINFP, 0x1000044A) \
2257  /* Vector Minimum Signed Byte */ \
2258  V(vminsb, VMINSB, 0x10000302) \
2259  /* Vector Minimum Signed Halfword */ \
2260  V(vminsh, VMINSH, 0x10000342) \
2261  /* Vector Minimum Signed Word */ \
2262  V(vminsw, VMINSW, 0x10000382) \
2263  /* Vector Minimum Unsigned Byte */ \
2264  V(vminub, VMINUB, 0x10000202) \
2265  /* Vector Minimum Unsigned Doubleword */ \
2266  V(vminud, VMINUD, 0x100002C2) \
2267  /* Vector Minimum Unsigned Halfword */ \
2268  V(vminuh, VMINUH, 0x10000242) \
2269  /* Vector Minimum Unsigned Word */ \
2270  V(vminuw, VMINUW, 0x10000282) \
2271  /* Vector Merge High Byte */ \
2272  V(vmrghb, VMRGHB, 0x1000000C) \
2273  /* Vector Merge High Halfword */ \
2274  V(vmrghh, VMRGHH, 0x1000004C) \
2275  /* Vector Merge High Word */ \
2276  V(vmrghw, VMRGHW, 0x1000008C) \
2277  /* Vector Merge Low Byte */ \
2278  V(vmrglb, VMRGLB, 0x1000010C) \
2279  /* Vector Merge Low Halfword */ \
2280  V(vmrglh, VMRGLH, 0x1000014C) \
2281  /* Vector Merge Low Word */ \
2282  V(vmrglw, VMRGLW, 0x1000018C) \
2283  /* Vector Multiply Even Signed Byte */ \
2284  V(vmulesb, VMULESB, 0x10000308) \
2285  /* Vector Multiply Even Signed Halfword */ \
2286  V(vmulesh, VMULESH, 0x10000348) \
2287  /* Vector Multiply Even Signed Word */ \
2288  V(vmulesw, VMULESW, 0x10000388) \
2289  /* Vector Multiply Even Unsigned Byte */ \
2290  V(vmuleub, VMULEUB, 0x10000208) \
2291  /* Vector Multiply Even Unsigned Halfword */ \
2292  V(vmuleuh, VMULEUH, 0x10000248) \
2293  /* Vector Multiply Even Unsigned Word */ \
2294  V(vmuleuw, VMULEUW, 0x10000288) \
2295  /* Vector Multiply Odd Signed Byte */ \
2296  V(vmulosb, VMULOSB, 0x10000108) \
2297  /* Vector Multiply Odd Signed Halfword */ \
2298  V(vmulosh, VMULOSH, 0x10000148) \
2299  /* Vector Multiply Odd Signed Word */ \
2300  V(vmulosw, VMULOSW, 0x10000188) \
2301  /* Vector Multiply Odd Unsigned Byte */ \
2302  V(vmuloub, VMULOUB, 0x10000008) \
2303  /* Vector Multiply Odd Unsigned Halfword */ \
2304  V(vmulouh, VMULOUH, 0x10000048) \
2305  /* Vector Multiply Odd Unsigned Word */ \
2306  V(vmulouw, VMULOUW, 0x10000088) \
2307  /* Vector Multiply Unsigned Word Modulo */ \
2308  V(vmuluwm, VMULUWM, 0x10000089) \
2309  /* Vector NAND */ \
2310  V(vnand, VNAND, 0x10000584) \
2311  /* Vector Logical NOR */ \
2312  V(vnor, VNOR, 0x10000504) \
2313  /* Vector Logical OR */ \
2314  V(vor, VOR, 0x10000484) \
2315  /* Vector OR with Complement */ \
2316  V(vorc, VORC, 0x10000544) \
2317  /* Vector Pack Pixel */ \
2318  V(vpkpx, VPKPX, 0x1000030E) \
2319  /* Vector Pack Signed Doubleword Signed Saturate */ \
2320  V(vpksdss, VPKSDSS, 0x100005CE) \
2321  /* Vector Pack Signed Doubleword Unsigned Saturate */ \
2322  V(vpksdus, VPKSDUS, 0x1000054E) \
2323  /* Vector Pack Signed Halfword Signed Saturate */ \
2324  V(vpkshss, VPKSHSS, 0x1000018E) \
2325  /* Vector Pack Signed Halfword Unsigned Saturate */ \
2326  V(vpkshus, VPKSHUS, 0x1000010E) \
2327  /* Vector Pack Signed Word Signed Saturate */ \
2328  V(vpkswss, VPKSWSS, 0x100001CE) \
2329  /* Vector Pack Signed Word Unsigned Saturate */ \
2330  V(vpkswus, VPKSWUS, 0x1000014E) \
2331  /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
2332  V(vpkudum, VPKUDUM, 0x1000044E) \
2333  /* Vector Pack Unsigned Doubleword Unsigned Saturate */ \
2334  V(vpkudus, VPKUDUS, 0x100004CE) \
2335  /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
2336  V(vpkuhum, VPKUHUM, 0x1000000E) \
2337  /* Vector Pack Unsigned Halfword Unsigned Saturate */ \
2338  V(vpkuhus, VPKUHUS, 0x1000008E) \
2339  /* Vector Pack Unsigned Word Unsigned Modulo */ \
2340  V(vpkuwum, VPKUWUM, 0x1000004E) \
2341  /* Vector Pack Unsigned Word Unsigned Saturate */ \
2342  V(vpkuwus, VPKUWUS, 0x100000CE) \
2343  /* Vector Polynomial Multiply-Sum Byte */ \
2344  V(vpmsumb, VPMSUMB, 0x10000408) \
2345  /* Vector Polynomial Multiply-Sum Doubleword */ \
2346  V(vpmsumd, VPMSUMD, 0x100004C8) \
2347  /* Vector Polynomial Multiply-Sum Halfword */ \
2348  V(vpmsumh, VPMSUMH, 0x10000448) \
2349  /* Vector Polynomial Multiply-Sum Word */ \
2350  V(vpmsumw, VPMSUMW, 0x10000488) \
2351  /* Vector Population Count Byte */ \
2352  V(vpopcntb, VPOPCNTB, 0x10000703) \
2353  /* Vector Population Count Doubleword */ \
2354  V(vpopcntd, VPOPCNTD, 0x100007C3) \
2355  /* Vector Population Count Halfword */ \
2356  V(vpopcnth, VPOPCNTH, 0x10000743) \
2357  /* Vector Population Count Word */ \
2358  V(vpopcntw, VPOPCNTW, 0x10000783) \
2359  /* Vector Reciprocal Estimate Single-Precision */ \
2360  V(vrefp, VREFP, 0x1000010A) \
2361  /* Vector Round to Single-Precision Integer toward -Infinity */ \
2362  V(vrfim, VRFIM, 0x100002CA) \
2363  /* Vector Round to Single-Precision Integer Nearest */ \
2364  V(vrfin, VRFIN, 0x1000020A) \
2365  /* Vector Round to Single-Precision Integer toward +Infinity */ \
2366  V(vrfip, VRFIP, 0x1000028A) \
2367  /* Vector Round to Single-Precision Integer toward Zero */ \
2368  V(vrfiz, VRFIZ, 0x1000024A) \
2369  /* Vector Rotate Left Byte */ \
2370  V(vrlb, VRLB, 0x10000004) \
2371  /* Vector Rotate Left Doubleword */ \
2372  V(vrld, VRLD, 0x100000C4) \
2373  /* Vector Rotate Left Halfword */ \
2374  V(vrlh, VRLH, 0x10000044) \
2375  /* Vector Rotate Left Word */ \
2376  V(vrlw, VRLW, 0x10000084) \
2377  /* Vector Reciprocal Square Root Estimate Single-Precision */ \
2378  V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
2379  /* Vector Shift Left */ \
2380  V(vsl, VSL, 0x100001C4) \
2381  /* Vector Shift Left Byte */ \
2382  V(vslb, VSLB, 0x10000104) \
2383  /* Vector Shift Left Doubleword */ \
2384  V(vsld, VSLD, 0x100005C4) \
2385  /* Vector Shift Left Halfword */ \
2386  V(vslh, VSLH, 0x10000144) \
2387  /* Vector Shift Left by Octet */ \
2388  V(vslo, VSLO, 0x1000040C) \
2389  /* Vector Shift Left Word */ \
2390  V(vslw, VSLW, 0x10000184) \
2391  /* Vector Splat Byte */ \
2392  V(vspltb, VSPLTB, 0x1000020C) \
2393  /* Vector Splat Halfword */ \
2394  V(vsplth, VSPLTH, 0x1000024C) \
2395  /* Vector Splat Immediate Signed Byte */ \
2396  V(vspltisb, VSPLTISB, 0x1000030C) \
2397  /* Vector Splat Immediate Signed Halfword */ \
2398  V(vspltish, VSPLTISH, 0x1000034C) \
2399  /* Vector Splat Immediate Signed Word */ \
2400  V(vspltisw, VSPLTISW, 0x1000038C) \
2401  /* Vector Splat Word */ \
2402  V(vspltw, VSPLTW, 0x1000028C) \
2403  /* Vector Shift Right */ \
2404  V(vsr, VSR, 0x100002C4) \
2405  /* Vector Shift Right Algebraic Byte */ \
2406  V(vsrab, VSRAB, 0x10000304) \
2407  /* Vector Shift Right Algebraic Doubleword */ \
2408  V(vsrad, VSRAD, 0x100003C4) \
2409  /* Vector Shift Right Algebraic Halfword */ \
2410  V(vsrah, VSRAH, 0x10000344) \
2411  /* Vector Shift Right Algebraic Word */ \
2412  V(vsraw, VSRAW, 0x10000384) \
2413  /* Vector Shift Right Byte */ \
2414  V(vsrb, VSRB, 0x10000204) \
2415  /* Vector Shift Right Doubleword */ \
2416  V(vsrd, VSRD, 0x100006C4) \
2417  /* Vector Shift Right Halfword */ \
2418  V(vsrh, VSRH, 0x10000244) \
2419  /* Vector Shift Right by Octet */ \
2420  V(vsro, VSRO, 0x1000044C) \
2421  /* Vector Shift Right Word */ \
2422  V(vsrw, VSRW, 0x10000284) \
2423  /* Vector Subtract & write Carry Unsigned Quadword */ \
2424  V(vsubcuq, VSUBCUQ, 0x10000540) \
2425  /* Vector Subtract and Write Carry-Out Unsigned Word */ \
2426  V(vsubcuw, VSUBCUW, 0x10000580) \
2427  /* Vector Subtract Single-Precision */ \
2428  V(vsubfp, VSUBFP, 0x1000004A) \
2429  /* Vector Subtract Signed Byte Saturate */ \
2430  V(vsubsbs, VSUBSBS, 0x10000700) \
2431  /* Vector Subtract Signed Halfword Saturate */ \
2432  V(vsubshs, VSUBSHS, 0x10000740) \
2433  /* Vector Subtract Signed Word Saturate */ \
2434  V(vsubsws, VSUBSWS, 0x10000780) \
2435  /* Vector Subtract Unsigned Byte Modulo */ \
2436  V(vsububm, VSUBUBM, 0x10000400) \
2437  /* Vector Subtract Unsigned Byte Saturate */ \
2438  V(vsububs, VSUBUBS, 0x10000600) \
2439  /* Vector Subtract Unsigned Doubleword Modulo */ \
2440  V(vsubudm, VSUBUDM, 0x100004C0) \
2441  /* Vector Subtract Unsigned Halfword Modulo */ \
2442  V(vsubuhm, VSUBUHM, 0x10000440) \
2443  /* Vector Subtract Unsigned Halfword Saturate */ \
2444  V(vsubuhs, VSUBUHS, 0x10000640) \
2445  /* Vector Subtract Unsigned Quadword Modulo */ \
2446  V(vsubuqm, VSUBUQM, 0x10000500) \
2447  /* Vector Subtract Unsigned Word Modulo */ \
2448  V(vsubuwm, VSUBUWM, 0x10000480) \
2449  /* Vector Subtract Unsigned Word Saturate */ \
2450  V(vsubuws, VSUBUWS, 0x10000680) \
2451  /* Vector Sum across Half Signed Word Saturate */ \
2452  V(vsum2sws, VSUM2SWS, 0x10000688) \
2453  /* Vector Sum across Quarter Signed Byte Saturate */ \
2454  V(vsum4sbs, VSUM4SBS, 0x10000708) \
2455  /* Vector Sum across Quarter Signed Halfword Saturate */ \
2456  V(vsum4shs, VSUM4SHS, 0x10000648) \
2457  /* Vector Sum across Quarter Unsigned Byte Saturate */ \
2458  V(vsum4ubs, VSUM4UBS, 0x10000608) \
2459  /* Vector Sum across Signed Word Saturate */ \
2460  V(vsumsws, VSUMSWS, 0x10000788) \
2461  /* Vector Unpack High Pixel */ \
2462  V(vupkhpx, VUPKHPX, 0x1000034E) \
2463  /* Vector Unpack High Signed Byte */ \
2464  V(vupkhsb, VUPKHSB, 0x1000020E) \
2465  /* Vector Unpack High Signed Halfword */ \
2466  V(vupkhsh, VUPKHSH, 0x1000024E) \
2467  /* Vector Unpack High Signed Word */ \
2468  V(vupkhsw, VUPKHSW, 0x1000064E) \
2469  /* Vector Unpack Low Pixel */ \
2470  V(vupklpx, VUPKLPX, 0x100003CE) \
2471  /* Vector Unpack Low Signed Byte */ \
2472  V(vupklsb, VUPKLSB, 0x1000028E) \
2473  /* Vector Unpack Low Signed Halfword */ \
2474  V(vupklsh, VUPKLSH, 0x100002CE) \
2475  /* Vector Unpack Low Signed Word */ \
2476  V(vupklsw, VUPKLSW, 0x100006CE) \
2477  /* Vector Logical XOR */ \
2478  V(vxor, VXOR, 0x100004C4) \
2479  /* Vector AES Cipher */ \
2480  V(vcipher, VCIPHER, 0x10000508) \
2481  /* Vector AES Cipher Last */ \
2482  V(vcipherlast, VCIPHERLAST, 0x10000509) \
2483  /* Vector AES Inverse Cipher */ \
2484  V(vncipher, VNCIPHER, 0x10000548) \
2485  /* Vector AES Inverse Cipher Last */ \
2486  V(vncipherlast, VNCIPHERLAST, 0x10000549) \
2487  /* Vector AES S-Box */ \
2488  V(vsbox, VSBOX, 0x100005C8) \
2489  /* Vector SHA-512 Sigma Doubleword */ \
2490  V(vshasigmad, VSHASIGMAD, 0x100006C2) \
2491  /* Vector SHA-256 Sigma Word */ \
2492  V(vshasigmaw, VSHASIGMAW, 0x10000682) \
2493  /* Vector Merge Even Word */ \
2494  V(vmrgew, VMRGEW, 0x1000078C) \
2495  /* Vector Merge Odd Word */ \
2496  V(vmrgow, VMRGOW, 0x1000068C)
2497 
2498 #define PPC_XS_OPCODE_LIST(V) \
2499  /* Shift Right Algebraic Doubleword Immediate */ \
2500  V(sradi, SRADIX, 0x7C000674)
2501 
2502 #define PPC_MD_OPCODE_LIST(V) \
2503  /* Rotate Left Doubleword Immediate then Clear */ \
2504  V(rldic, RLDIC, 0x78000008) \
2505  /* Rotate Left Doubleword Immediate then Clear Left */ \
2506  V(rldicl, RLDICL, 0x78000000) \
2507  /* Rotate Left Doubleword Immediate then Clear Right */ \
2508  V(rldicr, RLDICR, 0x78000004) \
2509  /* Rotate Left Doubleword Immediate then Mask Insert */ \
2510  V(rldimi, RLDIMI, 0x7800000C)
2511 
2512 #define PPC_SC_OPCODE_LIST(V) \
2513  /* System Call */ \
2514  V(sc, SC, 0x44000002)
2515 
2516 #define PPC_OPCODE_LIST(V) \
2517  PPC_X_OPCODE_LIST(V) \
2518  PPC_X_OPCODE_EH_S_FORM_LIST(V) \
2519  PPC_XO_OPCODE_LIST(V) \
2520  PPC_DS_OPCODE_LIST(V) \
2521  PPC_DQ_OPCODE_LIST(V) \
2522  PPC_MDS_OPCODE_LIST(V) \
2523  PPC_MD_OPCODE_LIST(V) \
2524  PPC_XS_OPCODE_LIST(V) \
2525  PPC_D_OPCODE_LIST(V) \
2526  PPC_I_OPCODE_LIST(V) \
2527  PPC_B_OPCODE_LIST(V) \
2528  PPC_XL_OPCODE_LIST(V) \
2529  PPC_A_OPCODE_LIST(V) \
2530  PPC_XFX_OPCODE_LIST(V) \
2531  PPC_M_OPCODE_LIST(V) \
2532  PPC_SC_OPCODE_LIST(V) \
2533  PPC_Z23_OPCODE_LIST(V) \
2534  PPC_Z22_OPCODE_LIST(V) \
2535  PPC_EVX_OPCODE_LIST(V) \
2536  PPC_XFL_OPCODE_LIST(V) \
2537  PPC_EVS_OPCODE_LIST(V) \
2538  PPC_VX_OPCODE_LIST(V) \
2539  PPC_VA_OPCODE_LIST(V) \
2540  PPC_VC_OPCODE_LIST(V) \
2541  PPC_XX1_OPCODE_LIST(V) \
2542  PPC_XX2_OPCODE_LIST(V) \
2543  PPC_XX3_OPCODE_LIST(V) \
2544  PPC_XX4_OPCODE_LIST(V)
2545 
2546 enum Opcode : uint32_t {
2547 #define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
2548  opcode_name = opcode_value,
2549  PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
2550 #undef DECLARE_INSTRUCTION
2551  EXT1 = 0x4C000000, // Extended code set 1
2552  EXT2 = 0x7C000000, // Extended code set 2
2553  EXT3 = 0xEC000000, // Extended code set 3
2554  EXT4 = 0xFC000000, // Extended code set 4
2555  EXT5 = 0x78000000, // Extended code set 5 - 64bit only
2556  EXT6 = 0xF0000000, // Extended code set 6
2557 };
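// The opcode lists above follow the X-macro pattern: each instruction appears
// exactly once, and different definitions of V() project out different
// columns. A minimal sketch of another consumer (illustrative only, not part
// of the original header; kPPCMnemonics is a hypothetical name):
//
//   #define DECLARE_NAME(name, opcode_name, opcode_value) #name,
//   static const char* const kPPCMnemonics[] = {
//       PPC_OPCODE_LIST(DECLARE_NAME)};
//   #undef DECLARE_NAME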
2558 
2559 // Instruction encoding bits and masks.
2560 enum {
2561  // Instruction encoding bit
2562  B1 = 1 << 1,
2563  B2 = 1 << 2,
2564  B3 = 1 << 3,
2565  B4 = 1 << 4,
2566  B5 = 1 << 5,
2567  B7 = 1 << 7,
2568  B8 = 1 << 8,
2569  B9 = 1 << 9,
2570  B12 = 1 << 12,
2571  B18 = 1 << 18,
2572  B19 = 1 << 19,
2573  B20 = 1 << 20,
2574  B22 = 1 << 22,
2575  B23 = 1 << 23,
2576  B24 = 1 << 24,
2577  B25 = 1 << 25,
2578  B26 = 1 << 26,
2579  B27 = 1 << 27,
2580  B28 = 1 << 28,
2581  B6 = 1 << 6,
2582  B10 = 1 << 10,
2583  B11 = 1 << 11,
2584  B16 = 1 << 16,
2585  B17 = 1 << 17,
2586  B21 = 1 << 21,
2587 
2588  // Instruction bit masks
2589  kCondMask = 0x1F << 21,
2590  kOff12Mask = (1 << 12) - 1,
2591  kImm24Mask = (1 << 24) - 1,
2592  kOff16Mask = (1 << 16) - 1,
2593  kImm16Mask = (1 << 16) - 1,
2594  kImm22Mask = (1 << 22) - 1,
2595  kImm26Mask = (1 << 26) - 1,
2596  kBOfieldMask = 0x1f << 21,
2597  kOpcodeMask = 0x3f << 26,
2598  kExt1OpcodeMask = 0x3ff << 1,
2599  kExt2OpcodeMask = 0x3ff << 1,
2600  kExt2OpcodeVariant2Mask = 0x1ff << 2,
2601  kExt5OpcodeMask = 0x3 << 2,
2602  kBOMask = 0x1f << 21,
2603  kBIMask = 0x1F << 16,
2604  kBDMask = 0x3fff << 2,  // 14-bit branch displacement (BD) field
2605  kAAMask = 0x01 << 1,
2606  kLKMask = 0x01,
2607  kRCMask = 0x01,
2608  kTOMask = 0x1f << 21
2609 };
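// Illustrative sketch (not part of the original header): these masks are
// sufficient to classify a raw instruction word. The primary opcode occupies
// bits 31-26; EXT2 instructions keep an extended opcode in bits 10-1.
//
//   Opcode PrimaryOpcode(Instr instr) {
//     return static_cast<Opcode>(instr & kOpcodeMask);
//   }
//   // If the primary opcode is EXT2, (instr & kExt2OpcodeMask) distinguishes
//   // e.g. ADDX from SUBFX among the EXT2 entries above.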
2610 
2611 // -----------------------------------------------------------------------------
2612 // Addressing modes and instruction variants.
2613 
2614 // Overflow Exception
2615 enum OEBit {
2616  SetOE = 1 << 10, // Set overflow exception
2617  LeaveOE = 0 << 10 // No overflow exception
2618 };
2619 
2620 // Record bit
2621 enum RCBit { // Bit 0
2622  SetRC = 1, // LT,GT,EQ,SO
2623  LeaveRC = 0 // None
2624 };
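// Together, OEBit and RCBit produce the "& record OV" and "& record CR0"
// variants named in the opcode lists above. A sketch derived purely from the
// constants in this file:
//
//   Instr add_record = ADDX | SetRC;           // "add."  : records CR0
//   Instr addo = ADDX | SetOE;                 // equals ADDO (0x7C000614)
//   Instr addo_record = ADDX | SetOE | SetRC;  // "addo." : records OV + CR0
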
2625 // Exclusive Access hint bit
2626 enum EHBit { // Bit 0
2627  SetEH = 1, // Exclusive Access
2628  LeaveEH = 0 // Atomic Update
2629 };
2630 
2631 // Link bit
2632 enum LKBit { // Bit 0
2633  SetLK = 1, // Load effective address of next instruction
2634  LeaveLK = 0 // No action
2635 };
2636 
2637 enum BOfield { // Bits 25-21
2638  DCBNZF = 0 << 21, // Decrement CTR; branch if CTR != 0 and condition false
2639  DCBEZF = 2 << 21, // Decrement CTR; branch if CTR == 0 and condition false
2640  BF = 4 << 21, // Branch if condition false
2641  DCBNZT = 8 << 21, // Decrement CTR; branch if CTR != 0 and condition true
2642  DCBEZT = 10 << 21, // Decrement CTR; branch if CTR == 0 and condition true
2643  BT = 12 << 21, // Branch if condition true
2644  DCBNZ = 16 << 21, // Decrement CTR; branch if CTR != 0
2645  DCBEZ = 18 << 21, // Decrement CTR; branch if CTR == 0
2646  BA = 20 << 21 // Branch always
2647 };
2648 
2649 #if V8_OS_AIX
2650 #undef CR_LT
2651 #undef CR_GT
2652 #undef CR_EQ
2653 #undef CR_SO
2654 #endif
2655 
2656 enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
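
// BOfield and CRBit combine to select a branch condition. For example,
// "branch if cr0's EQ bit is set" encodes BO = BT and BI = CR_EQ, so the
// BO/BI portion of a bc instruction would be:
//
//   uint32_t bo_bi = BT | (CR_EQ << 16);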
2657 
2658 #define CRWIDTH 4
2659 
2660 // These are the documented bit positions biased down by 32
2661 enum FPSCRBit {
2662  VXSOFT = 21, // 53: Software-Defined Condition
2663  VXSQRT = 22, // 54: Invalid Square Root
2664  VXCVI = 23 // 55: Invalid Integer Convert
2665 };
2666 
2667 // -----------------------------------------------------------------------------
2668 // Supervisor Call (svc) specific support.
2669 
2670 // Special Software Interrupt codes when used in the presence of the PPC
2671 // simulator.
2672 // svc (formerly swi) provides a 24bit immediate value. Use bits 22:0 for
2673 // standard SoftwareInterruptCodes. Bit 23 is reserved for the stop feature.
2674 enum SoftwareInterruptCodes {
2675  // transition to C code
2676  kCallRtRedirected = 0x10,
2677  // break point
2678  kBreakpoint = 0x821008, // bits 23-0 of 0x7d821008 = twge r2, r2
2679  // stop
2680  kStopCode = 1 << 23
2681 };
2682 const uint32_t kStopCodeMask = kStopCode - 1;
2683 const uint32_t kMaxStopCode = kStopCode - 1;
2684 const int32_t kDefaultStopCode = -1;
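
// Stop codes occupy the low 23 bits below the kStopCode flag, so encoding
// and decoding a stop with code 42 looks like (a sketch):
//
//   uint32_t svc = kStopCode | 42;        // encode
//   uint32_t code = svc & kStopCodeMask;  // decode: 42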
2685 
2686 // FP rounding modes.
2687 enum FPRoundingMode {
2688  RN = 0, // Round to Nearest.
2689  RZ = 1, // Round towards zero.
2690  RP = 2, // Round towards Plus Infinity.
2691  RM = 3, // Round towards Minus Infinity.
2692 
2693  // Aliases.
2694  kRoundToNearest = RN,
2695  kRoundToZero = RZ,
2696  kRoundToPlusInf = RP,
2697  kRoundToMinusInf = RM
2698 };
2699 
2700 const uint32_t kFPRoundingModeMask = 3;
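
// The rounding mode is held in the two low-order (RN) bits of the FPSCR,
// so extracting it from a hypothetical fpscr value is a single mask
// (a sketch):
//
//   FPRoundingMode mode =
//       static_cast<FPRoundingMode>(fpscr & kFPRoundingModeMask);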
2701 
2702 enum CheckForInexactConversion {
2703  kCheckForInexactConversion,
2704  kDontCheckForInexactConversion
2705 };
2706 
2707 // -----------------------------------------------------------------------------
2708 // Specific instructions, constants, and masks.
2709 // These constants are declared in assembler-ppc.cc, as they use named registers
2710 // and other constants.
2711 
2712 
2713 // add(sp, sp, 4) instruction (aka Pop())
2714 extern const Instr kPopInstruction;
2715 
2716 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
2717 // register r is not encoded.
2718 extern const Instr kPushRegPattern;
2719 
2720 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
2721 // register r is not encoded.
2722 extern const Instr kPopRegPattern;
2723 
2724 // use TWI to indicate redirection call for simulation mode
2725 const Instr rtCallRedirInstr = TWI;
2726 
2727 // -----------------------------------------------------------------------------
2728 // Instruction abstraction.
2729 
2730 // The class Instruction enables access to individual fields defined in the PPC
2731 // architecture instruction set encoding.
2732 // Note that the Assembler uses typedef uint32_t Instr.
2733 //
2734 // Example: Test whether the instruction at ptr sets the condition code
2735 // bits (i.e. whether its record bit, Rc, is set).
2736 //
2737 // bool InstructionSetsConditionCodes(byte* ptr) {
2738 //   Instruction* instr = Instruction::At(ptr);
2739 //   // Rc is bit 0 of the encoded instruction.
2740 //   return instr->Bit(0) == SetRC;
2741 // }
2742 //
2743 
2744 constexpr uint8_t kInstrSize = 4;
2745 constexpr uint8_t kInstrSizeLog2 = 2;
2746 constexpr uint8_t kPcLoadDelta = 8;
2747 
2748 class Instruction {
2749  public:
2750 // Helper macro to define static accessors.
2751 // We use the cast to char* trick to bypass the strict anti-aliasing rules.
2752 #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
2753  static inline return_type Name(Instr instr) { \
2754  char* temp = reinterpret_cast<char*>(&instr); \
2755  return reinterpret_cast<Instruction*>(temp)->Name(); \
2756  }
2757 
2758 #define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
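
// For example, DECLARE_STATIC_ACCESSOR(RAValue) below expands to:
//
//   static inline int RAValue(Instr instr) {
//     char* temp = reinterpret_cast<char*>(&instr);
//     return reinterpret_cast<Instruction*>(temp)->RAValue();
//   }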
2759 
2760  // Get the raw instruction bits.
2761  inline Instr InstructionBits() const {
2762  return *reinterpret_cast<const Instr*>(this);
2763  }
2764 
2765  // Set the raw instruction bits to value.
2766  inline void SetInstructionBits(Instr value) {
2767  *reinterpret_cast<Instr*>(this) = value;
2768  }
2769 
2770  // Read one particular bit out of the instruction bits.
2771  inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
2772 
2773  // Read a bit field's value out of the instruction bits.
2774  inline int Bits(int hi, int lo) const {
2775  return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
2776  }
2777 
2778  // Read a bit field out of the instruction bits.
2779  inline uint32_t BitField(int hi, int lo) const {
2780  return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
2781  }
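
  // Worked example (instruction value chosen for illustration): for
  // "addi r3, r4, 1", encoded as 0x38640001,
  //   Bits(25, 21)     == 3           (the RT field)
  //   Bits(20, 16)     == 4           (the RA field)
  //   BitField(31, 26) == 0x38000000  (the primary opcode, ADDI)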
2782 
2783  // Static support.
2784 
2785  // Read one particular bit out of the instruction bits.
2786  static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
2787 
2788  // Read the value of a bit field out of the instruction bits.
2789  static inline int Bits(Instr instr, int hi, int lo) {
2790  return (instr >> lo) & ((2 << (hi - lo)) - 1);
2791  }
2792 
2793 
2794  // Read a bit field out of the instruction bits.
2795  static inline uint32_t BitField(Instr instr, int hi, int lo) {
2796  return instr & (((2 << (hi - lo)) - 1) << lo);
2797  }
2798 
2799  inline int RSValue() const { return Bits(25, 21); }
2800  inline int RTValue() const { return Bits(25, 21); }
2801  inline int RAValue() const { return Bits(20, 16); }
2802  DECLARE_STATIC_ACCESSOR(RAValue);
2803  inline int RBValue() const { return Bits(15, 11); }
2804  DECLARE_STATIC_ACCESSOR(RBValue);
2805  inline int RCValue() const { return Bits(10, 6); }
2806  DECLARE_STATIC_ACCESSOR(RCValue);
2807 
2808  inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
2809  inline uint32_t OpcodeField() const {
2810  return static_cast<Opcode>(BitField(31, 26));
2811  }
2812 
2813 #define OPCODE_CASES(name, opcode_name, opcode_value) \
2814  case opcode_name:
2815 
2816  inline Opcode OpcodeBase() const {
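  // Both start as the 6-bit primary opcode field; "opcode" is refined with
  // form-specific extended-opcode bits below, while "extcode" keeps the
  // primary opcode for the next candidate form.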
2817  uint32_t opcode = OpcodeField();
2818  uint32_t extcode = OpcodeField();
2819  switch (opcode) {
2820  PPC_D_OPCODE_LIST(OPCODE_CASES)
2821  PPC_I_OPCODE_LIST(OPCODE_CASES)
2822  PPC_B_OPCODE_LIST(OPCODE_CASES)
2823  PPC_M_OPCODE_LIST(OPCODE_CASES)
2824  return static_cast<Opcode>(opcode);
2825  }
2826 
2827  opcode = extcode | BitField(10, 0);
2828  switch (opcode) {
2829  PPC_VX_OPCODE_LIST(OPCODE_CASES)
2830  PPC_X_OPCODE_EH_S_FORM_LIST(OPCODE_CASES)
2831  return static_cast<Opcode>(opcode);
2832  }
2833  opcode = extcode | BitField(9, 0);
2834  switch (opcode) {
2835  PPC_VC_OPCODE_LIST(OPCODE_CASES)
2836  return static_cast<Opcode>(opcode);
2837  }
2838  opcode = extcode | BitField(10, 1) | BitField(20, 20);
2839  switch (opcode) {
2840  PPC_XFX_OPCODE_LIST(OPCODE_CASES)
2841  return static_cast<Opcode>(opcode);
2842  }
2843  opcode = extcode | BitField(10, 1);
2844  switch (opcode) {
2845  PPC_X_OPCODE_LIST(OPCODE_CASES)
2846  PPC_XL_OPCODE_LIST(OPCODE_CASES)
2847  PPC_XFL_OPCODE_LIST(OPCODE_CASES)
2848  PPC_XX1_OPCODE_LIST(OPCODE_CASES)
2849  PPC_XX2_OPCODE_LIST(OPCODE_CASES)
2850  PPC_EVX_OPCODE_LIST(OPCODE_CASES)
2851  return static_cast<Opcode>(opcode);
2852  }
2853  opcode = extcode | BitField(9, 1);
2854  switch (opcode) {
2855  PPC_XO_OPCODE_LIST(OPCODE_CASES)
2856  PPC_Z22_OPCODE_LIST(OPCODE_CASES)
2857  return static_cast<Opcode>(opcode);
2858  }
2859  opcode = extcode | BitField(10, 2);
2860  switch (opcode) {
2861  PPC_XS_OPCODE_LIST(OPCODE_CASES)
2862  return static_cast<Opcode>(opcode);
2863  }
2864  opcode = extcode | BitField(10, 3);
2865  switch (opcode) {
2866  PPC_EVS_OPCODE_LIST(OPCODE_CASES)
2867  PPC_XX3_OPCODE_LIST(OPCODE_CASES)
2868  return static_cast<Opcode>(opcode);
2869  }
2870  opcode = extcode | BitField(8, 1);
2871  switch (opcode) {
2872  PPC_Z23_OPCODE_LIST(OPCODE_CASES)
2873  return static_cast<Opcode>(opcode);
2874  }
2875  opcode = extcode | BitField(5, 0);
2876  switch (opcode) {
2877  PPC_VA_OPCODE_LIST(OPCODE_CASES)
2878  return static_cast<Opcode>(opcode);
2879  }
2880  opcode = extcode | BitField(5, 1);
2881  switch (opcode) {
2882  PPC_A_OPCODE_LIST(OPCODE_CASES)
2883  return static_cast<Opcode>(opcode);
2884  }
2885  opcode = extcode | BitField(4, 1);
2886  switch (opcode) {
2887  PPC_MDS_OPCODE_LIST(OPCODE_CASES)
2888  return static_cast<Opcode>(opcode);
2889  }
2890  opcode = extcode | BitField(4, 2);
2891  switch (opcode) {
2892  PPC_MD_OPCODE_LIST(OPCODE_CASES)
2893  return static_cast<Opcode>(opcode);
2894  }
2895  opcode = extcode | BitField(5, 4);
2896  switch (opcode) {
2897  PPC_XX4_OPCODE_LIST(OPCODE_CASES)
2898  return static_cast<Opcode>(opcode);
2899  }
2900  opcode = extcode | BitField(2, 0);
2901  switch (opcode) {
2902  PPC_DQ_OPCODE_LIST(OPCODE_CASES)
2903  return static_cast<Opcode>(opcode);
2904  }
2905  opcode = extcode | BitField(1, 0);
2906  switch (opcode) {
2907  PPC_DS_OPCODE_LIST(OPCODE_CASES)
2908  return static_cast<Opcode>(opcode);
2909  }
2910  opcode = extcode | BitField(1, 1);
2911  switch (opcode) {
2912  PPC_SC_OPCODE_LIST(OPCODE_CASES)
2913  return static_cast<Opcode>(opcode);
2914  }
2915  UNIMPLEMENTED();
2916  return static_cast<Opcode>(0);
2917  }
2918 
2919 #undef OPCODE_CASES
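
  // Decode walk-through (a sketch): for the sc instruction, 0x44000002,
  // OpcodeField() yields 0x44000000, which matches none of the D/I/B/M
  // primary-opcode lists, so the cascade above tries successively narrower
  // extended-opcode fields until
  //   extcode | BitField(1, 1) == 0x44000002 == SC
  // matches in PPC_SC_OPCODE_LIST.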
2920 
2921  // Fields used in Software interrupt instructions
2922  inline SoftwareInterruptCodes SvcValue() const {
2923  return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
2924  }
2925 
2926  // Instructions are read out of a code stream. The only way to get a
2927  // reference to an instruction is to convert a pointer. There is no way
2928  // to allocate or create instances of class Instruction.
2929  // Use the At(pc) function to create references to Instruction.
2930  static Instruction* At(byte* pc) {
2931  return reinterpret_cast<Instruction*>(pc);
2932  }
2933 
2934 
2935  private:
2936  // We need to prevent the creation of instances of class Instruction.
2937  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
2938 };
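
// Usage sketch (pc is assumed to point at a valid PPC instruction):
//
//   Instruction* instr = Instruction::At(pc);
//   if (instr->OpcodeField() == EXT2) {
//     // ... decode the extended opcode via instr->OpcodeBase() ...
//   }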
2939 
2940 
2941 // Helper functions for converting between register numbers and names.
2942 class Registers {
2943  public:
2944  // Lookup the register number for the name provided.
2945  static int Number(const char* name);
2946 
2947  private:
2948  static const char* names_[kNumRegisters];
2949 };
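
// Usage sketch ("r13" is an illustrative name; the name table lives in the
// corresponding .cc file):
//
//   int reg = Registers::Number("r13");  // kNoRegister if the name is unknown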
2950 
2951 // Helper functions for converting between FP register numbers and names.
2952 class DoubleRegisters {
2953  public:
2954  // Lookup the register number for the name provided.
2955  static int Number(const char* name);
2956 
2957  private:
2958  static const char* names_[kNumDoubleRegisters];
2959 };
2960 } // namespace internal
2961 } // namespace v8
2962 
2963 #endif // V8_PPC_CONSTANTS_PPC_H_