V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
assembler-mips64.cc
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/mips64/assembler-mips64.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/string-constants.h"

namespace v8 {
namespace internal {


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS64R6) && defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS64R6)
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#else
  if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif
#endif
}
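
// Illustrative sketch (editor's addition): once ProbeImpl has populated
// supported_, feature-dependent code generation is typically guarded like
// this (CpuFeatureScope is assumed from the wider V8 codebase, not defined
// in this file):
//
//   if (CpuFeatures::IsSupported(MIPS_SIMD)) {
//     CpuFeatureScope scope(&assm, MIPS_SIMD);
//     // ... emit MSA instructions ...
//   }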


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
      0,   // zero_reg
      1,   // at
      2,   // v0
      3,   // v1
      4,   // a0
      5,   // a1
      6,   // a2
      7,   // a3
      8,   // a4
      9,   // a5
      10,  // a6
      11,  // a7
      12,  // t0
      13,  // t1
      14,  // t2
      15,  // t3
      16,  // s0
      17,  // s1
      18,  // s2
      19,  // s3
      20,  // s4
      21,  // s5
      22,  // s6
      23,  // s7
      24,  // t8
      25,  // t9
      26,  // k0
      27,  // k1
      28,  // gp
      29,  // sp
      30,  // fp
      31,  // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
      zero_reg,
      at,
      v0, v1,
      a0, a1, a2, a3, a4, a5, a6, a7,
      t0, t1, t2, t3,
      s0, s1, s2, s3, s4, s5, s6, s7,
      t8, t9,
      k0, k1,
      gp,
      sp,
      fp,
      ra
  };
  return kRegisters[num];
}
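
// Round-trip sketch (editor's addition): the two tables above are inverses
// over the 32 general-purpose registers, so for example:
//
//   DCHECK_EQ(ToNumber(a0), 4);              // a0 is GPR 4 in the table.
//   DCHECK(ToRegister(ToNumber(s3)) == s3);  // and back again.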


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}
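
// Editor's note (illustrative, assumed from the patching code later in this
// file): a "specially coded" 64-bit target inside a code object is
// materialized by an instruction sequence rather than stored as a data word,
// along the lines of:
//
//   lui   at, (addr >> 32) & 0xFFFF   // bits 47..32 (sign-extended)
//   ori   at, at, (addr >> 16) & 0xFFFF
//   dsll  at, at, 16
//   ori   at, at, addr & 0xFFFF       // bits 15..0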


bool RelocInfo::IsInConstantPool() {
  return false;
}

int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
  DCHECK(IsRuntimeEntry(rmode_));
  return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips64-inl.h for inlined constructors.

Operand::Operand(Handle<HeapObject> handle)
    : rm_(no_reg), rmode_(RelocInfo::EMBEDDED_OBJECT) {
  value_.immediate = static_cast<intptr_t>(handle.address());
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}

Operand Operand::EmbeddedCode(CodeStub* stub) {
  Operand result(0, RelocInfo::CODE_TARGET);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(stub);
  return result;
}

Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}

MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
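
// Usage sketch (editor's addition): MemOperand models the only MIPS
// addressing mode, base register plus signed 16-bit offset. A stack slot two
// pointers above sp can be written either way (offset_zero is assumed from
// the OffsetAddend enum in the header):
//
//   MemOperand slot(sp, 2 * kPointerSize);
//   MemOperand same_slot(sp, kPointerSize, 2, MemOperand::offset_zero);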

void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber:
        object =
            isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
        break;
      case HeapObjectRequest::kCodeStub:
        request.code_stub()->set_isolate(isolate);
        object = request.code_stub()->GetCode();
        break;
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        object = str->AllocateStringConstant(isolate);
        break;
      }
    }
    Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
    set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
  }
}

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// daddiu(sp, sp, 8), i.e. the Pop() operation, or the sp post-increment that
// forms part of a Pop(r) operation.
const Instr kPopInstruction = DADDIU | (sp.code() << kRsShift) |
                              (sp.code() << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8), the sp pre-decrement that forms part of a Push(r)
// operation.
const Instr kPushInstruction = DADDIU | (sp.code() << kRsShift) |
                               (sp.code() << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SD | (sp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT
// Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LD | (sp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern =
    LW | (fp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern =
    SW | (fp.code() << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern =
    LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern =
    SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
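
// Worked example (editor's addition): a Push(r) expands to the two
// instructions matched by the patterns above,
//
//   daddiu sp, sp, -8   // kPushInstruction: pre-decrement sp
//   sd     r, 0(sp)     // kPushRegPattern, with the rt field left open
//
// which is why IsPush() further down masks the rt field away
// (instr & ~kRtMask) before comparing against kPushRegPattern.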

Assembler::Assembler(const AssemblerOptions& options, void* buffer,
                     int buffer_size)
    : AssemblerBase(options, buffer, buffer_size),
      scratch_register_list_(at.bit()) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
                           ? kMaxInt
                           : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;
}

void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
  desc->origin = this;
  desc->constant_pool_size = 0;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // There is no known advantage to aligning branch/call targets to more than
  // a single instruction.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}


Register Assembler::GetRsReg(Instr instr) {
  return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}


Register Assembler::GetRdReg(Instr instr) {
  return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by an offset of -1 stored in the instruction,
// which is an otherwise illegal value (a branch to -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in code
// it is converted to an 18-bit value addressing bytes, hence the -4 below.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;

bool Assembler::IsMsaBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs_field = GetRsField(instr);
  if (opcode == COP1) {
    switch (rs_field) {
      case BZ_V:
      case BZ_B:
      case BZ_H:
      case BZ_W:
      case BZ_D:
      case BNZ_V:
      case BNZ_B:
      case BNZ_H:
      case BNZ_W:
      case BNZ_D:
        return true;
      default:
        return false;
    }
  } else {
    return false;
  }
}

bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}


bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}

bool Assembler::IsNal(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
}

bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC.
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label constant in the regexp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}


bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}


bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}


bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is an OR with zero_reg argument (aka MOV).
  bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
             rs_field == rs_reg && rt_field == 0;
  return res;
}

bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
         (opcode == SPECIAL && rt_field == 0 &&
          ((function_field == JALR) ||
           (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK_LT(type, 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // A traditional MIPS nop == sll(zero_reg, zero_reg, 0).
  // When marking a non-zero type, use sll(zero_reg, at, type)
  // to avoid the ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
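
// Encoding sketch (editor's addition): sll has opcode SPECIAL and function
// field SLL, both of which encode as zero, so the canonical nop is the
// all-zero instruction word; typed nops differ only in the rt and sa fields:
//
//   nop    == sll zero_reg, zero_reg, 0  == 0x00000000
//   nop(1) == sll zero_reg, at, 1        == 0x00010040  (rt == 1, sa == 1)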


int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
                     (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (kArchVariant == kMips64r6) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}


static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
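
// Worked example (editor's addition): with a 16-bit offset field holding
// 0xFFFF, bits == 16 and mask == 0xFFFF, so
//
//   imm = (0xFFFF << 16) >> 14             // arithmetic right shift
//       = int32_t(0xFFFF0000) >> 14 = -4.
//
// The left shift parks the sign bit at bit 31; the arithmetic right shift
// then sign-extends while scaling words to bytes. Note that -4 is exactly
// kEndOfChain, which is how an offset of -1 words doubles as the link-chain
// terminator described above.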


int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr) ||
         IsMov(instr, t8, ra));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    int32_t imm32;
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));
    imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm32 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    }
    return pos + Assembler::kLongBranchPCOffset + imm32;
  } else if (IsLui(instr)) {
    if (IsNal(instr_at(pos + kInstrSize))) {
      int32_t imm32;
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
      imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
      if (imm32 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or
        // pos.
        return kEndOfChain;
      }
      return pos + Assembler::kLongBranchPCOffset + imm32;
    } else {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 1 * kInstrSize);
      Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
      DCHECK(IsOri(instr_ori));
      DCHECK(IsOri(instr_ori2));

      // TODO(plind): create named constants for the shift values.
      int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
      imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
      imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
      // Sign extend address.
      imm >>= 16;

      if (imm == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or
        // pos.
        return kEndOfChain;
      } else {
        uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
        DCHECK(instr_address - imm < INT_MAX);
        int delta = static_cast<int>(instr_address - imm);
        DCHECK(pos > delta);
        return pos - delta;
      }
    }
  } else {
    DCHECK(IsJ(instr) || IsJal(instr));
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // Sign extend 28-bit offset.
      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
      return pos + delta;
    }
  }
}


static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK_EQ(imm & 3, 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
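
// Worked example (editor's addition): patching a branch at pos == 100 to
// reach target_pos == 140 gives imm = 140 - (100 + kBranchPCOffset). With
// kBranchPCOffset == 4 (offsets are taken relative to the delay-slot
// instruction) that is 36 bytes, or 9 words after the >> 2, so the low
// 16 bits of the patched instruction become 0x0009.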


void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code pointer of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    if (IsNal(instr_at(pos + kInstrSize))) {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);
      if (is_int16(imm + Assembler::kLongBranchPCOffset -
                   Assembler::kBranchPCOffset)) {
        // Optimize by converting to a regular branch and link with 16-bit
        // offset.
        Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
        instr_b = SetBranchOffset(pos, target_pos, instr_b);
        // Correct ra register to point to one instruction after the jalr
        // from TurboAssembler::BranchAndLinkLong.
        Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                        kOptimizedBranchAndLinkLongReturnOffset;

        instr_at_put(pos, instr_b);
        instr_at_put(pos + 1 * kInstrSize, instr_a);
      } else {
        instr_lui &= ~kImm16Mask;
        instr_ori &= ~kImm16Mask;

        instr_at_put(pos + 0 * kInstrSize,
                     instr_lui | ((imm >> kLuiShift) & kImm16Mask));
        instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
      }
    } else {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 1 * kInstrSize);
      Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
      DCHECK(IsOri(instr_ori));
      DCHECK(IsOri(instr_ori2));

      uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;
      instr_ori2 &= ~kImm16Mask;

      instr_at_put(pos + 0 * kInstrSize,
                   instr_lui | ((imm >> 32) & kImm16Mask));
      instr_at_put(pos + 1 * kInstrSize,
                   instr_ori | ((imm >> 16) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
    }
  } else if (IsMov(instr, t8, ra)) {
    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));

    int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);

    if (is_int16(imm_short)) {
      // Optimize by converting to a regular branch with 16-bit offset.
      Instr instr_b = BEQ;
      instr_b = SetBranchOffset(pos, target_pos, instr_b);

      Instr instr_j = instr_at(pos + 5 * kInstrSize);
      Instr instr_branch_delay;

      if (IsJump(instr_j)) {
        instr_branch_delay = instr_at(pos + 6 * kInstrSize);
      } else {
        instr_branch_delay = instr_at(pos + 7 * kInstrSize);
      }
      instr_at_put(pos, instr_b);
      instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
    } else {
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;

      instr_at_put(pos + 2 * kInstrSize,
                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
    }
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK_EQ(imm28 & 3, 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When the code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK_EQ(imm28 & 3, 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When the code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}

void Assembler::print(const Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l;
    l.link_to(L->pos());
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, is_internal_reference(&l));
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target
                           // at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK_NE(trampoline_pos, kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr) || IsMov(instr, t8, ra));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}


bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return ((pc_offset() - L->pos()) <
          (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
}


bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}


int Assembler::BranchOffset(Instr instr) {
  // On pre-R6 architectures, and for all other R6 branches, the offset is
  // 16 bits.
  int bits = OffsetSize::kOffset16;

  if (kArchVariant == kMips64r6) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  return (1 << (bits + 2 - 1)) - 1;
}
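
// Worked example (editor's addition): BranchOffset() returns the maximum
// reach in bytes for the given branch. For a 16-bit offset, bits == 16 and
// the result is (1 << 17) - 1 == 131071, i.e. roughly the 2^15-instruction
// reach of a signed 16-bit word offset expressed as a byte distance.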


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in MIPS's 16-bit immediate-offset instruction
// field. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}
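
// Encoding sketch (editor's addition): the immediate emitters simply OR the
// fields of the classic MIPS I-type layout together,
//
//   | opcode (6) | rs (5) | rt (5) | imm16 (16) |
//
// so beq(a0, a1, offset) further down reduces to
// GenInstrImmediate(BEQ, a0, a1, offset), i.e.
// BEQ | (4 << kRsShift) | (5 << kRtShift) | (offset & kImm16Mask).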

void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt,
                                  int32_t offset9, int bit6,
                                  SecondaryField func) {
  DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) &&
         is_uint1(bit6));
  Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) |
                ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift |
                func;
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// MSA instructions
void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
                              MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
  Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
                              int32_t imm5, MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
                 (operation == CEQI) || (operation == CLTI_S) ||
                 (operation == CLEI_S)
             ? is_int5(imm5)
             : is_uint5(imm5));
  Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
                               uint32_t m, MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
  Instr instr = MSA | operation | df | (m << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
                               int32_t imm10, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wd.is_valid() && is_int10(imm10));
  Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
                (wd.code() << kWdShift);
  emit(instr);
}

template <typename RegType>
void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
                              RegType t, MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | df | (t.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

template <typename DstType, typename SrcType>
void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
                               uint32_t n, SrcType src, DstType dst) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
  Instr instr = MSA | operation | df | (n << kWtShift) |
                (src.code() << kWsShift) | (dst.code() << kWdShift) |
                MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
                               MSARegister wt, MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  DCHECK_LT(df, 2);
  Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
                               MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
                                Register rs, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
  Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
                (rs.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
                              MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
                               MSARegister ws, MSARegister wd) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
                                  int32_t offset16) {
  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && is_int16(offset16));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr =
      COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;
  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
  DCHECK_EQ(imm & 3, 0);

  return imm;
}
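
// Behavioral sketch (editor's addition): for a still-unbound label,
// jump_address links the current position into the label's chain and returns
// the kEndOfJumpChain sentinel (0); callers emit a placeholder lui/ori
// sequence that bind_to()/target_at_put() later rewrite with the real
// absolute address, buffer_ + L->pos().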


uint64_t Assembler::jump_offset(Label* L) {
  int64_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      return kEndOfJumpChain;
    }
  }
  int64_t imm = target_pos - (pc_offset() + pad);
  DCHECK_EQ(imm & 3, 0);

  return static_cast<uint64_t>(imm);
}

uint64_t Assembler::branch_long_offset(Label* L) {
  int64_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
  DCHECK_EQ(offset & 3, 0);

  return static_cast<uint64_t>(offset);
}

int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK_EQ(offset & 3, 0);

  return offset;
}
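
// Worked example (editor's addition): a branch emitted at pc_offset() 100
// back to a label bound at position 40 (no compact-branch pad) computes
// offset = 40 - (100 + kBranchPCOffset + 0); with kBranchPCOffset == 4 that
// is -64 bytes, which SetBranchOffset later scales to a -16 word offset.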


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK_EQ(imm18 & 3, 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1553 
1554 
1555 //------- Branch and jump instructions --------
1556 
1557 void Assembler::b(int16_t offset) {
1558  beq(zero_reg, zero_reg, offset);
1559 }
1560 
1561 
1562 void Assembler::bal(int16_t offset) {
1563  bgezal(zero_reg, offset);
1564 }
1565 
1566 
1567 void Assembler::bc(int32_t offset) {
1568  DCHECK_EQ(kArchVariant, kMips64r6);
1569  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
1570 }
1571 
1572 
1573 void Assembler::balc(int32_t offset) {
1574  DCHECK_EQ(kArchVariant, kMips64r6);
1575  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
1576 }
1577 
1578 
1579 void Assembler::beq(Register rs, Register rt, int16_t offset) {
1580  BlockTrampolinePoolScope block_trampoline_pool(this);
1581  GenInstrImmediate(BEQ, rs, rt, offset);
1582  BlockTrampolinePoolFor(1); // For associated delay slot.
1583 }
1584 
1585 
1586 void Assembler::bgez(Register rs, int16_t offset) {
1587  BlockTrampolinePoolScope block_trampoline_pool(this);
1588  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1589  BlockTrampolinePoolFor(1); // For associated delay slot.
1590 }
1591 
1592 
1593 void Assembler::bgezc(Register rt, int16_t offset) {
1594  DCHECK_EQ(kArchVariant, kMips64r6);
1595  DCHECK(rt != zero_reg);
1596  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1597 }
1598 
1599 
1600 void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
1601  DCHECK_EQ(kArchVariant, kMips64r6);
1602  DCHECK(rs != zero_reg);
1603  DCHECK(rt != zero_reg);
1604  DCHECK(rs.code() != rt.code());
1605  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1606 }
1607 
1608 
1609 void Assembler::bgec(Register rs, Register rt, int16_t offset) {
1610  DCHECK_EQ(kArchVariant, kMips64r6);
1611  DCHECK(rs != zero_reg);
1612  DCHECK(rt != zero_reg);
1613  DCHECK(rs.code() != rt.code());
1614  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1615 }
1616 
1617 
1618 void Assembler::bgezal(Register rs, int16_t offset) {
1619  DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
1620  DCHECK(rs != ra);
1621  BlockTrampolinePoolScope block_trampoline_pool(this);
1622  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1623  BlockTrampolinePoolFor(1); // For associated delay slot.
1624 }
1625 
1626 
1627 void Assembler::bgtz(Register rs, int16_t offset) {
1628  BlockTrampolinePoolScope block_trampoline_pool(this);
1629  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1630  BlockTrampolinePoolFor(1); // For associated delay slot.
1631 }
1632 
1633 
1634 void Assembler::bgtzc(Register rt, int16_t offset) {
1635  DCHECK_EQ(kArchVariant, kMips64r6);
1636  DCHECK(rt != zero_reg);
1637  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
1638  CompactBranchType::COMPACT_BRANCH);
1639 }
1640 
1641 
1642 void Assembler::blez(Register rs, int16_t offset) {
1643  BlockTrampolinePoolScope block_trampoline_pool(this);
1644  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1645  BlockTrampolinePoolFor(1); // For associated delay slot.
1646 }
1647 
1648 
1649 void Assembler::blezc(Register rt, int16_t offset) {
1650  DCHECK_EQ(kArchVariant, kMips64r6);
1651  DCHECK(rt != zero_reg);
1652  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
1653  CompactBranchType::COMPACT_BRANCH);
1654 }
1655 
1656 
1657 void Assembler::bltzc(Register rt, int16_t offset) {
1658  DCHECK_EQ(kArchVariant, kMips64r6);
1659  DCHECK(rt != zero_reg);
1660  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1661 }
1662 
1663 
1664 void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
1665  DCHECK_EQ(kArchVariant, kMips64r6);
1666  DCHECK(rs != zero_reg);
1667  DCHECK(rt != zero_reg);
1668  DCHECK(rs.code() != rt.code());
1669  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1670 }
1671 
1672 
1673 void Assembler::bltc(Register rs, Register rt, int16_t offset) {
1674  DCHECK_EQ(kArchVariant, kMips64r6);
1675  DCHECK(rs != zero_reg);
1676  DCHECK(rt != zero_reg);
1677  DCHECK(rs.code() != rt.code());
1678  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1679 }
1680 
1681 
1682 void Assembler::bltz(Register rs, int16_t offset) {
1683  BlockTrampolinePoolScope block_trampoline_pool(this);
1684  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1685  BlockTrampolinePoolFor(1); // For associated delay slot.
1686 }
1687 
1688 
1689 void Assembler::bltzal(Register rs, int16_t offset) {
1690  DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
1691  DCHECK(rs != ra);
1692  BlockTrampolinePoolScope block_trampoline_pool(this);
1693  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1694  BlockTrampolinePoolFor(1); // For associated delay slot.
1695 }
1696 
1697 
1698 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1699  BlockTrampolinePoolScope block_trampoline_pool(this);
1700  GenInstrImmediate(BNE, rs, rt, offset);
1701  BlockTrampolinePoolFor(1); // For associated delay slot.
1702 }
1703 
1704 
1705 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
1706  DCHECK_EQ(kArchVariant, kMips64r6);
1707  if (rs.code() >= rt.code()) {
1708  GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1709  } else {
1710  GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1711  }
1712 }
1713 
1714 
1715 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
1716  DCHECK_EQ(kArchVariant, kMips64r6);
1717  if (rs.code() >= rt.code()) {
1718  GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1719  } else {
1720  GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1721  }
1722 }
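// On r6, BOVC/BNVC reuse the ADDI/DADDI opcodes and the encoding expects the
// register with the larger code in the rs field, hence the operand swap in the
// two methods above. A usage sketch, assuming the architectural semantics
// (branch on 32-bit signed-addition overflow / no overflow):
//   bovc(a0, a1, offset);  // Taken when a0 + a1 overflows 32 bits.
//   bnvc(a0, a1, offset);  // Taken when a0 + a1 does not overflow.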
1723 
1724 
1725 void Assembler::blezalc(Register rt, int16_t offset) {
1726  DCHECK_EQ(kArchVariant, kMips64r6);
1727  DCHECK(rt != zero_reg);
1728  DCHECK(rt != ra);
1729  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
1730  CompactBranchType::COMPACT_BRANCH);
1731 }
1732 
1733 
1734 void Assembler::bgezalc(Register rt, int16_t offset) {
1735  DCHECK_EQ(kArchVariant, kMips64r6);
1736  DCHECK(rt != zero_reg);
1737  DCHECK(rt != ra);
1738  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1739 }
1740 
1741 
1742 void Assembler::bgezall(Register rs, int16_t offset) {
1743  DCHECK_NE(kArchVariant, kMips64r6);
1744  DCHECK(rs != zero_reg);
1745  DCHECK(rs != ra);
1746  BlockTrampolinePoolScope block_trampoline_pool(this);
1747  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
1748  BlockTrampolinePoolFor(1); // For associated delay slot.
1749 }
1750 
1751 
1752 void Assembler::bltzalc(Register rt, int16_t offset) {
1753  DCHECK_EQ(kArchVariant, kMips64r6);
1754  DCHECK(rt != zero_reg);
1755  DCHECK(rt != ra);
1756  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1757 }
1758 
1759 
1760 void Assembler::bgtzalc(Register rt, int16_t offset) {
1761  DCHECK_EQ(kArchVariant, kMips64r6);
1762  DCHECK(rt != zero_reg);
1763  DCHECK(rt != ra);
1764  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
1765  CompactBranchType::COMPACT_BRANCH);
1766 }
1767 
1768 
1769 void Assembler::beqzalc(Register rt, int16_t offset) {
1770  DCHECK_EQ(kArchVariant, kMips64r6);
1771  DCHECK(rt != zero_reg);
1772  DCHECK(rt != ra);
1773  GenInstrImmediate(ADDI, zero_reg, rt, offset,
1774  CompactBranchType::COMPACT_BRANCH);
1775 }
1776 
1777 
1778 void Assembler::bnezalc(Register rt, int16_t offset) {
1779  DCHECK_EQ(kArchVariant, kMips64r6);
1780  DCHECK(rt != zero_reg);
1781  DCHECK(rt != ra);
1782  GenInstrImmediate(DADDI, zero_reg, rt, offset,
1783  CompactBranchType::COMPACT_BRANCH);
1784 }
1785 
1786 
1787 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1788  DCHECK_EQ(kArchVariant, kMips64r6);
1789  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1790  if (rs.code() < rt.code()) {
1791  GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1792  } else {
1793  GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1794  }
1795 }
1796 
1797 
1798 void Assembler::beqzc(Register rs, int32_t offset) {
1799  DCHECK_EQ(kArchVariant, kMips64r6);
1800  DCHECK(rs != zero_reg);
1801  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
1802 }
1803 
1804 
1805 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1806  DCHECK_EQ(kArchVariant, kMips64r6);
1807  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1808  if (rs.code() < rt.code()) {
1809  GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1810  } else {
1811  GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1812  }
1813 }
1814 
1815 
1816 void Assembler::bnezc(Register rs, int32_t offset) {
1817  DCHECK_EQ(kArchVariant, kMips64r6);
1818  DCHECK(rs != zero_reg);
1819  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
1820 }
1821 
1822 
1823 void Assembler::j(int64_t target) {
1824  // Deprecated. Use PC-relative jumps instead.
1825  UNREACHABLE();
1826 }
1827 
1828 
1829 void Assembler::j(Label* target) {
1830  // Deprecated. Use PC-relative jumps instead.
1831  UNREACHABLE();
1832 }
1833 
1834 
1835 void Assembler::jal(Label* target) {
1836  // Deprecated. Use PC-relative jumps instead.
1837  UNREACHABLE();
1838 }
1839 
1840 void Assembler::jal(int64_t target) {
1841  // Deprecated. Use PC-relative jumps instead.
1842  UNREACHABLE();
1843 }
1844 
1845 
1846 void Assembler::jr(Register rs) {
1847  if (kArchVariant != kMips64r6) {
1848  BlockTrampolinePoolScope block_trampoline_pool(this);
1849  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1850  BlockTrampolinePoolFor(1); // For associated delay slot.
1851  } else {
1852  jalr(rs, zero_reg);
1853  }
1854 }
1855 
1856 
1857 void Assembler::jalr(Register rs, Register rd) {
1858  DCHECK(rs.code() != rd.code());
1859  BlockTrampolinePoolScope block_trampoline_pool(this);
1860  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1861  BlockTrampolinePoolFor(1); // For associated delay slot.
1862 }
1863 
1864 
1865 void Assembler::jic(Register rt, int16_t offset) {
1866  DCHECK_EQ(kArchVariant, kMips64r6);
1867  GenInstrImmediate(POP66, zero_reg, rt, offset);
1868 }
1869 
1870 
1871 void Assembler::jialc(Register rt, int16_t offset) {
1872  DCHECK_EQ(kArchVariant, kMips64r6);
1873  GenInstrImmediate(POP76, zero_reg, rt, offset);
1874 }
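// JIC and JIALC are the r6 compact register jumps: both target rt + offset,
// and jialc additionally writes the return address to ra. A sketch, assuming
// standard r6 semantics:
//   jic(t9, 0);    // Jump to t9; no delay slot, no link.
//   jialc(t9, 0);  // Jump to t9 and link ra; a compact call.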
1875 
1876 
1877 // -------Data-processing-instructions---------
1878 
1879 // Arithmetic.
1880 
1881 void Assembler::addu(Register rd, Register rs, Register rt) {
1882  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1883 }
1884 
1885 
1886 void Assembler::addiu(Register rd, Register rs, int32_t j) {
1887  GenInstrImmediate(ADDIU, rs, rd, j);
1888 }
1889 
1890 
1891 void Assembler::subu(Register rd, Register rs, Register rt) {
1892  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1893 }
1894 
1895 
1896 void Assembler::mul(Register rd, Register rs, Register rt) {
1897  if (kArchVariant == kMips64r6) {
1898  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1899  } else {
1900  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1901  }
1902 }
1903 
1904 
1905 void Assembler::muh(Register rd, Register rs, Register rt) {
1906  DCHECK_EQ(kArchVariant, kMips64r6);
1907  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1908 }
1909 
1910 
1911 void Assembler::mulu(Register rd, Register rs, Register rt) {
1912  DCHECK_EQ(kArchVariant, kMips64r6);
1913  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1914 }
1915 
1916 
1917 void Assembler::muhu(Register rd, Register rs, Register rt) {
1918  DCHECK_EQ(kArchVariant, kMips64r6);
1919  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1920 }
1921 
1922 
1923 void Assembler::dmul(Register rd, Register rs, Register rt) {
1924  DCHECK_EQ(kArchVariant, kMips64r6);
1925  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
1926 }
1927 
1928 
1929 void Assembler::dmuh(Register rd, Register rs, Register rt) {
1930  DCHECK_EQ(kArchVariant, kMips64r6);
1931  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
1932 }
1933 
1934 
1935 void Assembler::dmulu(Register rd, Register rs, Register rt) {
1936  DCHECK_EQ(kArchVariant, kMips64r6);
1937  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
1938 }
1939 
1940 
1941 void Assembler::dmuhu(Register rd, Register rs, Register rt) {
1942  DCHECK_EQ(kArchVariant, kMips64r6);
1943  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
1944 }
1945 
1946 
1947 void Assembler::mult(Register rs, Register rt) {
1948  DCHECK_NE(kArchVariant, kMips64r6);
1949  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1950 }
1951 
1952 
1953 void Assembler::multu(Register rs, Register rt) {
1954  DCHECK_NE(kArchVariant, kMips64r6);
1955  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1956 }
1957 
1958 
1959 void Assembler::daddiu(Register rd, Register rs, int32_t j) {
1960  GenInstrImmediate(DADDIU, rs, rd, j);
1961 }
1962 
1963 
1964 void Assembler::div(Register rs, Register rt) {
1965  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1966 }
1967 
1968 
1969 void Assembler::div(Register rd, Register rs, Register rt) {
1970  DCHECK_EQ(kArchVariant, kMips64r6);
1971  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
1972 }
1973 
1974 
1975 void Assembler::mod(Register rd, Register rs, Register rt) {
1976  DCHECK_EQ(kArchVariant, kMips64r6);
1977  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1978 }
1979 
1980 
1981 void Assembler::divu(Register rs, Register rt) {
1982  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1983 }
1984 
1985 
1986 void Assembler::divu(Register rd, Register rs, Register rt) {
1987  DCHECK_EQ(kArchVariant, kMips64r6);
1988  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
1989 }
1990 
1991 
1992 void Assembler::modu(Register rd, Register rs, Register rt) {
1993  DCHECK_EQ(kArchVariant, kMips64r6);
1994  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
1995 }
1996 
1997 
1998 void Assembler::daddu(Register rd, Register rs, Register rt) {
1999  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
2000 }
2001 
2002 
2003 void Assembler::dsubu(Register rd, Register rs, Register rt) {
2004  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
2005 }
2006 
2007 
2008 void Assembler::dmult(Register rs, Register rt) {
2009  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
2010 }
2011 
2012 
2013 void Assembler::dmultu(Register rs, Register rt) {
2014  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
2015 }
2016 
2017 
2018 void Assembler::ddiv(Register rs, Register rt) {
2019  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
2020 }
2021 
2022 
2023 void Assembler::ddiv(Register rd, Register rs, Register rt) {
2024  DCHECK_EQ(kArchVariant, kMips64r6);
2025  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
2026 }
2027 
2028 
2029 void Assembler::dmod(Register rd, Register rs, Register rt) {
2030  DCHECK_EQ(kArchVariant, kMips64r6);
2031  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
2032 }
2033 
2034 
2035 void Assembler::ddivu(Register rs, Register rt) {
2036  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
2037 }
2038 
2039 
2040 void Assembler::ddivu(Register rd, Register rs, Register rt) {
2041  DCHECK_EQ(kArchVariant, kMips64r6);
2042  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
2043 }
2044 
2045 
2046 void Assembler::dmodu(Register rd, Register rs, Register rt) {
2047  DCHECK_EQ(kArchVariant, kMips64r6);
2048  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
2049 }
2050 
2051 
2052 // Logical.
2053 
2054 void Assembler::and_(Register rd, Register rs, Register rt) {
2055  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
2056 }
2057 
2058 
2059 void Assembler::andi(Register rt, Register rs, int32_t j) {
2060  DCHECK(is_uint16(j));
2061  GenInstrImmediate(ANDI, rs, rt, j);
2062 }
2063 
2064 
2065 void Assembler::or_(Register rd, Register rs, Register rt) {
2066  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
2067 }
2068 
2069 
2070 void Assembler::ori(Register rt, Register rs, int32_t j) {
2071  DCHECK(is_uint16(j));
2072  GenInstrImmediate(ORI, rs, rt, j);
2073 }
2074 
2075 
2076 void Assembler::xor_(Register rd, Register rs, Register rt) {
2077  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
2078 }
2079 
2080 
2081 void Assembler::xori(Register rt, Register rs, int32_t j) {
2082  DCHECK(is_uint16(j));
2083  GenInstrImmediate(XORI, rs, rt, j);
2084 }
2085 
2086 
2087 void Assembler::nor(Register rd, Register rs, Register rt) {
2088  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
2089 }
2090 
2091 
2092 // Shifts.
2093 void Assembler::sll(Register rd,
2094  Register rt,
2095  uint16_t sa,
2096  bool coming_from_nop) {
2097  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
2098  // generated using the sll instruction. They must be generated using
2099  // nop(int/NopMarkerTypes).
2100  DCHECK(coming_from_nop || (rd != zero_reg && rt != zero_reg));
2101  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
2102 }
2103 
2104 
2105 void Assembler::sllv(Register rd, Register rt, Register rs) {
2106  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
2107 }
2108 
2109 
2110 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
2111  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
2112 }
2113 
2114 
2115 void Assembler::srlv(Register rd, Register rt, Register rs) {
2116  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
2117 }
2118 
2119 
2120 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
2121  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
2122 }
2123 
2124 
2125 void Assembler::srav(Register rd, Register rt, Register rs) {
2126  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
2127 }
2128 
2129 
2130 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
2131  // Should be called via MacroAssembler::Ror.
2132  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
2133  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2134  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
2135  | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
2136  emit(instr);
2137 }
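// ROTR has no opcode of its own: it is encoded as SRL with bit 21 (the rs
// field) set to 1, which the manual encoding above builds. Illustrative use:
//   rotr(t0, t1, 8);  // t0 = t1 rotated right by 8 bits (32-bit rotate).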
2138 
2139 
2140 void Assembler::rotrv(Register rd, Register rt, Register rs) {
2141  // Should be called via MacroAssembler::Ror.
2142  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2143  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2144  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
2145  | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
2146  emit(instr);
2147 }
2148 
2149 
2150 void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
2151  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
2152 }
2153 
2154 
2155 void Assembler::dsllv(Register rd, Register rt, Register rs) {
2156  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
2157 }
2158 
2159 
2160 void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
2161  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
2162 }
2163 
2164 
2165 void Assembler::dsrlv(Register rd, Register rt, Register rs) {
2166  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
2167 }
2168 
2169 
2170 void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
2171  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
2172  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
2173  | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
2174  emit(instr);
2175 }
2176 
2177 void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
2178  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
2179  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
2180  (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
2181  emit(instr);
2182 }
2183 
2184 void Assembler::drotrv(Register rd, Register rt, Register rs) {
2185  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2186  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
2187  | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
2188  emit(instr);
2189 }
2190 
2191 
2192 void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
2193  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
2194 }
2195 
2196 
2197 void Assembler::dsrav(Register rd, Register rt, Register rs) {
2198  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
2199 }
2200 
2201 
2202 void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
2203  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
2204 }
2205 
2206 
2207 void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
2208  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
2209 }
2210 
2211 
2212 void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
2213  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
2214 }
2215 
2216 
2217 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
2218  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2219  DCHECK_LE(sa, 3);
2220  DCHECK_EQ(kArchVariant, kMips64r6);
2221  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
2222  rd.code() << kRdShift | sa << kSaShift | LSA;
2223  emit(instr);
2224 }
2225 
2226 
2227 void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
2228  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2229  DCHECK_LE(sa, 3);
2230  DCHECK_EQ(kArchVariant, kMips64r6);
2231  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
2232  rd.code() << kRdShift | sa << kSaShift | DLSA;
2233  emit(instr);
2234 }
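// Assuming the architectural r6 definition of LSA/DLSA, the 2-bit sa field
// encodes shift amounts 1-4 (shift = sa + 1) and the result is
// rd = (rs << (sa + 1)) + rt. With this method's operand order (register
// names illustrative):
//   dlsa(t0, base, index, 2);  // t0 = base + (index << 3); 8-byte elements.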
2235 
2236 
2237 // ------------Memory-instructions-------------
2238 
2239 void Assembler::AdjustBaseAndOffset(MemOperand& src,
2240  OffsetAccessType access_type,
2241  int second_access_add_to_offset) {
2242  // This method is used to adjust the base register and offset pair
2243  // for a load/store when the offset doesn't fit into int16_t.
2244  // It is assumed that 'base + offset' is sufficiently aligned for memory
2245  // operands that are machine word in size or smaller. For doubleword-sized
2246  // operands it's assumed that 'base' is a multiple of 8, while 'offset'
2247  // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
2248  // and spilled variables on the stack accessed relative to the stack
2249  // pointer register).
2250  // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
2251 
2252  bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
2253  bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
2254  DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
2255 
2256  // is_int16 must be passed a signed value, hence the static cast below.
2257  if (is_int16(src.offset()) &&
2258  (!two_accesses || is_int16(static_cast<int32_t>(
2259  src.offset() + second_access_add_to_offset)))) {
2260  // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
2261  // value) fits into int16_t.
2262  return;
2263  }
2264 
2265  DCHECK(src.rm() !=
2266  at); // Must not overwrite the register 'base' while loading 'offset'.
2267 
2268 #ifdef DEBUG
2269  // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
2270  uint32_t misalignment = src.offset() & (kDoubleSize - 1);
2271 #endif
2272 
2273  // Do not load the whole 32-bit 'offset' if it can be represented as
2274  // a sum of two 16-bit signed offsets. This can save an instruction or two.
2275  // To simplify matters, only do this for a symmetric range of offsets from
2276  // about -64KB to about +64KB, allowing further addition of 4 when accessing
2277  // 64-bit variables with two 32-bit accesses.
2278  constexpr int32_t kMinOffsetForSimpleAdjustment =
2279  0x7FF8; // Max int16_t that's a multiple of 8.
2280  constexpr int32_t kMaxOffsetForSimpleAdjustment =
2281  2 * kMinOffsetForSimpleAdjustment;
2282 
2283  UseScratchRegisterScope temps(this);
2284  Register scratch = temps.Acquire();
2285  if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
2286  daddiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment);
2287  src.offset_ -= kMinOffsetForSimpleAdjustment;
2288  } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
2289  src.offset() < 0) {
2290  daddiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment);
2291  src.offset_ += kMinOffsetForSimpleAdjustment;
2292  } else if (kArchVariant == kMips64r6) {
2293  // On r6 take advantage of the daui instruction, e.g.:
2294  // daui at, base, offset_high
2295  // [dahi at, 1] // When `offset` is close to +2GB.
2296  // lw reg_lo, offset_low(at)
2297  // [lw reg_hi, (offset_low+4)(at)] // If misaligned 64-bit load.
2298  // or when offset_low+4 overflows int16_t:
2299  // daui at, base, offset_high
2300  // daddiu at, at, 8
2301  // lw reg_lo, (offset_low-8)(at)
2302  // lw reg_hi, (offset_low-4)(at)
2303  int16_t offset_low = static_cast<uint16_t>(src.offset());
2304  int32_t offset_low32 = offset_low;
2305  int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
2306  bool increment_hi16 = offset_low < 0;
2307  bool overflow_hi16 = false;
2308 
2309  if (increment_hi16) {
2310  offset_high++;
2311  overflow_hi16 = (offset_high == -32768);
2312  }
2313  daui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
2314 
2315  if (overflow_hi16) {
2316  dahi(scratch, 1);
2317  }
2318 
2319  if (two_accesses && !is_int16(static_cast<int32_t>(
2320  offset_low32 + second_access_add_to_offset))) {
2321  // Avoid overflow in the 16-bit offset of the load/store instruction when
2322  // adding 4.
2323  daddiu(scratch, scratch, kDoubleSize);
2324  offset_low32 -= kDoubleSize;
2325  }
2326 
2327  src.offset_ = offset_low32;
2328  } else {
2329  // Do not load the whole 32-bit 'offset' if it can be represented as
2330  // a sum of three 16-bit signed offsets. This can save an instruction.
2331  // To simplify matters, only do this for a symmetric range of offsets from
2332  // about -96KB to about +96KB, allowing further addition of 4 when accessing
2333  // 64-bit variables with two 32-bit accesses.
2334  constexpr int32_t kMinOffsetForMediumAdjustment =
2335  2 * kMinOffsetForSimpleAdjustment;
2336  constexpr int32_t kMaxOffsetForMediumAdjustment =
2337  3 * kMinOffsetForSimpleAdjustment;
2338  if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
2339  daddiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
2340  daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
2341  src.offset_ -= kMinOffsetForMediumAdjustment;
2342  } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
2343  src.offset() < 0) {
2344  daddiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
2345  daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
2346  src.offset_ += kMinOffsetForMediumAdjustment;
2347  } else {
2348  // Now that all shorter options have been exhausted, load the full 32-bit
2349  // offset.
2350  int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
2351  lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
2352  ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
2353  daddu(scratch, scratch, src.rm());
2354  src.offset_ -= loaded_offset;
2355  }
2356  }
2357  src.rm_ = scratch;
2358 
2359  DCHECK(is_int16(src.offset()));
2360  if (two_accesses) {
2361  DCHECK(is_int16(
2362  static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
2363  }
2364  DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
2365 }
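// A worked example of the simple-adjustment path above, with illustrative
// values: for a load at base + 0x9000 (too large for int16_t) the method emits
//   daddiu(at, base, 0x7FF8);
// and rewrites the operand to offset 0x9000 - 0x7FF8 = 0x1008, which fits the
// instruction's 16-bit field while preserving the offset's alignment modulo 8.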
2366 
2367 void Assembler::lb(Register rd, const MemOperand& rs) {
2368  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
2369 }
2370 
2371 
2372 void Assembler::lbu(Register rd, const MemOperand& rs) {
2373  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
2374 }
2375 
2376 
2377 void Assembler::lh(Register rd, const MemOperand& rs) {
2378  GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
2379 }
2380 
2381 
2382 void Assembler::lhu(Register rd, const MemOperand& rs) {
2383  GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
2384 }
2385 
2386 
2387 void Assembler::lw(Register rd, const MemOperand& rs) {
2388  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
2389 }
2390 
2391 
2392 void Assembler::lwu(Register rd, const MemOperand& rs) {
2393  GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
2394 }
2395 
2396 
2397 void Assembler::lwl(Register rd, const MemOperand& rs) {
2398  DCHECK(is_int16(rs.offset_));
2399  DCHECK_EQ(kArchVariant, kMips64r2);
2400  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
2401 }
2402 
2403 
2404 void Assembler::lwr(Register rd, const MemOperand& rs) {
2405  DCHECK(is_int16(rs.offset_));
2406  DCHECK_EQ(kArchVariant, kMips64r2);
2407  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
2408 }
2409 
2410 
2411 void Assembler::sb(Register rd, const MemOperand& rs) {
2412  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
2413 }
2414 
2415 
2416 void Assembler::sh(Register rd, const MemOperand& rs) {
2417  GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
2418 }
2419 
2420 
2421 void Assembler::sw(Register rd, const MemOperand& rs) {
2422  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
2423 }
2424 
2425 
2426 void Assembler::swl(Register rd, const MemOperand& rs) {
2427  DCHECK(is_int16(rs.offset_));
2428  DCHECK_EQ(kArchVariant, kMips64r2);
2429  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
2430 }
2431 
2432 
2433 void Assembler::swr(Register rd, const MemOperand& rs) {
2434  DCHECK(is_int16(rs.offset_));
2435  DCHECK_EQ(kArchVariant, kMips64r2);
2436  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
2437 }
2438 
2439 void Assembler::ll(Register rd, const MemOperand& rs) {
2440  if (kArchVariant == kMips64r6) {
2441  DCHECK(is_int9(rs.offset_));
2442  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
2443  } else {
2444  DCHECK_EQ(kArchVariant, kMips64r2);
2445  DCHECK(is_int16(rs.offset_));
2446  GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
2447  }
2448 }
2449 
2450 void Assembler::lld(Register rd, const MemOperand& rs) {
2451  if (kArchVariant == kMips64r6) {
2452  DCHECK(is_int9(rs.offset_));
2453  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LLD_R6);
2454  } else {
2455  DCHECK_EQ(kArchVariant, kMips64r2);
2456  DCHECK(is_int16(rs.offset_));
2457  GenInstrImmediate(LLD, rs.rm(), rd, rs.offset_);
2458  }
2459 }
2460 
2461 void Assembler::sc(Register rd, const MemOperand& rs) {
2462  if (kArchVariant == kMips64r6) {
2463  DCHECK(is_int9(rs.offset_));
2464  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
2465  } else {
2466  DCHECK_EQ(kArchVariant, kMips64r2);
2467  GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
2468  }
2469 }
2470 
2471 void Assembler::scd(Register rd, const MemOperand& rs) {
2472  if (kArchVariant == kMips64r6) {
2473  DCHECK(is_int9(rs.offset_));
2474  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SCD_R6);
2475  } else {
2476  DCHECK_EQ(kArchVariant, kMips64r2);
2477  GenInstrImmediate(SCD, rs.rm(), rd, rs.offset_);
2478  }
2479 }
2480 
2481 void Assembler::lui(Register rd, int32_t j) {
2482  DCHECK(is_uint16(j) || is_int16(j));
2483  GenInstrImmediate(LUI, zero_reg, rd, j);
2484 }
2485 
2486 
2487 void Assembler::aui(Register rt, Register rs, int32_t j) {
2488  // This instruction uses the same opcode as 'lui'. The difference in the
2489  // encoding is that 'lui' has the zero register in its rs field.
2490  DCHECK(is_uint16(j));
2491  GenInstrImmediate(LUI, rs, rt, j);
2492 }
2493 
2494 
2495 void Assembler::daui(Register rt, Register rs, int32_t j) {
2496  DCHECK(is_uint16(j));
2497  DCHECK(rs != zero_reg);
2498  GenInstrImmediate(DAUI, rs, rt, j);
2499 }
2500 
2501 
2502 void Assembler::dahi(Register rs, int32_t j) {
2503  DCHECK(is_uint16(j));
2504  GenInstrImmediate(REGIMM, rs, DAHI, j);
2505 }
2506 
2507 
2508 void Assembler::dati(Register rs, int32_t j) {
2509  DCHECK(is_uint16(j));
2510  GenInstrImmediate(REGIMM, rs, DATI, j);
2511 }
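// dahi and dati add a sign-extended 16-bit immediate at bit positions 32 and
// 48 of rs, complementing lui/ori/daui which cover bits 0-31. A sketch of
// materializing a 64-bit constant on r6 (the *16 names are placeholders for
// the constant's 16-bit slices):
//   lui(t0, high16);      // Bits 16..31, sign-extended.
//   ori(t0, t0, low16);   // Bits 0..15.
//   dahi(t0, bits32_47);  // Adds imm << 32.
//   dati(t0, bits48_63);  // Adds imm << 48.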
2512 
2513 
2514 void Assembler::ldl(Register rd, const MemOperand& rs) {
2515  DCHECK(is_int16(rs.offset_));
2516  DCHECK_EQ(kArchVariant, kMips64r2);
2517  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
2518 }
2519 
2520 
2521 void Assembler::ldr(Register rd, const MemOperand& rs) {
2522  DCHECK(is_int16(rs.offset_));
2523  DCHECK_EQ(kArchVariant, kMips64r2);
2524  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
2525 }
2526 
2527 
2528 void Assembler::sdl(Register rd, const MemOperand& rs) {
2529  DCHECK(is_int16(rs.offset_));
2530  DCHECK_EQ(kArchVariant, kMips64r2);
2531  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
2532 }
2533 
2534 
2535 void Assembler::sdr(Register rd, const MemOperand& rs) {
2536  DCHECK(is_int16(rs.offset_));
2537  DCHECK_EQ(kArchVariant, kMips64r2);
2538  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
2539 }
2540 
2541 
2542 void Assembler::ld(Register rd, const MemOperand& rs) {
2543  GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
2544 }
2545 
2546 
2547 void Assembler::sd(Register rd, const MemOperand& rs) {
2548  GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
2549 }
2550 
2551 
2552 // ---------PC-Relative instructions-----------
2553 
2554 void Assembler::addiupc(Register rs, int32_t imm19) {
2555  DCHECK_EQ(kArchVariant, kMips64r6);
2556  DCHECK(rs.is_valid() && is_int19(imm19));
2557  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
2558  GenInstrImmediate(PCREL, rs, imm21);
2559 }
2560 
2561 
2562 void Assembler::lwpc(Register rs, int32_t offset19) {
2563  DCHECK_EQ(kArchVariant, kMips64r6);
2564  DCHECK(rs.is_valid() && is_int19(offset19));
2565  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
2566  GenInstrImmediate(PCREL, rs, imm21);
2567 }
2568 
2569 
2570 void Assembler::lwupc(Register rs, int32_t offset19) {
2571  DCHECK_EQ(kArchVariant, kMips64r6);
2572  DCHECK(rs.is_valid() && is_int19(offset19));
2573  uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
2574  GenInstrImmediate(PCREL, rs, imm21);
2575 }
2576 
2577 
2578 void Assembler::ldpc(Register rs, int32_t offset18) {
2579  DCHECK_EQ(kArchVariant, kMips64r6);
2580  DCHECK(rs.is_valid() && is_int18(offset18));
2581  uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
2582  GenInstrImmediate(PCREL, rs, imm21);
2583 }
2584 
2585 
2586 void Assembler::auipc(Register rs, int16_t imm16) {
2587  DCHECK_EQ(kArchVariant, kMips64r6);
2588  DCHECK(rs.is_valid());
2589  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
2590  GenInstrImmediate(PCREL, rs, imm21);
2591 }
2592 
2593 
2594 void Assembler::aluipc(Register rs, int16_t imm16) {
2595  DCHECK_EQ(kArchVariant, kMips64r6);
2596  DCHECK(rs.is_valid());
2597  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
2598  GenInstrImmediate(PCREL, rs, imm21);
2599 }
2600 
2601 
2602 // -------------Misc-instructions--------------
2603 
2604 // Break / Trap instructions.
2605 void Assembler::break_(uint32_t code, bool break_as_stop) {
2606  DCHECK_EQ(code & ~0xFFFFF, 0);
2607  // We need to invalidate breaks that could be stops as well because the
2608  // simulator expects a char pointer after the stop instruction.
2609  // See constants-mips.h for explanation.
2610  DCHECK((break_as_stop &&
2611  code <= kMaxStopCode &&
2612  code > kMaxWatchpointCode) ||
2613  (!break_as_stop &&
2614  (code > kMaxStopCode ||
2615  code <= kMaxWatchpointCode)));
2616  Instr break_instr = SPECIAL | BREAK | (code << 6);
2617  emit(break_instr);
2618 }
2619 
2620 
2621 void Assembler::stop(const char* msg, uint32_t code) {
2622  DCHECK_GT(code, kMaxWatchpointCode);
2623  DCHECK_LE(code, kMaxStopCode);
2624 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
2625  break_(0x54321);
2626 #else // V8_HOST_ARCH_MIPS
2627  break_(code, true);
2628 #endif
2629 }
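// break_ packs its code into bits 6..25 of the BREAK encoding. stop() only
// accepts codes in (kMaxWatchpointCode, kMaxStopCode], which the simulator
// interprets as message-carrying stops. Hypothetical use:
//   stop("unreachable code", kMaxWatchpointCode + 1);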
2630 
2631 
2632 void Assembler::tge(Register rs, Register rt, uint16_t code) {
2633  DCHECK(is_uint10(code));
2634  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
2635  | rt.code() << kRtShift | code << 6;
2636  emit(instr);
2637 }
2638 
2639 
2640 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2641  DCHECK(is_uint10(code));
2642  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
2643  | rt.code() << kRtShift | code << 6;
2644  emit(instr);
2645 }
2646 
2647 
2648 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2649  DCHECK(is_uint10(code));
2650  Instr instr =
2651  SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2652  emit(instr);
2653 }
2654 
2655 
2656 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2657  DCHECK(is_uint10(code));
2658  Instr instr =
2659  SPECIAL | TLTU | rs.code() << kRsShift
2660  | rt.code() << kRtShift | code << 6;
2661  emit(instr);
2662 }
2663 
2664 
2665 void Assembler::teq(Register rs, Register rt, uint16_t code) {
2666  DCHECK(is_uint10(code));
2667  Instr instr =
2668  SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2669  emit(instr);
2670 }
2671 
2672 
2673 void Assembler::tne(Register rs, Register rt, uint16_t code) {
2674  DCHECK(is_uint10(code));
2675  Instr instr =
2676  SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2677  emit(instr);
2678 }
2679 
2680 void Assembler::sync() {
2681  Instr sync_instr = SPECIAL | SYNC;
2682  emit(sync_instr);
2683 }
2684 
2685 // Move from HI/LO register.
2686 
2687 void Assembler::mfhi(Register rd) {
2688  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
2689 }
2690 
2691 
2692 void Assembler::mflo(Register rd) {
2693  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
2694 }
2695 
2696 
2697 // Set on less than instructions.
2698 void Assembler::slt(Register rd, Register rs, Register rt) {
2699  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
2700 }
2701 
2702 
2703 void Assembler::sltu(Register rd, Register rs, Register rt) {
2704  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
2705 }
2706 
2707 
2708 void Assembler::slti(Register rt, Register rs, int32_t j) {
2709  GenInstrImmediate(SLTI, rs, rt, j);
2710 }
2711 
2712 
2713 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
2714  GenInstrImmediate(SLTIU, rs, rt, j);
2715 }
2716 
2717 
2718 // Conditional move.
2719 void Assembler::movz(Register rd, Register rs, Register rt) {
2720  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
2721 }
2722 
2723 
2724 void Assembler::movn(Register rd, Register rs, Register rt) {
2725  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
2726 }
2727 
2728 
2729 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
2730  Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
2731  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2732 }
2733 
2734 
2735 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2736  Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
2737  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2738 }
2739 
2740 
2741 void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2742  min(S, fd, fs, ft);
2743 }
2744 
2745 
2746 void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2747  min(D, fd, fs, ft);
2748 }
2749 
2750 
2751 void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2752  max(S, fd, fs, ft);
2753 }
2754 
2755 
2756 void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2757  max(D, fd, fs, ft);
2758 }
2759 
2760 
2761 void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2762  mina(S, fd, fs, ft);
2763 }
2764 
2765 
2766 void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2767  mina(D, fd, fs, ft);
2768 }
2769 
2770 
2771 void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2772  maxa(S, fd, fs, ft);
2773 }
2774 
2775 
2776 void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2777  maxa(D, fd, fs, ft);
2778 }
2779 
2780 
2781 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
2782  FPURegister ft) {
2783  DCHECK_EQ(kArchVariant, kMips64r6);
2784  DCHECK((fmt == D) || (fmt == S));
2785  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
2786 }
2787 
2788 
2789 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
2790  FPURegister ft) {
2791  DCHECK_EQ(kArchVariant, kMips64r6);
2792  DCHECK((fmt == D) || (fmt == S));
2793  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
2794 }
2795 
2796 
2797 // GPR.
2798 void Assembler::seleqz(Register rd, Register rs, Register rt) {
2799  DCHECK_EQ(kArchVariant, kMips64r6);
2800  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
2801 }
2802 
2803 
2804 // GPR.
2805 void Assembler::selnez(Register rd, Register rs, Register rt) {
2806  DCHECK_EQ(kArchVariant, kMips64r6);
2807  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
2808 }
2809 
2810 
2811 // Bit twiddling.
2812 void Assembler::clz(Register rd, Register rs) {
2813  if (kArchVariant != kMips64r6) {
2814  // clz instr requires same GPR number in 'rd' and 'rt' fields.
2815  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2816  } else {
2817  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2818  }
2819 }
2820 
2821 
2822 void Assembler::dclz(Register rd, Register rs) {
2823  if (kArchVariant != kMips64r6) {
2824  // dclz instr requires same GPR number in 'rd' and 'rt' fields.
2825  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
2826  } else {
2827  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
2828  }
2829 }
2830 
2831 
2832 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2833  // Should be called via MacroAssembler::Ins.
2834  // ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2835  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2836  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2837 }
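// Assuming the architectural INS semantics, ins_(rt, rs, pos, size) replaces
// bits [pos, pos + size - 1] of rt with the low 'size' bits of rs, encoded as
// msb = pos + size - 1, lsb = pos. For example:
//   ins_(t0, t1, 8, 4);  // t0 bits 8..11 = t1 bits 0..3; others unchanged.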
2838 
2839 
2840 void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2841  // Should be called via MacroAssembler::Dins.
2842  // dins instr has 'rt' field as dest, and two uint5: msb, lsb.
2843  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2844  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
2845 }
2846 
2847 void Assembler::dinsm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2848  // Should be called via MacroAssembler::Dins.
2849  // dinsm instr has 'rt' field as dest, and two uint5: msbminus32, lsb.
2850  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2851  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos, DINSM);
2852 }
2853 
2854 void Assembler::dinsu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2855  // Should be called via MacroAssembler::Dins.
2856  // dinsu instr has 'rt' field as dest, and two uint5: msbminus32, lsbminus32.
2857  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2858  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos - 32, DINSU);
2859 }
2860 
2861 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2862  // Should be called via MacroAssembler::Ext.
2863  // ext instr has 'rt' field as dest, and two uint5: msbd, lsb.
2864  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2865  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2866 }
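// Conversely, ext_(rt, rs, pos, size) extracts a bitfield, assuming the
// architectural EXT semantics: rt = (rs >> pos) & ((1 << size) - 1), encoded
// as msbd = size - 1, lsb = pos. For example:
//   ext_(t0, t1, 3, 5);  // t0 = (t1 >> 3) & 0x1F.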
2867 
2868 
2869 void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2870  // Should be called via MacroAssembler::Dext.
2871  // dext instr has 'rt' field as dest, and two uint5: msbd, lsb.
2872  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2873  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
2874 }
2875 
2876 void Assembler::dextm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2877  // Should be called via MacroAssembler::Dextm.
2878  // dextm instr has 'rt' field as dest, and two uint5: msbdminus32, lsb.
2879  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2880  GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
2881 }
2882 
2883 void Assembler::dextu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2884  // Should be called via MacroAssembler::Dextu.
2885  // dextu instr has 'rt' field as dest, and two uint5: msbd, lsbminus32.
2886  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2887  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
2888 }
2889 
2890 
2891 void Assembler::bitswap(Register rd, Register rt) {
2892  DCHECK_EQ(kArchVariant, kMips64r6);
2893  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
2894 }
2895 
2896 
2897 void Assembler::dbitswap(Register rd, Register rt) {
2898  DCHECK_EQ(kArchVariant, kMips64r6);
2899  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
2900 }
2901 
2902 
2903 void Assembler::pref(int32_t hint, const MemOperand& rs) {
2904  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2905  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
2906  | (rs.offset_);
2907  emit(instr);
2908 }
2909 
2910 
2911 void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
2912  DCHECK_EQ(kArchVariant, kMips64r6);
2913  DCHECK(is_uint3(bp));
2914  uint16_t sa = (ALIGN << kBp2Bits) | bp;
2915  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
2916 }
2917 
2918 
2919 void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
2920  DCHECK_EQ(kArchVariant, kMips64r6);
2921  DCHECK(is_uint3(bp));
2922  uint16_t sa = (DALIGN << kBp3Bits) | bp;
2923  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
2924 }
2925 
2926 void Assembler::wsbh(Register rd, Register rt) {
2927  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2928  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
2929 }
2930 
2931 void Assembler::dsbh(Register rd, Register rt) {
2932  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2933  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
2934 }
2935 
2936 void Assembler::dshd(Register rd, Register rt) {
2937  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2938  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
2939 }
2940 
2941 void Assembler::seh(Register rd, Register rt) {
2942  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2943  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
2944 }
2945 
2946 void Assembler::seb(Register rd, Register rt) {
2947  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2948  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2949 }
2950 
2951 // --------Coprocessor-instructions----------------
2952 
2953 // Load, store, move.
2954 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2955  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2956 }
2957 
2958 
2959 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
2960  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
2961 }
2962 
2963 void Assembler::swc1(FPURegister fs, const MemOperand& src) {
2964  GenInstrImmediate(SWC1, src.rm(), fs, src.offset_);
2965 }
2966 
2967 void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
2968  GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
2969 }
2970 
2971 
2972 void Assembler::mtc1(Register rt, FPURegister fs) {
2973  GenInstrRegister(COP1, MTC1, rt, fs, f0);
2974 }
2975 
2976 
2977 void Assembler::mthc1(Register rt, FPURegister fs) {
2978  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2979 }
2980 
2981 
2982 void Assembler::dmtc1(Register rt, FPURegister fs) {
2983  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
2984 }
2985 
2986 
2987 void Assembler::mfc1(Register rt, FPURegister fs) {
2988  GenInstrRegister(COP1, MFC1, rt, fs, f0);
2989 }
2990 
2991 
2992 void Assembler::mfhc1(Register rt, FPURegister fs) {
2993  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2994 }
2995 
2996 
2997 void Assembler::dmfc1(Register rt, FPURegister fs) {
2998  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
2999 }
3000 
3001 
3002 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
3003  GenInstrRegister(COP1, CTC1, rt, fs);
3004 }
3005 
3006 
3007 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
3008  GenInstrRegister(COP1, CFC1, rt, fs);
3009 }
3010 
3011 
3012 void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
3013  FPURegister ft) {
3014  DCHECK_EQ(kArchVariant, kMips64r6);
3015  DCHECK((fmt == D) || (fmt == S));
3016 
3017  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
3018 }
3019 
3020 
3021 void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3022  sel(S, fd, fs, ft);
3023 }
3024 
3025 
3026 void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3027  sel(D, fd, fs, ft);
3028 }
3029 
3030 
3031 // FPR.
3032 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
3033  FPURegister ft) {
3034  DCHECK((fmt == D) || (fmt == S));
3035  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
3036 }
3037 
3038 
3039 void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3040  seleqz(D, fd, fs, ft);
3041 }
3042 
3043 
3044 void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3045  seleqz(S, fd, fs, ft);
3046 }
3047 
3048 
3049 void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3050  selnez(D, fd, fs, ft);
3051 }
3052 
3053 
3054 void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3055  selnez(S, fd, fs, ft);
3056 }
3057 
3058 
3059 void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
3060  DCHECK_EQ(kArchVariant, kMips64r2);
3061  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
3062 }
3063 
3064 
3065 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
3066  DCHECK_EQ(kArchVariant, kMips64r2);
3067  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
3068 }
3069 
3070 
3071 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
3072  DCHECK_EQ(kArchVariant, kMips64r2);
3073  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
3074  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
3075 }
3076 
3077 
3078 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
3079  DCHECK_EQ(kArchVariant, kMips64r2);
3080  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
3081  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
3082 }
3083 
3084 
3085 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
3086  DCHECK_EQ(kArchVariant, kMips64r2);
3087  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
3088  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
3089 }
3090 
3091 
3092 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
3093  DCHECK_EQ(kArchVariant, kMips64r2);
3094  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
3095  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
3096 }
3097 
3098 
3099 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
3100  DCHECK_EQ(kArchVariant, kMips64r2);
3101  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
3102 }
3103 
3104 
3105 void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
3106  DCHECK_EQ(kArchVariant, kMips64r2);
3107  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
3108 }
3109 
3110 
3111 // FPR.
3112 void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
3113  FPURegister ft) {
3114  DCHECK_EQ(kArchVariant, kMips64r6);
3115  DCHECK((fmt == D) || (fmt == S));
3116  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
3117 }
3118 
3119 
3120 // Arithmetic.
3121 
3122 void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3123  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
3124 }
3125 
3126 
3127 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3128  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
3129 }
3130 
3131 
3132 void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3133  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
3134 }
3135 
3136 
3137 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3138  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
3139 }
3140 
3141 
3142 void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3143  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
3144 }
3145 
3146 
3147 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3148  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
3149 }
3150 
3151 void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
3152  FPURegister ft) {
3153  // On Loongson 3A (MIPS64R2), MADD.S is implemented as a fused multiply-add,
3154  // which causes failures in some of the tests. Since this optimization is
3155  // rarely used, and not used at all on MIPS64R6, the instruction is removed.
3156  UNREACHABLE();
3157 }
3158 
3159 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
3160  FPURegister ft) {
3161  // On Loongson 3A (MIPS64R2), MADD.D is implemented as a fused multiply-add,
3162  // which causes failures in some of the tests. Since this optimization is
3163  // rarely used, and not used at all on MIPS64R6, the instruction is removed.
3164  UNREACHABLE();
3165 }
3166 
3167 void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
3168  FPURegister ft) {
3169  // See explanation for instruction madd_s.
3170  UNREACHABLE();
3171 }
3172 
3173 void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
3174  FPURegister ft) {
3175  // See explanation for instruction madd_d.
3176  UNREACHABLE();
3177 }
3178 
3179 void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3180  DCHECK_EQ(kArchVariant, kMips64r6);
3181  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
3182 }
3183 
3184 void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3185  DCHECK_EQ(kArchVariant, kMips64r6);
3186  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
3187 }
3188 
3189 void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3190  DCHECK_EQ(kArchVariant, kMips64r6);
3191  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
3192 }
3193 
3194 void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3195  DCHECK_EQ(kArchVariant, kMips64r6);
3196  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
3197 }
3198 
3199 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3200  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
3201 }
3202 
3203 
3204 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3205  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
3206 }
3207 
3208 
3209 void Assembler::abs_s(FPURegister fd, FPURegister fs) {
3210  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
3211 }
3212 
3213 
3214 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
3215  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
3216 }
3217 
3218 
3219 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
3220  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
3221 }
3222 
3223 
3224 void Assembler::mov_s(FPURegister fd, FPURegister fs) {
3225  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
3226 }
3227 
3228 
3229 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
3230  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
3231 }
3232 
3233 
3234 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
3235  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
3236 }
3237 
3238 
3239 void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
3240  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
3241 }
3242 
3243 
3244 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
3245  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
3246 }
3247 
3248 
3249 void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
3250  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
3251 }
3252 
3253 
3254 void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
3255  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
3256 }
3257 
3258 
3259 void Assembler::recip_d(FPURegister fd, FPURegister fs) {
3260  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
3261 }
3262 
3263 
3264 void Assembler::recip_s(FPURegister fd, FPURegister fs) {
3265  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
3266 }
3267 
3268 
3269 // Conversions.
3270 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
3271  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
3272 }
3273 
3274 
3275 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
3276  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
3277 }
3278 
3279 
3280 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
3281  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
3282 }
3283 
3284 
3285 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
3286  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
3287 }
3288 
3289 
3290 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
3291  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
3292 }
3293 
3294 
3295 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
3296  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
3297 }
3298 
3299 
3300 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
3301  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
3302 }
3303 
3304 
3305 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
3306  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
3307 }
3308 
3309 
3310 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
3311  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
3312 }
3313 
3314 
3315 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
3316  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
3317 }
3318 
3319 
3320 void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
3321 
3322 
3323 void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
3324 
3325 
3326 void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
3327  DCHECK_EQ(kArchVariant, kMips64r6);
3328  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
3329 }
3330 
3331 
3332 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
3333  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3334  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
3335 }
3336 
3337 
3338 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
3339  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3340  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
3341 }
3342 
3343 
3344 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
3345  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3346  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
3347 }
3348 
3349 
3350 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
3351  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3352  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
3353 }
3354 
3355 
3356 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
3357  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
3358 }
3359 
3360 
3361 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
3362  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
3363 }
3364 
3365 
3366 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
3367  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
3368 }
3369 
3370 
3371 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
3372  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
3373 }
3374 
3375 
3376 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
3377  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
3378 }
3379 
3380 
3381 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
3382  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
3383 }
3384 
3385 
3386 void Assembler::class_s(FPURegister fd, FPURegister fs) {
3387  DCHECK_EQ(kArchVariant, kMips64r6);
3388  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
3389 }
3390 
3391 
3392 void Assembler::class_d(FPURegister fd, FPURegister fs) {
3393  DCHECK_EQ(kArchVariant, kMips64r6);
3394  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
3395 }
3396 
3397 
3398 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
3399  FPURegister ft) {
3400  DCHECK_EQ(kArchVariant, kMips64r6);
3401  DCHECK((fmt == D) || (fmt == S));
3402  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
3403 }
3404 
3405 
3406 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
3407  FPURegister ft) {
3408  DCHECK_EQ(kArchVariant, kMips64r6);
3409  DCHECK((fmt == D) || (fmt == S));
3410  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
3411 }
3412 
3413 
3414 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
3415  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
3416 }
3417 
3418 
3419 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
3420  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3421  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
3422 }
3423 
3424 
3425 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
3426  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
3427 }
3428 
3429 
3430 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
3431  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
3432 }
3433 
3434 
3435 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
3436  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3437  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
3438 }
3439 
3440 
3441 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
3442  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
3443 }
3444 
3445 
3446 // Conditions for >= MIPSr6.
3447 void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
3448  FPURegister fd, FPURegister fs, FPURegister ft) {
3449  DCHECK_EQ(kArchVariant, kMips64r6);
3450  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
3451  Instr instr = COP1 | fmt | ft.code() << kFtShift |
3452  fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
3453  emit(instr);
3454 }
3455 
3456 
3457 void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
3458  FPURegister ft) {
3459  cmp(cond, W, fd, fs, ft);
3460 }
3461 
3462 void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
3463  FPURegister ft) {
3464  cmp(cond, L, fd, fs, ft);
3465 }
3466 
3467 
3468 void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
3469  DCHECK_EQ(kArchVariant, kMips64r6);
3470  BlockTrampolinePoolScope block_trampoline_pool(this);
3471  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
3472  emit(instr);
3473  BlockTrampolinePoolFor(1); // For associated delay slot.
3474 }
3475 
3476 
3477 void Assembler::bc1nez(int16_t offset, FPURegister ft) {
3478  DCHECK_EQ(kArchVariant, kMips64r6);
3479  BlockTrampolinePoolScope block_trampoline_pool(this);
3480  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
3481  emit(instr);
3482  BlockTrampolinePoolFor(1); // For associated delay slot.
3483 }
3484 
3485 
3486 // Conditions for < MIPSr6.
3487 void Assembler::c(FPUCondition cond, SecondaryField fmt,
3488  FPURegister fs, FPURegister ft, uint16_t cc) {
3489  DCHECK_NE(kArchVariant, kMips64r6);
3490  DCHECK(is_uint3(cc));
3491  DCHECK(fmt == S || fmt == D);
3492  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
3493  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
3494  | cc << 8 | 3 << 4 | cond;
3495  emit(instr);
3496 }
3497 
3498 
3499 void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
3500  uint16_t cc) {
3501  c(cond, S, fs, ft, cc);
3502 }
3503 
3504 
3505 void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
3506  uint16_t cc) {
3507  c(cond, D, fs, ft, cc);
3508 }
3509 
3510 
3511 void Assembler::fcmp(FPURegister src1, const double src2,
3512  FPUCondition cond) {
3513  DCHECK_EQ(src2, 0.0);
3514  mtc1(zero_reg, f14);
3515  cvt_d_w(f14, f14);
3516  c(cond, D, src1, f14, 0);
3517 }
3518 
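// fcmp only supports comparison against the literal 0.0 (see the DCHECK):
// it materializes zero in f14 via mtc1/cvt_d_w and issues the pre-r6 c()
// instruction, setting condition flag 0. A usage sketch, illustrative only:
//
//   assembler.fcmp(f12, 0.0, OLT);  // FCSR flag 0 <- (f12 < 0.0)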
3519 
3520 void Assembler::bc1f(int16_t offset, uint16_t cc) {
3521  BlockTrampolinePoolScope block_trampoline_pool(this);
3522  DCHECK(is_uint3(cc));
3523  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
3524  emit(instr);
3525  BlockTrampolinePoolFor(1); // For associated delay slot.
3526 }
3527 
3528 
3529 void Assembler::bc1t(int16_t offset, uint16_t cc) {
3530  BlockTrampolinePoolScope block_trampoline_pool(this);
3531  DCHECK(is_uint3(cc));
3532  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
3533  emit(instr);
3534  BlockTrampolinePoolFor(1); // For associated delay slot.
3535 }
3536 
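// Pre-r6 compare-and-branch pattern: c() sets one of eight FCSR condition
// flags (cc in 0..7) and bc1t/bc1f branch on it; the delay slot after the
// branch must still be filled. A sketch with a hypothetical offset, not
// part of the original source:
//
//   assembler.c_d(OLT, f12, f14, 0);  // flag 0 <- (f12 < f14)
//   assembler.bc1t(offset, 0);        // taken if flag 0 is set
//   assembler.nop();                  // branch delay slot
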
3537 // ---------- MSA instructions ------------
3538 #define MSA_BRANCH_LIST(V) \
3539  V(bz_v, BZ_V) \
3540  V(bz_b, BZ_B) \
3541  V(bz_h, BZ_H) \
3542  V(bz_w, BZ_W) \
3543  V(bz_d, BZ_D) \
3544  V(bnz_v, BNZ_V) \
3545  V(bnz_b, BNZ_B) \
3546  V(bnz_h, BNZ_H) \
3547  V(bnz_w, BNZ_W) \
3548  V(bnz_d, BNZ_D)
3549 
3550 #define MSA_BRANCH(name, opcode) \
3551  void Assembler::name(MSARegister wt, int16_t offset) { \
3552  GenInstrMsaBranch(opcode, wt, offset); \
3553  }
3554 
3555 MSA_BRANCH_LIST(MSA_BRANCH)
3556 #undef MSA_BRANCH
3557 #undef MSA_BRANCH_LIST
3558 
3559 #define MSA_LD_ST_LIST(V) \
3560  V(ld_b, LD_B) \
3561  V(ld_h, LD_H) \
3562  V(ld_w, LD_W) \
3563  V(ld_d, LD_D) \
3564  V(st_b, ST_B) \
3565  V(st_h, ST_H) \
3566  V(st_w, ST_W) \
3567  V(st_d, ST_D)
3568 
3569 #define MSA_LD_ST(name, opcode) \
3570  void Assembler::name(MSARegister wd, const MemOperand& rs) { \
3571  MemOperand source = rs; \
3572  AdjustBaseAndOffset(source); \
3573  if (is_int10(source.offset())) { \
3574  GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
3575  } else { \
3576  UseScratchRegisterScope temps(this); \
3577  Register scratch = temps.Acquire(); \
3578  DCHECK(rs.rm() != scratch); \
3579  daddiu(scratch, source.rm(), source.offset()); \
3580  GenInstrMsaMI10(opcode, 0, scratch, wd); \
3581  } \
3582  }
3583 
3584 MSA_LD_ST_LIST(MSA_LD_ST)
3585 #undef MSA_LD_ST
3586 #undef MSA_LD_ST_LIST
3587 
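// The MSA load/store wrappers above use the MI10 form when the (possibly
// adjusted) offset fits in a signed 10-bit immediate, and otherwise
// materialize base + offset in a scratch register first. A standalone
// sketch of that range check (illustrative; the code above uses is_int10):

static bool FitsMsaMemImm10(int32_t offset) {
  // MI10 format: signed 10-bit immediate, i.e. [-512, 511].
  return offset >= -512 && offset <= 511;
}
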
3588 #define MSA_I10_LIST(V) \
3589  V(ldi_b, I5_DF_b) \
3590  V(ldi_h, I5_DF_h) \
3591  V(ldi_w, I5_DF_w) \
3592  V(ldi_d, I5_DF_d)
3593 
3594 #define MSA_I10(name, format) \
3595  void Assembler::name(MSARegister wd, int32_t imm10) { \
3596  GenInstrMsaI10(LDI, format, imm10, wd); \
3597  }
3598 MSA_I10_LIST(MSA_I10)
3599 #undef MSA_I10
3600 #undef MSA_I10_LIST
3601 
3602 #define MSA_I5_LIST(V) \
3603  V(addvi, ADDVI) \
3604  V(subvi, SUBVI) \
3605  V(maxi_s, MAXI_S) \
3606  V(maxi_u, MAXI_U) \
3607  V(mini_s, MINI_S) \
3608  V(mini_u, MINI_U) \
3609  V(ceqi, CEQI) \
3610  V(clti_s, CLTI_S) \
3611  V(clti_u, CLTI_U) \
3612  V(clei_s, CLEI_S) \
3613  V(clei_u, CLEI_U)
3614 
3615 #define MSA_I5_FORMAT(name, opcode, format) \
3616  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3617  uint32_t imm5) { \
3618  GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd); \
3619  }
3620 
3621 #define MSA_I5(name, opcode) \
3622  MSA_I5_FORMAT(name, opcode, b) \
3623  MSA_I5_FORMAT(name, opcode, h) \
3624  MSA_I5_FORMAT(name, opcode, w) \
3625  MSA_I5_FORMAT(name, opcode, d)
3626 
3627 MSA_I5_LIST(MSA_I5)
3628 #undef MSA_I5
3629 #undef MSA_I5_FORMAT
3630 #undef MSA_I5_LIST
3631 
3632 #define MSA_I8_LIST(V) \
3633  V(andi_b, ANDI_B) \
3634  V(ori_b, ORI_B) \
3635  V(nori_b, NORI_B) \
3636  V(xori_b, XORI_B) \
3637  V(bmnzi_b, BMNZI_B) \
3638  V(bmzi_b, BMZI_B) \
3639  V(bseli_b, BSELI_B) \
3640  V(shf_b, SHF_B) \
3641  V(shf_h, SHF_H) \
3642  V(shf_w, SHF_W)
3643 
3644 #define MSA_I8(name, opcode) \
3645  void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \
3646  GenInstrMsaI8(opcode, imm8, ws, wd); \
3647  }
3648 
3649 MSA_I8_LIST(MSA_I8)
3650 #undef MSA_I8
3651 #undef MSA_I8_LIST
3652 
3653 #define MSA_VEC_LIST(V) \
3654  V(and_v, AND_V) \
3655  V(or_v, OR_V) \
3656  V(nor_v, NOR_V) \
3657  V(xor_v, XOR_V) \
3658  V(bmnz_v, BMNZ_V) \
3659  V(bmz_v, BMZ_V) \
3660  V(bsel_v, BSEL_V)
3661 
3662 #define MSA_VEC(name, opcode) \
3663  void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \
3664  GenInstrMsaVec(opcode, wt, ws, wd); \
3665  }
3666 
3667 MSA_VEC_LIST(MSA_VEC)
3668 #undef MSA_VEC
3669 #undef MSA_VEC_LIST
3670 
3671 #define MSA_2R_LIST(V) \
3672  V(pcnt, PCNT) \
3673  V(nloc, NLOC) \
3674  V(nlzc, NLZC)
3675 
3676 #define MSA_2R_FORMAT(name, opcode, format) \
3677  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
3678  GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd); \
3679  }
3680 
3681 #define MSA_2R(name, opcode) \
3682  MSA_2R_FORMAT(name, opcode, b) \
3683  MSA_2R_FORMAT(name, opcode, h) \
3684  MSA_2R_FORMAT(name, opcode, w) \
3685  MSA_2R_FORMAT(name, opcode, d)
3686 
3687 MSA_2R_LIST(MSA_2R)
3688 #undef MSA_2R
3689 #undef MSA_2R_FORMAT
3690 #undef MSA_2R_LIST
3691 
3692 #define MSA_FILL(format) \
3693  void Assembler::fill_##format(MSARegister wd, Register rs) { \
3694  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD)); \
3695  DCHECK(rs.is_valid() && wd.is_valid()); \
3696  Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format | \
3697  (rs.code() << kWsShift) | (wd.code() << kWdShift) | \
3698  MSA_VEC_2R_2RF_MINOR; \
3699  emit(instr); \
3700  }
3701 
3702 MSA_FILL(b)
3703 MSA_FILL(h)
3704 MSA_FILL(w)
3705 MSA_FILL(d)
3706 #undef MSA_FILL
3707 
3708 #define MSA_2RF_LIST(V) \
3709  V(fclass, FCLASS) \
3710  V(ftrunc_s, FTRUNC_S) \
3711  V(ftrunc_u, FTRUNC_U) \
3712  V(fsqrt, FSQRT) \
3713  V(frsqrt, FRSQRT) \
3714  V(frcp, FRCP) \
3715  V(frint, FRINT) \
3716  V(flog2, FLOG2) \
3717  V(fexupl, FEXUPL) \
3718  V(fexupr, FEXUPR) \
3719  V(ffql, FFQL) \
3720  V(ffqr, FFQR) \
3721  V(ftint_s, FTINT_S) \
3722  V(ftint_u, FTINT_U) \
3723  V(ffint_s, FFINT_S) \
3724  V(ffint_u, FFINT_U)
3725 
3726 #define MSA_2RF_FORMAT(name, opcode, format) \
3727  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
3728  GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd); \
3729  }
3730 
3731 #define MSA_2RF(name, opcode) \
3732  MSA_2RF_FORMAT(name, opcode, w) \
3733  MSA_2RF_FORMAT(name, opcode, d)
3734 
3735 MSA_2RF_LIST(MSA_2RF)
3736 #undef MSA_2RF
3737 #undef MSA_2RF_FORMAT
3738 #undef MSA_2RF_LIST
3739 
3740 #define MSA_3R_LIST(V) \
3741  V(sll, SLL_MSA) \
3742  V(sra, SRA_MSA) \
3743  V(srl, SRL_MSA) \
3744  V(bclr, BCLR) \
3745  V(bset, BSET) \
3746  V(bneg, BNEG) \
3747  V(binsl, BINSL) \
3748  V(binsr, BINSR) \
3749  V(addv, ADDV) \
3750  V(subv, SUBV) \
3751  V(max_s, MAX_S) \
3752  V(max_u, MAX_U) \
3753  V(min_s, MIN_S) \
3754  V(min_u, MIN_U) \
3755  V(max_a, MAX_A) \
3756  V(min_a, MIN_A) \
3757  V(ceq, CEQ) \
3758  V(clt_s, CLT_S) \
3759  V(clt_u, CLT_U) \
3760  V(cle_s, CLE_S) \
3761  V(cle_u, CLE_U) \
3762  V(add_a, ADD_A) \
3763  V(adds_a, ADDS_A) \
3764  V(adds_s, ADDS_S) \
3765  V(adds_u, ADDS_U) \
3766  V(ave_s, AVE_S) \
3767  V(ave_u, AVE_U) \
3768  V(aver_s, AVER_S) \
3769  V(aver_u, AVER_U) \
3770  V(subs_s, SUBS_S) \
3771  V(subs_u, SUBS_U) \
3772  V(subsus_u, SUBSUS_U) \
3773  V(subsuu_s, SUBSUU_S) \
3774  V(asub_s, ASUB_S) \
3775  V(asub_u, ASUB_U) \
3776  V(mulv, MULV) \
3777  V(maddv, MADDV) \
3778  V(msubv, MSUBV) \
3779  V(div_s, DIV_S_MSA) \
3780  V(div_u, DIV_U) \
3781  V(mod_s, MOD_S) \
3782  V(mod_u, MOD_U) \
3783  V(dotp_s, DOTP_S) \
3784  V(dotp_u, DOTP_U) \
3785  V(dpadd_s, DPADD_S) \
3786  V(dpadd_u, DPADD_U) \
3787  V(dpsub_s, DPSUB_S) \
3788  V(dpsub_u, DPSUB_U) \
3789  V(pckev, PCKEV) \
3790  V(pckod, PCKOD) \
3791  V(ilvl, ILVL) \
3792  V(ilvr, ILVR) \
3793  V(ilvev, ILVEV) \
3794  V(ilvod, ILVOD) \
3795  V(vshf, VSHF) \
3796  V(srar, SRAR) \
3797  V(srlr, SRLR) \
3798  V(hadd_s, HADD_S) \
3799  V(hadd_u, HADD_U) \
3800  V(hsub_s, HSUB_S) \
3801  V(hsub_u, HSUB_U)
3802 
3803 #define MSA_3R_FORMAT(name, opcode, format) \
3804  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3805  MSARegister wt) { \
3806  GenInstrMsa3R<MSARegister>(opcode, MSA_3R_DF_##format, wt, ws, wd); \
3807  }
3808 
3809 #define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format) \
3810  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3811  Register rt) { \
3812  GenInstrMsa3R<Register>(opcode, MSA_3R_DF_##format, rt, ws, wd); \
3813  }
3814 
3815 #define MSA_3R(name, opcode) \
3816  MSA_3R_FORMAT(name, opcode, b) \
3817  MSA_3R_FORMAT(name, opcode, h) \
3818  MSA_3R_FORMAT(name, opcode, w) \
3819  MSA_3R_FORMAT(name, opcode, d)
3820 
3821 #define MSA_3R_SLD_SPLAT(name, opcode) \
3822  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \
3823  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \
3824  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \
3825  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d)
3826 
3827 MSA_3R_LIST(MSA_3R)
3828 MSA_3R_SLD_SPLAT(sld, SLD)
3829 MSA_3R_SLD_SPLAT(splat, SPLAT)
3830 
3831 #undef MSA_3R
3832 #undef MSA_3R_FORMAT
3833 #undef MSA_3R_FORMAT_SLD_SPLAT
3834 #undef MSA_3R_SLD_SPLAT
3835 #undef MSA_3R_LIST
3836 
3837 #define MSA_3RF_LIST1(V) \
3838  V(fcaf, FCAF) \
3839  V(fcun, FCUN) \
3840  V(fceq, FCEQ) \
3841  V(fcueq, FCUEQ) \
3842  V(fclt, FCLT) \
3843  V(fcult, FCULT) \
3844  V(fcle, FCLE) \
3845  V(fcule, FCULE) \
3846  V(fsaf, FSAF) \
3847  V(fsun, FSUN) \
3848  V(fseq, FSEQ) \
3849  V(fsueq, FSUEQ) \
3850  V(fslt, FSLT) \
3851  V(fsult, FSULT) \
3852  V(fsle, FSLE) \
3853  V(fsule, FSULE) \
3854  V(fadd, FADD) \
3855  V(fsub, FSUB) \
3856  V(fmul, FMUL) \
3857  V(fdiv, FDIV) \
3858  V(fmadd, FMADD) \
3859  V(fmsub, FMSUB) \
3860  V(fexp2, FEXP2) \
3861  V(fmin, FMIN) \
3862  V(fmin_a, FMIN_A) \
3863  V(fmax, FMAX) \
3864  V(fmax_a, FMAX_A) \
3865  V(fcor, FCOR) \
3866  V(fcune, FCUNE) \
3867  V(fcne, FCNE) \
3868  V(fsor, FSOR) \
3869  V(fsune, FSUNE) \
3870  V(fsne, FSNE)
3871 
3872 #define MSA_3RF_LIST2(V) \
3873  V(fexdo, FEXDO) \
3874  V(ftq, FTQ) \
3875  V(mul_q, MUL_Q) \
3876  V(madd_q, MADD_Q) \
3877  V(msub_q, MSUB_Q) \
3878  V(mulr_q, MULR_Q) \
3879  V(maddr_q, MADDR_Q) \
3880  V(msubr_q, MSUBR_Q)
3881 
3882 #define MSA_3RF_FORMAT(name, opcode, df, df_c) \
3883  void Assembler::name##_##df(MSARegister wd, MSARegister ws, \
3884  MSARegister wt) { \
3885  GenInstrMsa3RF(opcode, df_c, wt, ws, wd); \
3886  }
3887 
3888 #define MSA_3RF_1(name, opcode) \
3889  MSA_3RF_FORMAT(name, opcode, w, 0) \
3890  MSA_3RF_FORMAT(name, opcode, d, 1)
3891 
3892 #define MSA_3RF_2(name, opcode) \
3893  MSA_3RF_FORMAT(name, opcode, h, 0) \
3894  MSA_3RF_FORMAT(name, opcode, w, 1)
3895 
3896 MSA_3RF_LIST1(MSA_3RF_1)
3897 MSA_3RF_LIST2(MSA_3RF_2)
3898 #undef MSA_3RF_1
3899 #undef MSA_3RF_2
3900 #undef MSA_3RF_FORMAT
3901 #undef MSA_3RF_LIST1
3902 #undef MSA_3RF_LIST2
3903 
3904 void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) {
3905  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_B, n, ws, wd);
3906 }
3907 
3908 void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) {
3909  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_H, n, ws, wd);
3910 }
3911 
3912 void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) {
3913  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_W, n, ws, wd);
3914 }
3915 
3916 void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) {
3917  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_D, n, ws, wd);
3918 }
3919 
3920 void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) {
3921  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_B, n, ws, wd);
3922 }
3923 
3924 void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) {
3925  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_H, n, ws, wd);
3926 }
3927 
3928 void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) {
3929  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_W, n, ws, wd);
3930 }
3931 
3932 void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) {
3933  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_D, n, ws, wd);
3934 }
3935 
3936 void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) {
3937  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_B, n, ws, rd);
3938 }
3939 
3940 void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) {
3941  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_H, n, ws, rd);
3942 }
3943 
3944 void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) {
3945  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_W, n, ws, rd);
3946 }
3947 
3948 void Assembler::copy_s_d(Register rd, MSARegister ws, uint32_t n) {
3949  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_D, n, ws, rd);
3950 }
3951 
3952 void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) {
3953  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_B, n, ws, rd);
3954 }
3955 
3956 void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) {
3957  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_H, n, ws, rd);
3958 }
3959 
3960 void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) {
3961  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_W, n, ws, rd);
3962 }
3963 
3964 void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) {
3965  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_B, n, rs, wd);
3966 }
3967 
3968 void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) {
3969  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_H, n, rs, wd);
3970 }
3971 
3972 void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) {
3973  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_W, n, rs, wd);
3974 }
3975 
3976 void Assembler::insert_d(MSARegister wd, uint32_t n, Register rs) {
3977  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_D, n, rs, wd);
3978 }
3979 
3980 void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) {
3981  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_B, n, ws, wd);
3982 }
3983 
3984 void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
3985  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_H, n, ws, wd);
3986 }
3987 
3988 void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
3989  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_W, n, ws, wd);
3990 }
3991 
3992 void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
3993  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_D, n, ws, wd);
3994 }
3995 
3996 void Assembler::move_v(MSARegister wd, MSARegister ws) {
3997  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
3998  DCHECK(ws.is_valid() && wd.is_valid());
3999  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
4000  (wd.code() << kWdShift) | MSA_ELM_MINOR;
4001  emit(instr);
4002 }
4003 
4004 void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
4005  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
4006  DCHECK(cd.is_valid() && rs.is_valid());
4007  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
4008  (cd.code() << kWdShift) | MSA_ELM_MINOR;
4009  emit(instr);
4010 }
4011 
4012 void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
4013  DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
4014  DCHECK(rd.is_valid() && cs.is_valid());
4015  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
4016  (rd.code() << kWdShift) | MSA_ELM_MINOR;
4017  emit(instr);
4018 }
4019 
4020 #define MSA_BIT_LIST(V) \
4021  V(slli, SLLI) \
4022  V(srai, SRAI) \
4023  V(srli, SRLI) \
4024  V(bclri, BCLRI) \
4025  V(bseti, BSETI) \
4026  V(bnegi, BNEGI) \
4027  V(binsli, BINSLI) \
4028  V(binsri, BINSRI) \
4029  V(sat_s, SAT_S) \
4030  V(sat_u, SAT_U) \
4031  V(srari, SRARI) \
4032  V(srlri, SRLRI)
4033 
4034 #define MSA_BIT_FORMAT(name, opcode, format) \
4035  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
4036  uint32_t m) { \
4037  GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd); \
4038  }
4039 
4040 #define MSA_BIT(name, opcode) \
4041  MSA_BIT_FORMAT(name, opcode, b) \
4042  MSA_BIT_FORMAT(name, opcode, h) \
4043  MSA_BIT_FORMAT(name, opcode, w) \
4044  MSA_BIT_FORMAT(name, opcode, d)
4045 
4046 MSA_BIT_LIST(MSA_BIT)
4047 #undef MSA_BIT
4048 #undef MSA_BIT_FORMAT
4049 #undef MSA_BIT_LIST
4050 
4051 int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
4052  intptr_t pc_delta) {
4053  if (RelocInfo::IsInternalReference(rmode)) {
4054  int64_t* p = reinterpret_cast<int64_t*>(pc);
4055  if (*p == kEndOfJumpChain) {
4056  return 0; // Number of instructions patched.
4057  }
4058  *p += pc_delta;
4059  return 2; // Number of instructions patched.
4060  }
4061  Instr instr = instr_at(pc);
4062  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
4063  if (IsLui(instr)) {
4064  Instr instr_lui = instr_at(pc + 0 * kInstrSize);
4065  Instr instr_ori = instr_at(pc + 1 * kInstrSize);
4066  Instr instr_ori2 = instr_at(pc + 3 * kInstrSize);
4067  DCHECK(IsOri(instr_ori));
4068  DCHECK(IsOri(instr_ori2));
4069  // TODO(plind): symbolic names for the shifts.
4070  int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
4071  imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
4072  imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
4073  // Sign extend address.
4074  imm >>= 16;
4075 
4076  if (imm == kEndOfJumpChain) {
4077  return 0; // Number of instructions patched.
4078  }
4079  imm += pc_delta;
4080  DCHECK_EQ(imm & 3, 0);
4081 
4082  instr_lui &= ~kImm16Mask;
4083  instr_ori &= ~kImm16Mask;
4084  instr_ori2 &= ~kImm16Mask;
4085 
4086  instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask));
4087  instr_at_put(pc + 1 * kInstrSize, instr_ori | (imm >> 16 & kImm16Mask));
4088  instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
4089  return 4; // Number of instructions patched.
4090  } else if (IsJ(instr) || IsJal(instr)) {
4091  // Regular j/jal relocation.
4092  uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
4093  imm28 += pc_delta;
4094  imm28 &= kImm28Mask;
4095  instr &= ~kImm26Mask;
4096  DCHECK_EQ(imm28 & 3, 0);
4097  uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
4098  instr_at_put(pc, instr | (imm26 & kImm26Mask));
4099  return 1; // Number of instructions patched.
4100  } else {
4101  DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
4102  ((instr & kJumpRawMask) == kJalRawMark));
4103  // Unbox raw offset and emit j/jal.
4104  int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
4105  // Sign extend 28-bit offset to 32-bit.
4106  imm28 = (imm28 << 4) >> 4;
4107  uint64_t target =
4108  static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
4109  target &= kImm28Mask;
4110  DCHECK_EQ(imm28 & 3, 0);
4111  uint32_t imm26 = static_cast<uint32_t>(target >> 2);
4112  // Check the marking to decide whether to emit j or jal.
4113  uint32_t unbox = (instr & kJRawMark) ? J : JAL;
4114  instr_at_put(pc, unbox | (imm26 & kImm26Mask));
4115  return 1; // Number of instructions patched.
4116  }
4117 }
4118 
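// For j/jal the target is a 26-bit word index inside the current 256 MB
// region: the code above shifts it left by 2, adds pc_delta, wraps with
// kImm28Mask, and stores the result shifted right by 2. A standalone sketch
// of that arithmetic on plain integers (illustrative only):

static uint32_t PatchJumpImm26(uint32_t imm26, intptr_t pc_delta) {
  uint32_t imm28 = (imm26 << 2) + static_cast<uint32_t>(pc_delta);
  imm28 &= (1u << 28) - 1;  // stay inside the 256 MB region (kImm28Mask)
  return imm28 >> 2;
}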
4119 
4120 void Assembler::GrowBuffer() {
4121  if (!own_buffer_) FATAL("external code buffer is too small");
4122 
4123  // Compute new buffer size.
4124  CodeDesc desc; // the new buffer
4125  if (buffer_size_ < 1 * MB) {
4126  desc.buffer_size = 2 * buffer_size_;
4127  } else {
4128  desc.buffer_size = buffer_size_ + 1 * MB;
4129  }
4130 
4131  // Some internal data structures overflow for very large buffers, so
4132  // kMaximalBufferSize must be kept small enough to avoid that.
4133  if (desc.buffer_size > kMaximalBufferSize) {
4134  V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
4135  }
4136 
4137  // Set up new buffer.
4138  desc.buffer = NewArray<byte>(desc.buffer_size);
4139  desc.origin = this;
4140 
4141  desc.instr_size = pc_offset();
4142  desc.reloc_size =
4143  static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
4144 
4145  // Copy the data.
4146  intptr_t pc_delta = desc.buffer - buffer_;
4147  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
4148  (buffer_ + buffer_size_);
4149  MemMove(desc.buffer, buffer_, desc.instr_size);
4150  MemMove(reloc_info_writer.pos() + rc_delta,
4151  reloc_info_writer.pos(), desc.reloc_size);
4152 
4153  // Switch buffers.
4154  DeleteArray(buffer_);
4155  buffer_ = desc.buffer;
4156  buffer_size_ = desc.buffer_size;
4157  pc_ += pc_delta;
4158  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
4159  reloc_info_writer.last_pc() + pc_delta);
4160 
4161  // Relocate runtime entries.
4162  for (RelocIterator it(desc); !it.done(); it.next()) {
4163  RelocInfo::Mode rmode = it.rinfo()->rmode();
4164  if (rmode == RelocInfo::INTERNAL_REFERENCE) {
4165  RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
4166  }
4167  }
4168  DCHECK(!overflow());
4169 }
4170 
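// The growth policy above doubles the buffer while it is under 1 MB and then
// grows linearly by 1 MB per step, bounding both the number of reallocations
// and the worst-case slack. A standalone sketch (illustrative only):

static int NextBufferSize(int current_size) {
  constexpr int kOneMB = 1 << 20;  // matches the MB constant used above
  return current_size < kOneMB ? 2 * current_size : current_size + kOneMB;
}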
4171 
4172 void Assembler::db(uint8_t data) {
4173  CheckForEmitInForbiddenSlot();
4174  EmitHelper(data);
4175 }
4176 
4177 
4178 void Assembler::dd(uint32_t data) {
4179  CheckForEmitInForbiddenSlot();
4180  EmitHelper(data);
4181 }
4182 
4183 
4184 void Assembler::dq(uint64_t data) {
4185  CheckForEmitInForbiddenSlot();
4186  EmitHelper(data);
4187 }
4188 
4189 
4190 void Assembler::dd(Label* label) {
4191  uint64_t data;
4192  CheckForEmitInForbiddenSlot();
4193  if (label->is_bound()) {
4194  data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
4195  } else {
4196  data = jump_address(label);
4197  unbound_labels_count_++;
4198  internal_reference_positions_.insert(label->pos());
4199  }
4200  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
4201  EmitHelper(data);
4202 }
4203 
4204 
4205 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
4206  if (!ShouldRecordRelocInfo(rmode)) return;
4207  // We do not try to reuse pool constants.
4208  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
4209  DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
4210  reloc_info_writer.Write(&rinfo);
4211 }
4212 
4213 
4214 void Assembler::BlockTrampolinePoolFor(int instructions) {
4215  CheckTrampolinePoolQuick(instructions);
4216  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
4217 }
4218 
4219 
4220 void Assembler::CheckTrampolinePool() {
4221  // Some small sequences of instructions must not be broken up by the
4222  // insertion of a trampoline pool; such sequences are protected by setting
4223  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
4224  // which are both checked here. Also, recursive calls to CheckTrampolinePool
4225  // are blocked by trampoline_pool_blocked_nesting_.
4226  if ((trampoline_pool_blocked_nesting_ > 0) ||
4227  (pc_offset() < no_trampoline_pool_before_)) {
4228  // Emission is currently blocked; make sure we try again as soon as
4229  // possible.
4230  if (trampoline_pool_blocked_nesting_ > 0) {
4231  next_buffer_check_ = pc_offset() + kInstrSize;
4232  } else {
4233  next_buffer_check_ = no_trampoline_pool_before_;
4234  }
4235  return;
4236  }
4237 
4238  DCHECK(!trampoline_emitted_);
4239  DCHECK_GE(unbound_labels_count_, 0);
4240  if (unbound_labels_count_ > 0) {
4241  // First we emit a jump (2 instructions), then we emit the trampoline pool.
4242  { BlockTrampolinePoolScope block_trampoline_pool(this);
4243  Label after_pool;
4244  if (kArchVariant == kMips64r6) {
4245  bc(&after_pool);
4246  } else {
4247  b(&after_pool);
4248  }
4249  nop();
4250 
4251  int pool_start = pc_offset();
4252  for (int i = 0; i < unbound_labels_count_; i++) {
4253  {
4254  if (kArchVariant == kMips64r6) {
4255  bc(&after_pool);
4256  nop();
4257  } else {
4258  or_(t8, ra, zero_reg);
4259  nal(); // Read PC into ra register.
4260  lui(t9, 0); // Branch delay slot.
4261  ori(t9, t9, 0);
4262  daddu(t9, ra, t9);
4263  or_(ra, t8, zero_reg);
4264  // The jr instruction will execute the or_ at the start of the next
4265  // trampoline slot in its branch delay slot. This is the expected
4266  // behavior, as it decreases the size of the trampoline pool.
4267  jr(t9);
4268  }
4269  }
4270  }
4271  nop();
4272  bind(&after_pool);
4273  trampoline_ = Trampoline(pool_start, unbound_labels_count_);
4274 
4275  trampoline_emitted_ = true;
4276  // As we are only going to emit the trampoline once, we need to prevent
4277  // any further emission.
4278  next_buffer_check_ = kMaxInt;
4279  }
4280  } else {
4281  // The number of branches to unbound labels at this point is zero, so we
4282  // can move the next buffer check to the maximum.
4283  next_buffer_check_ = pc_offset() +
4284  kMaxBranchOffset - kTrampolineSlotsSize * 16;
4285  }
4286  return;
4287 }
4288 
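// The slack subtracted above (kTrampolineSlotsSize * 16) keeps the next pool
// check well within the reach of a short branch (roughly +/-128 KB for a
// signed 18-bit byte offset), so every branch emitted before the check can
// still reach a trampoline slot. A sketch of the schedule, treating the
// constants as assumptions (illustrative only):

static int NextPoolCheck(int pc_offset, int max_branch_offset,
                         int trampoline_slot_size) {
  return pc_offset + max_branch_offset - trampoline_slot_size * 16;
}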
4289 
4290 Address Assembler::target_address_at(Address pc) {
4291  Instr instr0 = instr_at(pc);
4292  Instr instr1 = instr_at(pc + 1 * kInstrSize);
4293  Instr instr3 = instr_at(pc + 3 * kInstrSize);
4294 
4295  // Interpret the 4-instruction sequence that li generates for an address:
4296  // see the listing in Assembler::set_target_value_at() below.
4297  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
4298  (GetOpcodeField(instr3) == ORI)) {
4299  // Assemble the 48 bit value.
4300  int64_t addr = static_cast<int64_t>(
4301  ((uint64_t)(GetImmediate16(instr0)) << 32) |
4302  ((uint64_t)(GetImmediate16(instr1)) << 16) |
4303  ((uint64_t)(GetImmediate16(instr3))));
4304 
4305  // Sign extend to get canonical address.
4306  addr = (addr << 16) >> 16;
4307  return static_cast<Address>(addr);
4308  }
4309  // We should never get here; force a failure if we do.
4310  UNREACHABLE();
4311 }
4312 
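// Reassembling and canonicalizing the 48-bit value: the three 16-bit
// immediates form bits [47:0], and shifting left then arithmetically right
// by 16 replicates bit 47 through bits [63:48]. A standalone sketch
// (illustrative only):

static int64_t AssembleCanonical48(uint16_t hi, uint16_t mid, uint16_t lo) {
  uint64_t raw = (static_cast<uint64_t>(hi) << 32) |
                 (static_cast<uint64_t>(mid) << 16) | lo;
  // Sign-extend from bit 47, as (addr << 16) >> 16 does above.
  return static_cast<int64_t>(raw << 16) >> 16;
}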
4313 
4314 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
4315 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
4316 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
4317 // OS::nan_value() returns a qNaN.
4318 void Assembler::QuietNaN(HeapObject* object) {
4319  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
4320 }
4321 
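// On legacy MIPS the quiet bit is inverted relative to IEEE 754-2008 (and
// ia32): the most significant fraction bit is 0 for a qNaN and 1 for an
// sNaN. A sketch showing the host qNaN pattern that QuietNaN writes,
// assuming <cstring> is available here (illustrative only):

static uint64_t HostQuietNaNBits() {
  double qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits;
  memcpy(&bits, &qnan, sizeof(bits));
  return bits;  // typically 0x7FF8000000000000 on IEEE 754-2008 hosts
}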
4322 
4323 // On MIPS64, a target address is stored in a 4-instruction sequence:
4324 // 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
4325 // 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
4326 // 2: dsll(rd, rd, 16);
4327 // 3: ori(rd, rd, j.imm64_ & kImm16Mask);
4328 //
4329 // Patching the address must replace all the lui & ori instructions,
4330 // and flush the i-cache.
4331 //
4332 // There is an optimization below, which emits a nop when the address
4333 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
4334 // and possibly removed.
4335 void Assembler::set_target_value_at(Address pc, uint64_t target,
4336  ICacheFlushMode icache_flush_mode) {
4337  // There is an optimization where only 4 instructions are used to load an
4338  // address in code on MIPS64, because only 48 bits of the address are
4339  // effectively used. It relies on the fact that the upper [63:48] bits are
4340  // not used for virtual address translation and must be set according to
4341  // the value of bit 47 in order to obtain a canonical address.
4342  Instr instr1 = instr_at(pc + kInstrSize);
4343  uint32_t rt_code = GetRt(instr1);
4344  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
4345 
4346 #ifdef DEBUG
4347  // Check we have the result from a li macro-instruction.
4348  Instr instr0 = instr_at(pc);
4349  Instr instr3 = instr_at(pc + kInstrSize * 3);
4350  DCHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
4351  GetOpcodeField(instr3) == ORI));
4352 #endif
4353 
4354  // Must use 4 instructions to ensure patchable code.
4355  // lui rt, bits [47:32].
4356  // ori rt, rt, bits [31:16].
4357  // dsll rt, rt, 16.
4358  // ori rt, rt, bits [15:0].
4359  *p = LUI | (rt_code << kRtShift) | ((target >> 32) & kImm16Mask);
4360  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) |
4361  ((target >> 16) & kImm16Mask);
4362  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) |
4363  (target & kImm16Mask);
4364 
4365  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
4366  Assembler::FlushICache(pc, 4 * kInstrSize);
4367  }
4368 }
4369 
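// The inverse of the patch above: splitting a 48-bit target into the three
// 16-bit immediates written into the lui/ori/ori slots. A standalone sketch
// (illustrative only; the real patch also rewrites the instruction words):

static void SplitTarget48(uint64_t target, uint16_t* hi, uint16_t* mid,
                          uint16_t* lo) {
  *hi = static_cast<uint16_t>(target >> 32);   // lui immediate
  *mid = static_cast<uint16_t>(target >> 16);  // first ori immediate
  *lo = static_cast<uint16_t>(target);         // second ori immediate
}
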
4370 UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
4371  : available_(assembler->GetScratchRegisterList()),
4372  old_available_(*available_) {}
4373 
4374 UseScratchRegisterScope::~UseScratchRegisterScope() {
4375  *available_ = old_available_;
4376 }
4377 
4378 Register UseScratchRegisterScope::Acquire() {
4379  DCHECK_NOT_NULL(available_);
4380  DCHECK_NE(*available_, 0);
4381  int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
4382  *available_ &= ~(1UL << index);
4383 
4384  return Register::from_code(index);
4385 }
4386 
4387 bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
4388 
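// Typical use of the scope: the constructor snapshots the scratch list,
// Acquire() claims the lowest-numbered free register by clearing its bit,
// and the destructor restores the list. A usage sketch with a hypothetical
// assembler pointer (not part of the original source):
//
//   {
//     UseScratchRegisterScope temps(assembler);
//     Register scratch = temps.Acquire();
//     // ... use scratch ...
//   }  // scratch becomes available again here
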
4389 } // namespace internal
4390 } // namespace v8
4391 
4392 #endif // V8_TARGET_ARCH_MIPS64