V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
assembler-mips.cc
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 #include "src/mips/assembler-mips.h"
36 
37 #if V8_TARGET_ARCH_MIPS
38 
39 #include "src/base/bits.h"
40 #include "src/base/cpu.h"
41 #include "src/code-stubs.h"
42 #include "src/deoptimizer.h"
43 #include "src/mips/assembler-mips-inl.h"
44 #include "src/string-constants.h"
45 
46 namespace v8 {
47 namespace internal {
48 
49 // Get the CPU features enabled by the build. For cross compilation, the
50 // preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
51 // can be defined to enable FPU instructions when building the
52 // snapshot.
53 static unsigned CpuFeaturesImpliedByCompiler() {
54  unsigned answer = 0;
55 #ifdef CAN_USE_FPU_INSTRUCTIONS
56  answer |= 1u << FPU;
57 #endif // def CAN_USE_FPU_INSTRUCTIONS
58 
59  // If the compiler is allowed to use FPU then we can use FPU too in our code
60  // generation even when generating snapshots. This won't work for cross
61  // compilation.
62 #if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
63  answer |= 1u << FPU;
64 #endif
65 
66  return answer;
67 }
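A minimal sketch of how these probed feature bits are consumed during code generation; CpuFeatures::IsSupported and CpuFeatureScope are the V8 helpers, while `assm` and the guarded body are illustrative assumptions:

if (CpuFeatures::IsSupported(FPU)) {
  // Scope object asserting that FPU use is legal here (assm is hypothetical).
  CpuFeatureScope scope(&assm, FPU);
  // FPU instructions may be emitted within this scope.
}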
68 
69 
70 void CpuFeatures::ProbeImpl(bool cross_compile) {
71  supported_ |= CpuFeaturesImpliedByCompiler();
72 
73  // Only use statically determined features for cross compile (snapshot).
74  if (cross_compile) return;
75 
76  // If the compiler is allowed to use the FPU, then we can use the FPU too
77  // in our code generation.
78 #ifndef __mips__
79  // For the simulator build, use FPU.
80  supported_ |= 1u << FPU;
81 #if defined(_MIPS_ARCH_MIPS32R6)
82  // FP64 mode is implied on r6.
83  supported_ |= 1u << FP64FPU;
84 #if defined(_MIPS_MSA)
85  supported_ |= 1u << MIPS_SIMD;
86 #endif
87 #endif
88 #if defined(FPU_MODE_FP64)
89  supported_ |= 1u << FP64FPU;
90 #endif
91 #else
92  // Probe for additional features at runtime.
93  base::CPU cpu;
94  if (cpu.has_fpu()) supported_ |= 1u << FPU;
95 #if defined(FPU_MODE_FPXX)
96  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
97 #elif defined(FPU_MODE_FP64)
98  supported_ |= 1u << FP64FPU;
99 #if defined(_MIPS_ARCH_MIPS32R6)
100 #if defined(_MIPS_MSA)
101  supported_ |= 1u << MIPS_SIMD;
102 #else
103  if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
104 #endif
105 #endif
106 #endif
107 #if defined(_MIPS_ARCH_MIPS32RX)
108  if (cpu.architecture() == 6) {
109  supported_ |= 1u << MIPSr6;
110  } else if (cpu.architecture() == 2) {
111  supported_ |= 1u << MIPSr1;
112  supported_ |= 1u << MIPSr2;
113  } else {
114  supported_ |= 1u << MIPSr1;
115  }
116 #endif
117 #endif
118 }
119 
120 
121 void CpuFeatures::PrintTarget() { }
122 void CpuFeatures::PrintFeatures() { }
123 
124 
125 int ToNumber(Register reg) {
126  DCHECK(reg.is_valid());
127  const int kNumbers[] = {
128  0, // zero_reg
129  1, // at
130  2, // v0
131  3, // v1
132  4, // a0
133  5, // a1
134  6, // a2
135  7, // a3
136  8, // t0
137  9, // t1
138  10, // t2
139  11, // t3
140  12, // t4
141  13, // t5
142  14, // t6
143  15, // t7
144  16, // s0
145  17, // s1
146  18, // s2
147  19, // s3
148  20, // s4
149  21, // s5
150  22, // s6
151  23, // s7
152  24, // t8
153  25, // t9
154  26, // k0
155  27, // k1
156  28, // gp
157  29, // sp
158  30, // fp
159  31, // ra
160  };
161  return kNumbers[reg.code()];
162 }
163 
164 
165 Register ToRegister(int num) {
166  DCHECK(num >= 0 && num < kNumRegisters);
167  const Register kRegisters[] = {
168  zero_reg,
169  at,
170  v0, v1,
171  a0, a1, a2, a3,
172  t0, t1, t2, t3, t4, t5, t6, t7,
173  s0, s1, s2, s3, s4, s5, s6, s7,
174  t8, t9,
175  k0, k1,
176  gp,
177  sp,
178  fp,
179  ra
180  };
181  return kRegisters[num];
182 }
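The two tables above are mutual inverses, so converting a register code to a Register and back is the identity. A quick illustrative check (a sketch, using the DCHECK_EQ macro already used in this file):

for (int n = 0; n < kNumRegisters; n++) {
  DCHECK_EQ(ToNumber(ToRegister(n)), n);  // Round-trip through both tables.
}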
183 
184 
185 // -----------------------------------------------------------------------------
186 // Implementation of RelocInfo.
187 
188 const int RelocInfo::kApplyMask =
189  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
190  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
191  RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
192 
193 bool RelocInfo::IsCodedSpecially() {
194  // The deserializer needs to know whether a pointer is specially coded. Being
195  // specially coded on MIPS means that it is a lui/ori instruction, and that is
196  // always the case inside code objects.
197  return true;
198 }
199 
200 
201 bool RelocInfo::IsInConstantPool() {
202  return false;
203 }
204 
205 int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
206  DCHECK(IsRuntimeEntry(rmode_));
207  return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
208 }
209 
210 uint32_t RelocInfo::wasm_call_tag() const {
211  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
212  return static_cast<uint32_t>(
213  Assembler::target_address_at(pc_, constant_pool_));
214 }
215 
216 // -----------------------------------------------------------------------------
217 // Implementation of Operand and MemOperand.
218 // See assembler-mips-inl.h for inlined constructors.
219 
220 Operand::Operand(Handle<HeapObject> handle)
221  : rm_(no_reg), rmode_(RelocInfo::EMBEDDED_OBJECT) {
222  value_.immediate = static_cast<intptr_t>(handle.address());
223 }
224 
225 Operand Operand::EmbeddedNumber(double value) {
226  int32_t smi;
227  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
228  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
229  result.is_heap_object_request_ = true;
230  result.value_.heap_object_request = HeapObjectRequest(value);
231  return result;
232 }
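A short usage sketch, assuming the MIPS macro-assembler's li (load immediate) and the conventional `__` shorthand from the surrounding codebase:

__ li(a0, Operand::EmbeddedNumber(1.0));  // Fits a Smi: materialized directly.
__ li(a1, Operand::EmbeddedNumber(1.5));  // Records a HeapObjectRequest that is
                                          // patched later by
                                          // AllocateAndInstallRequestedHeapObjects.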
233 
234 Operand Operand::EmbeddedCode(CodeStub* stub) {
235  Operand result(0, RelocInfo::CODE_TARGET);
236  result.is_heap_object_request_ = true;
237  result.value_.heap_object_request = HeapObjectRequest(stub);
238  return result;
239 }
240 
241 Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
242  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
243  result.is_heap_object_request_ = true;
244  result.value_.heap_object_request = HeapObjectRequest(str);
245  return result;
246 }
247 
248 MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
249  offset_ = offset;
250 }
251 
252 
253 MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
254  OffsetAddend offset_addend) : Operand(rm) {
255  offset_ = unit * multiplier + offset_addend;
256 }
257 
258 void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
259  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
260  for (auto& request : heap_object_requests_) {
261  Handle<HeapObject> object;
262  switch (request.kind()) {
263  case HeapObjectRequest::kHeapNumber:
264  object =
265  isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
266  break;
267  case HeapObjectRequest::kCodeStub:
268  request.code_stub()->set_isolate(isolate);
269  object = request.code_stub()->GetCode();
270  break;
271  case HeapObjectRequest::kStringConstant:
272  const StringConstantBase* str = request.string();
273  CHECK_NOT_NULL(str);
274  object = str->AllocateStringConstant(isolate);
275  break;
276  }
277  Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
278  set_target_value_at(pc, reinterpret_cast<uint32_t>(object.location()));
279  }
280 }
281 
282 // -----------------------------------------------------------------------------
283 // Specific instructions, constants, and masks.
284 
285 static const int kNegOffset = 0x00008000;
286 // addiu(sp, sp, 4), aka the Pop() operation, or part of a Pop(r)
287 // operation, as a post-increment of sp.
288 const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) |
289  (sp.code() << kRtShift) |
290  (kPointerSize & kImm16Mask); // NOLINT
291 // addiu(sp, sp, -4): part of the Push(r) operation, as a pre-decrement of sp.
292 const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) |
293  (sp.code() << kRtShift) |
294  (-kPointerSize & kImm16Mask); // NOLINT
295 // sw(r, MemOperand(sp, 0))
296 const Instr kPushRegPattern =
297  SW | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
298 // lw(r, MemOperand(sp, 0))
299 const Instr kPopRegPattern =
300  LW | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
301 
302 const Instr kLwRegFpOffsetPattern =
303  LW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
304 
305 const Instr kSwRegFpOffsetPattern =
306  SW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
307 
308 const Instr kLwRegFpNegOffsetPattern =
309  LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
310 
311 const Instr kSwRegFpNegOffsetPattern =
312  SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
313 // A mask for the Rt register for push, pop, lw, sw instructions.
314 const Instr kRtMask = kRtFieldMask;
315 const Instr kLwSwInstrTypeMask = 0xFFE00000;
316 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
317 const Instr kLwSwOffsetMask = kImm16Mask;
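// Editor's sketch of the sequences the patterns above are built to match,
// assuming the usual Push/Pop expansion (these lines are illustrative and not
// emitted by this file):
//   Push(r):  addiu(sp, sp, -kPointerSize)   -> kPushInstruction
//             sw(r, MemOperand(sp, 0))       -> kPushRegPattern plus rt field
//   Pop(r):   lw(r, MemOperand(sp, 0))       -> kPopRegPattern plus rt field
//             addiu(sp, sp, kPointerSize)    -> kPopInstruction
// IsPush()/IsPop() below mask out the rt field before comparing, so they match
// regardless of which register is transferred.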
318 
319 Assembler::Assembler(const AssemblerOptions& options, void* buffer,
320  int buffer_size)
321  : AssemblerBase(options, buffer, buffer_size),
322  scratch_register_list_(at.bit()) {
323  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
324 
325  last_trampoline_pool_end_ = 0;
326  no_trampoline_pool_before_ = 0;
327  trampoline_pool_blocked_nesting_ = 0;
328  // We leave space (16 * kTrampolineSlotsSize)
329  // for BlockTrampolinePoolScope buffer.
330  next_buffer_check_ = FLAG_force_long_branches
331  ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
332  internal_trampoline_exception_ = false;
333  last_bound_pos_ = 0;
334 
335  trampoline_emitted_ = FLAG_force_long_branches;
336  unbound_labels_count_ = 0;
337  block_buffer_growth_ = false;
338 }
339 
340 void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
341  EmitForbiddenSlotInstruction();
342  DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
343 
344  AllocateAndInstallRequestedHeapObjects(isolate);
345 
346  // Set up code descriptor.
347  desc->buffer = buffer_;
348  desc->buffer_size = buffer_size_;
349  desc->instr_size = pc_offset();
350  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
351  desc->origin = this;
352  desc->constant_pool_size = 0;
353  desc->unwinding_info_size = 0;
354  desc->unwinding_info = nullptr;
355 }
356 
357 
358 void Assembler::Align(int m) {
359  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
360  EmitForbiddenSlotInstruction();
361  while ((pc_offset() & (m - 1)) != 0) {
362  nop();
363  }
364 }
365 
366 
367 void Assembler::CodeTargetAlign() {
368  // There is no known advantage to aligning branch/call targets to more
369  // than a single instruction.
370  Align(4);
371 }
372 
373 
374 Register Assembler::GetRtReg(Instr instr) {
375  return Register::from_code((instr & kRtFieldMask) >> kRtShift);
376 }
377 
378 
379 Register Assembler::GetRsReg(Instr instr) {
380  return Register::from_code((instr & kRsFieldMask) >> kRsShift);
381 }
382 
383 
384 Register Assembler::GetRdReg(Instr instr) {
385  return Register::from_code((instr & kRdFieldMask) >> kRdShift);
386 }
387 
388 
389 uint32_t Assembler::GetRt(Instr instr) {
390  return (instr & kRtFieldMask) >> kRtShift;
391 }
392 
393 
394 uint32_t Assembler::GetRtField(Instr instr) {
395  return instr & kRtFieldMask;
396 }
397 
398 
399 uint32_t Assembler::GetRs(Instr instr) {
400  return (instr & kRsFieldMask) >> kRsShift;
401 }
402 
403 
404 uint32_t Assembler::GetRsField(Instr instr) {
405  return instr & kRsFieldMask;
406 }
407 
408 
409 uint32_t Assembler::GetRd(Instr instr) {
410  return (instr & kRdFieldMask) >> kRdShift;
411 }
412 
413 
414 uint32_t Assembler::GetRdField(Instr instr) {
415  return instr & kRdFieldMask;
416 }
417 
418 
419 uint32_t Assembler::GetSa(Instr instr) {
420  return (instr & kSaFieldMask) >> kSaShift;
421 }
422 
423 
424 uint32_t Assembler::GetSaField(Instr instr) {
425  return instr & kSaFieldMask;
426 }
427 
428 
429 uint32_t Assembler::GetOpcodeField(Instr instr) {
430  return instr & kOpcodeMask;
431 }
432 
433 
434 uint32_t Assembler::GetFunction(Instr instr) {
435  return (instr & kFunctionFieldMask) >> kFunctionShift;
436 }
437 
438 
439 uint32_t Assembler::GetFunctionField(Instr instr) {
440  return instr & kFunctionFieldMask;
441 }
442 
443 
444 uint32_t Assembler::GetImmediate16(Instr instr) {
445  return instr & kImm16Mask;
446 }
447 
448 
449 uint32_t Assembler::GetLabelConst(Instr instr) {
450  return instr & ~kImm16Mask;
451 }
452 
453 
454 bool Assembler::IsPop(Instr instr) {
455  return (instr & ~kRtMask) == kPopRegPattern;
456 }
457 
458 
459 bool Assembler::IsPush(Instr instr) {
460  return (instr & ~kRtMask) == kPushRegPattern;
461 }
462 
463 
464 bool Assembler::IsSwRegFpOffset(Instr instr) {
465  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
466 }
467 
468 
469 bool Assembler::IsLwRegFpOffset(Instr instr) {
470  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
471 }
472 
473 
474 bool Assembler::IsSwRegFpNegOffset(Instr instr) {
475  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
476  kSwRegFpNegOffsetPattern);
477 }
478 
479 
480 bool Assembler::IsLwRegFpNegOffset(Instr instr) {
481  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
482  kLwRegFpNegOffsetPattern);
483 }
484 
485 
486 // Labels refer to positions in the (to be) generated code.
487 // There are bound, linked, and unused labels.
488 //
489 // Bound labels refer to known positions in the already
490 // generated code. pos() is the position the label refers to.
491 //
492 // Linked labels refer to unknown positions in the code
493 // to be generated; pos() is the position of the last
494 // instruction using the label.
495 
496 // The link chain is terminated by a value of -1 in the instruction,
497 // which is an otherwise illegal value (a branch to -1 is an infinite loop).
498 // The instruction's 16-bit offset field addresses 32-bit words, but in
499 // code it is converted to an 18-bit value addressing bytes, hence the -4 value.
500 
501 const int kEndOfChain = -4;
502 // Determines the end of the Jump chain (a subset of the label link chain).
503 const int kEndOfJumpChain = 0;
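A life-cycle sketch of the label machinery described above, assuming the Label* branch overloads from assembler-mips.h and the `__` macro shorthand:

Label loop;
__ bind(&loop);               // Bound: pos() is now a known code offset.
__ addiu(a0, a0, -1);
__ bne(a0, zero_reg, &loop);  // Backward branch: target already bound,
                              // so the offset is resolved immediately.
__ nop();                     // Branch delay slot.
// A branch to a not-yet-bound label instead links the branch into the
// label's chain; bind_to() later walks that chain and patches each site.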
504 
505 bool Assembler::IsMsaBranch(Instr instr) {
506  uint32_t opcode = GetOpcodeField(instr);
507  uint32_t rs_field = GetRsField(instr);
508  if (opcode == COP1) {
509  switch (rs_field) {
510  case BZ_V:
511  case BZ_B:
512  case BZ_H:
513  case BZ_W:
514  case BZ_D:
515  case BNZ_V:
516  case BNZ_B:
517  case BNZ_H:
518  case BNZ_W:
519  case BNZ_D:
520  return true;
521  default:
522  return false;
523  }
524  } else {
525  return false;
526  }
527 }
528 
529 bool Assembler::IsBranch(Instr instr) {
530  uint32_t opcode = GetOpcodeField(instr);
531  uint32_t rt_field = GetRtField(instr);
532  uint32_t rs_field = GetRsField(instr);
533  // Checks if the instruction is a branch.
534  bool isBranch =
535  opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
536  opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
537  (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
538  rt_field == BLTZAL || rt_field == BGEZAL)) ||
539  (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
540  (opcode == COP1 && rs_field == BC1EQZ) ||
541  (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
542  if (!isBranch && IsMipsArchVariant(kMips32r6)) {
543  // All three variants of POP10 (BOVC, BEQC, BEQZALC) and
544  // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
545  isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
546  opcode == BALC ||
547  (opcode == POP66 && rs_field != 0) || // BEQZC
548  (opcode == POP76 && rs_field != 0); // BNEZC
549  }
550  return isBranch;
551 }
552 
553 
554 bool Assembler::IsBc(Instr instr) {
555  uint32_t opcode = GetOpcodeField(instr);
556  // Checks if the instruction is a BC or BALC.
557  return opcode == BC || opcode == BALC;
558 }
559 
560 bool Assembler::IsNal(Instr instr) {
561  uint32_t opcode = GetOpcodeField(instr);
562  uint32_t rt_field = GetRtField(instr);
563  uint32_t rs_field = GetRsField(instr);
564  return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
565 }
566 
567 bool Assembler::IsBzc(Instr instr) {
568  uint32_t opcode = GetOpcodeField(instr);
569  // Checks if the instruction is BEQZC or BNEZC.
570  return (opcode == POP66 && GetRsField(instr) != 0) ||
571  (opcode == POP76 && GetRsField(instr) != 0);
572 }
573 
574 
575 bool Assembler::IsEmittedConstant(Instr instr) {
576  uint32_t label_constant = GetLabelConst(instr);
577  return label_constant == 0; // Emitted label const in reg-exp engine.
578 }
579 
580 
581 bool Assembler::IsBeq(Instr instr) {
582  return GetOpcodeField(instr) == BEQ;
583 }
584 
585 
586 bool Assembler::IsBne(Instr instr) {
587  return GetOpcodeField(instr) == BNE;
588 }
589 
590 
591 bool Assembler::IsBeqzc(Instr instr) {
592  uint32_t opcode = GetOpcodeField(instr);
593  return opcode == POP66 && GetRsField(instr) != 0;
594 }
595 
596 
597 bool Assembler::IsBnezc(Instr instr) {
598  uint32_t opcode = GetOpcodeField(instr);
599  return opcode == POP76 && GetRsField(instr) != 0;
600 }
601 
602 
603 bool Assembler::IsBeqc(Instr instr) {
604  uint32_t opcode = GetOpcodeField(instr);
605  uint32_t rs = GetRsField(instr);
606  uint32_t rt = GetRtField(instr);
607  return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0
608 }
609 
610 
611 bool Assembler::IsBnec(Instr instr) {
612  uint32_t opcode = GetOpcodeField(instr);
613  uint32_t rs = GetRsField(instr);
614  uint32_t rt = GetRtField(instr);
615  return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0
616 }
617 
618 bool Assembler::IsJicOrJialc(Instr instr) {
619  uint32_t opcode = GetOpcodeField(instr);
620  uint32_t rs = GetRsField(instr);
621  return (opcode == POP66 || opcode == POP76) && rs == 0;
622 }
623 
624 bool Assembler::IsJump(Instr instr) {
625  uint32_t opcode = GetOpcodeField(instr);
626  uint32_t rt_field = GetRtField(instr);
627  uint32_t rd_field = GetRdField(instr);
628  uint32_t function_field = GetFunctionField(instr);
629  // Checks if the instruction is a jump.
630  return opcode == J || opcode == JAL ||
631  (opcode == SPECIAL && rt_field == 0 &&
632  ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
633 }
634 
635 bool Assembler::IsJ(Instr instr) {
636  uint32_t opcode = GetOpcodeField(instr);
637  // Checks if the instruction is a jump.
638  return opcode == J;
639 }
640 
641 
642 bool Assembler::IsJal(Instr instr) {
643  return GetOpcodeField(instr) == JAL;
644 }
645 
646 
647 bool Assembler::IsJr(Instr instr) {
648  if (!IsMipsArchVariant(kMips32r6)) {
649  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
650  } else {
651  return GetOpcodeField(instr) == SPECIAL &&
652  GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
653  }
654 }
655 
656 
657 bool Assembler::IsJalr(Instr instr) {
658  return GetOpcodeField(instr) == SPECIAL &&
659  GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
660 }
661 
662 
663 bool Assembler::IsLui(Instr instr) {
664  uint32_t opcode = GetOpcodeField(instr);
665  // Checks if the instruction is a load upper immediate.
666  return opcode == LUI;
667 }
668 
669 
670 bool Assembler::IsOri(Instr instr) {
671  uint32_t opcode = GetOpcodeField(instr);
672  // Checks if the instruction is an or-immediate (ORI).
673  return opcode == ORI;
674 }
675 
676 bool Assembler::IsAddu(Instr instr, Register rd, Register rs, Register rt) {
677  uint32_t opcode = GetOpcodeField(instr);
678  uint32_t rd_field = GetRd(instr);
679  uint32_t rs_field = GetRs(instr);
680  uint32_t rt_field = GetRt(instr);
681  uint32_t sa_field = GetSaField(instr);
682  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
683  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
684  uint32_t rt_reg = static_cast<uint32_t>(rt.code());
685  uint32_t function_field = GetFunction(instr);
686  return opcode == SPECIAL && sa_field == 0 && function_field == ADDU &&
687  rd_reg == rd_field && rs_reg == rs_field && rt_reg == rt_field;
688 }
689 
690 bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
691  uint32_t opcode = GetOpcodeField(instr);
692  uint32_t rd_field = GetRd(instr);
693  uint32_t rs_field = GetRs(instr);
694  uint32_t rt_field = GetRt(instr);
695  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
696  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
697  uint32_t function_field = GetFunctionField(instr);
698  // Checks if the instruction is an OR with a zero_reg argument (aka MOV).
699  bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
700  rs_field == rs_reg && rt_field == 0;
701  return res;
702 }
703 
704 bool Assembler::IsNop(Instr instr, unsigned int type) {
705  // See Assembler::nop(type).
706  DCHECK_LT(type, 32);
707  uint32_t opcode = GetOpcodeField(instr);
708  uint32_t function = GetFunctionField(instr);
709  uint32_t rt = GetRt(instr);
710  uint32_t rd = GetRd(instr);
711  uint32_t sa = GetSa(instr);
712 
713  // Traditional MIPS nop == sll(zero_reg, zero_reg, 0).
714  // When marking a non-zero type, use sll(zero_reg, at, type)
715  // to avoid use of the MIPS ssnop and ehb special encodings
716  // of the sll instruction.
717 
718  Register nop_rt_reg = (type == 0) ? zero_reg : at;
719  bool ret = (opcode == SPECIAL && function == SLL &&
720  rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
721  rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
722  sa == type);
723 
724  return ret;
725 }
726 
727 
728 int32_t Assembler::GetBranchOffset(Instr instr) {
729  DCHECK(IsBranch(instr));
730  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
731 }
732 
733 
734 bool Assembler::IsLw(Instr instr) {
735  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
736 }
737 
738 
739 int16_t Assembler::GetLwOffset(Instr instr) {
740  DCHECK(IsLw(instr));
741  return ((instr & kImm16Mask));
742 }
743 
744 
745 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
746  DCHECK(IsLw(instr));
747 
748  // We actually create a new lw instruction based on the original one.
749  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
750  | (offset & kImm16Mask);
751 
752  return temp_instr;
753 }
754 
755 
756 bool Assembler::IsSw(Instr instr) {
757  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
758 }
759 
760 
761 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
762  DCHECK(IsSw(instr));
763  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
764 }
765 
766 
767 bool Assembler::IsAddImmediate(Instr instr) {
768  return ((instr & kOpcodeMask) == ADDIU);
769 }
770 
771 
772 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
773  DCHECK(IsAddImmediate(instr));
774  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
775 }
776 
777 
778 bool Assembler::IsAndImmediate(Instr instr) {
779  return GetOpcodeField(instr) == ANDI;
780 }
781 
782 
783 static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
784  if (IsMipsArchVariant(kMips32r6)) {
785  if (Assembler::IsBc(instr)) {
786  return Assembler::OffsetSize::kOffset26;
787  } else if (Assembler::IsBzc(instr)) {
788  return Assembler::OffsetSize::kOffset21;
789  }
790  }
791  return Assembler::OffsetSize::kOffset16;
792 }
793 
794 
795 static inline int32_t AddBranchOffset(int pos, Instr instr) {
796  int bits = OffsetSizeInBits(instr);
797  const int32_t mask = (1 << bits) - 1;
798  bits = 32 - bits;
799 
800  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
801  // the compiler uses arithmetic shifts for signed integers.
802  int32_t imm = ((instr & mask) << bits) >> (bits - 2);
803 
804  if (imm == kEndOfChain) {
805  // EndOfChain sentinel is returned directly, not relative to pc or pos.
806  return kEndOfChain;
807  } else {
808  return pos + Assembler::kBranchPCOffset + imm;
809  }
810 }
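// Worked example (editor's note): for a 16-bit branch, bits == 16, so an
// offset field of 0xFFFF gives imm == ((0xFFFF << 16) >> 14) == -4, which is
// kEndOfChain and is returned as the sentinel. Any other field value yields
// pos + kBranchPCOffset + (signed offset field << 2), i.e. the byte-addressed
// branch target.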
811 
812 uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
813  DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
814  int16_t jic_offset = GetImmediate16(instr_jic);
815  int16_t lui_offset = GetImmediate16(instr_lui);
816 
817  if (jic_offset < 0) {
818  lui_offset += kImm16Mask;
819  }
820  uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
821  uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;
822 
823  return lui_offset_u | jic_offset_u;
824 }
825 
826 // Use just lui and jic instructions. Insert the lower part of the target
827 // address into the jic offset field. Since jic sign-extends the offset and
828 // then adds it to the register, the lui instruction must load the difference
829 // between the upper part of the target address and the upper part of the
830 // sign-extended offset (0xFFFF or 0x0000) into the register beforehand.
831 void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
832  int16_t& jic_offset) {
833  lui_offset = (address & kHiMask) >> kLuiShift;
834  jic_offset = address & kLoMask;
835 
836  if (jic_offset < 0) {
837  lui_offset -= kImm16Mask;
838  }
839 }
840 
841 void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
842  uint32_t& lui_offset,
843  uint32_t& jic_offset) {
844  int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
845  int16_t jic_offset16 = address & kLoMask;
846 
847  if (jic_offset16 < 0) {
848  lui_offset16 -= kImm16Mask;
849  }
850  lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
851  jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
852 }
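// Worked example (editor's note) for UnpackTargetAddress with
// address == 0x12348000: lui_offset starts as 0x1234 and jic_offset as
// 0x8000, which is -32768 as an int16_t. Being negative, lui_offset has
// kImm16Mask subtracted, which in 16-bit arithmetic is the same as adding 1,
// giving 0x1235. At run time lui yields 0x12350000 and jic adds the
// sign-extended -0x8000, reconstructing 0x12348000 exactly.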
853 
854 void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui,
855  Address offset_lui, Instr instr_ori,
856  Address offset_ori) {
857  DCHECK(IsLui(instr_lui));
858  DCHECK(IsOri(instr_ori));
859  instr_at_put(static_cast<int>(pc + offset_lui),
860  instr_lui | ((imm >> kLuiShift) & kImm16Mask));
861  instr_at_put(static_cast<int>(pc + offset_ori),
862  instr_ori | (imm & kImm16Mask));
863 }
864 
865 void Assembler::PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr_lui,
866  Address offset_lui, Instr instr_ori,
867  Address offset_ori) {
868  DCHECK(IsLui(instr_lui));
869  DCHECK(IsOri(instr_ori));
870  instr_at_put(pc + offset_lui, instr_lui | ((imm >> kLuiShift) & kImm16Mask));
871  instr_at_put(pc + offset_ori, instr_ori | (imm & kImm16Mask));
872 }
873 
874 int32_t Assembler::GetLuiOriImmediate(Instr instr_lui, Instr instr_ori) {
875  DCHECK(IsLui(instr_lui));
876  DCHECK(IsOri(instr_ori));
877  int32_t imm;
878  imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
879  imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
880  return imm;
881 }
882 
883 int Assembler::target_at(int pos, bool is_internal) {
884  Instr instr = instr_at(pos);
885  if (is_internal) {
886  if (instr == 0) {
887  return kEndOfChain;
888  } else {
889  int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
890  int delta = static_cast<int>(instr_address - instr);
891  DCHECK(pos > delta);
892  return pos - delta;
893  }
894  }
895  if ((instr & ~kImm16Mask) == 0) {
896  // Emitted label constant, not part of a branch.
897  if (instr == 0) {
898  return kEndOfChain;
899  } else {
900  int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
901  return (imm18 + pos);
902  }
903  }
904  // Check we have a branch or jump instruction.
905  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
906  if (IsBranch(instr)) {
907  return AddBranchOffset(pos, instr);
908  } else if (IsMov(instr, t8, ra)) {
909  int32_t imm32;
910  Instr instr_lui = instr_at(pos + 2 * kInstrSize);
911  Instr instr_ori = instr_at(pos + 3 * kInstrSize);
912  imm32 = GetLuiOriImmediate(instr_lui, instr_ori);
913  if (imm32 == kEndOfJumpChain) {
914  // EndOfChain sentinel is returned directly, not relative to pc or pos.
915  return kEndOfChain;
916  }
917  return pos + Assembler::kLongBranchPCOffset + imm32;
918  } else {
919  DCHECK(IsLui(instr));
920  if (IsNal(instr_at(pos + kInstrSize))) {
921  int32_t imm32;
922  Instr instr_lui = instr_at(pos + 0 * kInstrSize);
923  Instr instr_ori = instr_at(pos + 2 * kInstrSize);
924  imm32 = GetLuiOriImmediate(instr_lui, instr_ori);
925  if (imm32 == kEndOfJumpChain) {
926  // EndOfChain sentinel is returned directly, not relative to pc or pos.
927  return kEndOfChain;
928  }
929  return pos + Assembler::kLongBranchPCOffset + imm32;
930  } else {
931  Instr instr1 = instr_at(pos + 0 * kInstrSize);
932  Instr instr2 = instr_at(pos + 1 * kInstrSize);
933  DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
934  int32_t imm;
935  if (IsJicOrJialc(instr2)) {
936  imm = CreateTargetAddress(instr1, instr2);
937  } else {
938  imm = GetLuiOriImmediate(instr1, instr2);
939  }
940 
941  if (imm == kEndOfJumpChain) {
942  // EndOfChain sentinel is returned directly, not relative to pc or pos.
943  return kEndOfChain;
944  } else {
945  uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
946  int32_t delta = instr_address - imm;
947  DCHECK(pos > delta);
948  return pos - delta;
949  }
950  }
951  }
952  return 0;
953 }
954 
955 
956 static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
957  Instr instr) {
958  int32_t bits = OffsetSizeInBits(instr);
959  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
960  DCHECK_EQ(imm & 3, 0);
961  imm >>= 2;
962 
963  const int32_t mask = (1 << bits) - 1;
964  instr &= ~mask;
965  DCHECK(is_intn(imm, bits));
966 
967  return instr | (imm & mask);
968 }
969 
970 
971 void Assembler::target_at_put(int32_t pos, int32_t target_pos,
972  bool is_internal) {
973  Instr instr = instr_at(pos);
974 
975  if (is_internal) {
976  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
977  instr_at_put(pos, imm);
978  return;
979  }
980  if ((instr & ~kImm16Mask) == 0) {
981  DCHECK(target_pos == kEndOfChain || target_pos >= 0);
982  // Emitted label constant, not part of a branch.
983  // Make label relative to Code pointer of generated Code object.
984  instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
985  return;
986  }
987 
988  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
989  if (IsBranch(instr)) {
990  instr = SetBranchOffset(pos, target_pos, instr);
991  instr_at_put(pos, instr);
992  } else if (IsMov(instr, t8, ra)) {
993  Instr instr_lui = instr_at(pos + 2 * kInstrSize);
994  Instr instr_ori = instr_at(pos + 3 * kInstrSize);
995  DCHECK(IsLui(instr_lui));
996  DCHECK(IsOri(instr_ori));
997 
998  int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);
999 
1000  if (is_int16(imm_short)) {
1001  // Optimize by converting to a regular branch with a 16-bit
1002  // offset.
1003  Instr instr_b = BEQ;
1004  instr_b = SetBranchOffset(pos, target_pos, instr_b);
1005 
1006  Instr instr_j = instr_at(pos + 5 * kInstrSize);
1007  Instr instr_branch_delay;
1008 
1009  if (IsJump(instr_j)) {
1010  // Case when branch delay slot is protected.
1011  instr_branch_delay = nopInstr;
1012  } else {
1013  // Case when branch delay slot is used.
1014  instr_branch_delay = instr_at(pos + 7 * kInstrSize);
1015  }
1016  instr_at_put(pos + 0 * kInstrSize, instr_b);
1017  instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
1018  } else {
1019  int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
1020  DCHECK_EQ(imm & 3, 0);
1021 
1022  instr_lui &= ~kImm16Mask;
1023  instr_ori &= ~kImm16Mask;
1024 
1025  PatchLuiOriImmediate(pos, imm, instr_lui, 2 * kInstrSize, instr_ori,
1026  3 * kInstrSize);
1027  }
1028  } else {
1029  DCHECK(IsLui(instr));
1030  if (IsNal(instr_at(pos + kInstrSize))) {
1031  Instr instr_lui = instr_at(pos + 0 * kInstrSize);
1032  Instr instr_ori = instr_at(pos + 2 * kInstrSize);
1033  DCHECK(IsLui(instr_lui));
1034  DCHECK(IsOri(instr_ori));
1035  int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
1036  DCHECK_EQ(imm & 3, 0);
1037  if (is_int16(imm + Assembler::kLongBranchPCOffset -
1038  Assembler::kBranchPCOffset)) {
1039  // Optimize by converting to a regular branch-and-link with a 16-bit
1040  // offset.
1041  Instr instr_b = REGIMM | BGEZAL; // Branch and link.
1042  instr_b = SetBranchOffset(pos, target_pos, instr_b);
1043  // Correct the ra register to point to one instruction after the jalr
1044  // from TurboAssembler::BranchAndLinkLong.
1045  Instr instr_a = ADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
1046  kOptimizedBranchAndLinkLongReturnOffset;
1047 
1048  instr_at_put(pos, instr_b);
1049  instr_at_put(pos + 1 * kInstrSize, instr_a);
1050  } else {
1051  instr_lui &= ~kImm16Mask;
1052  instr_ori &= ~kImm16Mask;
1053  PatchLuiOriImmediate(pos, imm, instr_lui, 0 * kInstrSize, instr_ori,
1054  2 * kInstrSize);
1055  }
1056  } else {
1057  Instr instr1 = instr_at(pos + 0 * kInstrSize);
1058  Instr instr2 = instr_at(pos + 1 * kInstrSize);
1059  DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
1060  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
1061  DCHECK_EQ(imm & 3, 0);
1062  DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
1063  instr1 &= ~kImm16Mask;
1064  instr2 &= ~kImm16Mask;
1065 
1066  if (IsJicOrJialc(instr2)) {
1067  uint32_t lui_offset_u, jic_offset_u;
1068  UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
1069  instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
1070  instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
1071  } else {
1072  PatchLuiOriImmediate(pos, imm, instr1, 0 * kInstrSize, instr2,
1073  1 * kInstrSize);
1074  }
1075  }
1076  }
1077 }
1078 
1079 void Assembler::print(const Label* L) {
1080  if (L->is_unused()) {
1081  PrintF("unused label\n");
1082  } else if (L->is_bound()) {
1083  PrintF("bound label to %d\n", L->pos());
1084  } else if (L->is_linked()) {
1085  Label l;
1086  l.link_to(L->pos());
1087  PrintF("unbound label");
1088  while (l.is_linked()) {
1089  PrintF("@ %d ", l.pos());
1090  Instr instr = instr_at(l.pos());
1091  if ((instr & ~kImm16Mask) == 0) {
1092  PrintF("value\n");
1093  } else {
1094  PrintF("%d\n", instr);
1095  }
1096  next(&l, is_internal_reference(&l));
1097  }
1098  } else {
1099  PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
1100  }
1101 }
1102 
1103 
1104 void Assembler::bind_to(Label* L, int pos) {
1105  DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
1106  int32_t trampoline_pos = kInvalidSlotPos;
1107  bool is_internal = false;
1108  if (L->is_linked() && !trampoline_emitted_) {
1109  unbound_labels_count_--;
1110  if (!is_internal_reference(L)) {
1111  next_buffer_check_ += kTrampolineSlotsSize;
1112  }
1113  }
1114 
1115  while (L->is_linked()) {
1116  int32_t fixup_pos = L->pos();
1117  int32_t dist = pos - fixup_pos;
1118  is_internal = is_internal_reference(L);
1119  next(L, is_internal); // Call next before overwriting link with target at
1120  // fixup_pos.
1121  Instr instr = instr_at(fixup_pos);
1122  if (is_internal) {
1123  target_at_put(fixup_pos, pos, is_internal);
1124  } else {
1125  if (IsBranch(instr)) {
1126  int branch_offset = BranchOffset(instr);
1127  if (dist > branch_offset) {
1128  if (trampoline_pos == kInvalidSlotPos) {
1129  trampoline_pos = get_trampoline_entry(fixup_pos);
1130  CHECK_NE(trampoline_pos, kInvalidSlotPos);
1131  }
1132  CHECK((trampoline_pos - fixup_pos) <= branch_offset);
1133  target_at_put(fixup_pos, trampoline_pos, false);
1134  fixup_pos = trampoline_pos;
1135  }
1136  target_at_put(fixup_pos, pos, false);
1137  } else {
1138  target_at_put(fixup_pos, pos, false);
1139  }
1140  }
1141  }
1142  L->bind_to(pos);
1143 
1144  // Keep track of the last bound label so we don't eliminate any instructions
1145  // before a bound label.
1146  if (pos > last_bound_pos_)
1147  last_bound_pos_ = pos;
1148 }
1149 
1150 
1151 void Assembler::bind(Label* L) {
1152  DCHECK(!L->is_bound()); // Label can only be bound once.
1153  bind_to(L, pc_offset());
1154 }
1155 
1156 
1157 void Assembler::next(Label* L, bool is_internal) {
1158  DCHECK(L->is_linked());
1159  int link = target_at(L->pos(), is_internal);
1160  if (link == kEndOfChain) {
1161  L->Unuse();
1162  } else {
1163  DCHECK_GE(link, 0);
1164  L->link_to(link);
1165  }
1166 }
1167 
1168 
1169 bool Assembler::is_near(Label* L) {
1170  DCHECK(L->is_bound());
1171  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
1172 }
1173 
1174 
1175 bool Assembler::is_near(Label* L, OffsetSize bits) {
1176  if (L == nullptr || !L->is_bound()) return true;
1177  return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
1178 }
1179 
1180 
1181 bool Assembler::is_near_branch(Label* L) {
1182  DCHECK(L->is_bound());
1183  return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
1184 }
1185 
1186 
1187 int Assembler::BranchOffset(Instr instr) {
1188  // For pre-R6, and for most R6 branches, the offset is 16 bits.
1189  int bits = OffsetSize::kOffset16;
1190 
1191  if (IsMipsArchVariant(kMips32r6)) {
1192  uint32_t opcode = GetOpcodeField(instr);
1193  switch (opcode) {
1194  // Checks BC or BALC.
1195  case BC:
1196  case BALC:
1197  bits = OffsetSize::kOffset26;
1198  break;
1199 
1200  // Checks BEQZC or BNEZC.
1201  case POP66:
1202  case POP76:
1203  if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
1204  break;
1205  default:
1206  break;
1207  }
1208  }
1209 
1210  return (1 << (bits + 2 - 1)) - 1;
1211 }
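// For example (editor's note), a plain 16-bit branch yields
// (1 << (16 + 2 - 1)) - 1 == 131071: the 16-bit word offset is shifted left
// by 2 for byte addressing, and one bit is reserved for the sign.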
1212 
1213 
1214 // We have to use a temporary register for things that can be relocated even
1215 // if they can be encoded in MIPS's 16-bit immediate-offset instruction
1216 // space. There is no guarantee that the relocated location can be similarly
1217 // encoded.
1218 bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
1219  return !RelocInfo::IsNone(rmode);
1220 }
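The practical consequence, as a sketch (hi16/lo16 are editorial shorthand, not helpers in this file): any immediate carrying relocation info is emitted as a full lui/ori pair so the patcher can rewrite the value in place later.

// li(t9, Operand(imm, RelocInfo::EMBEDDED_OBJECT)) expands, in effect, to:
//   lui(t9, hi16(imm));
//   ori(t9, t9, lo16(imm));
// rather than folding imm into a 16-bit immediate field.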
1221 
1222 void Assembler::GenInstrRegister(Opcode opcode,
1223  Register rs,
1224  Register rt,
1225  Register rd,
1226  uint16_t sa,
1227  SecondaryField func) {
1228  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
1229  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1230  | (rd.code() << kRdShift) | (sa << kSaShift) | func;
1231  emit(instr);
1232 }
1233 
1234 
1235 void Assembler::GenInstrRegister(Opcode opcode,
1236  Register rs,
1237  Register rt,
1238  uint16_t msb,
1239  uint16_t lsb,
1240  SecondaryField func) {
1241  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
1242  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1243  | (msb << kRdShift) | (lsb << kSaShift) | func;
1244  emit(instr);
1245 }
1246 
1247 
1248 void Assembler::GenInstrRegister(Opcode opcode,
1249  SecondaryField fmt,
1250  FPURegister ft,
1251  FPURegister fs,
1252  FPURegister fd,
1253  SecondaryField func) {
1254  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
1255  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
1256  | (fd.code() << kFdShift) | func;
1257  emit(instr);
1258 }
1259 
1260 
1261 void Assembler::GenInstrRegister(Opcode opcode,
1262  FPURegister fr,
1263  FPURegister ft,
1264  FPURegister fs,
1265  FPURegister fd,
1266  SecondaryField func) {
1267  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
1268  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
1269  | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
1270  emit(instr);
1271 }
1272 
1273 
1274 void Assembler::GenInstrRegister(Opcode opcode,
1275  SecondaryField fmt,
1276  Register rt,
1277  FPURegister fs,
1278  FPURegister fd,
1279  SecondaryField func) {
1280  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
1281  Instr instr = opcode | fmt | (rt.code() << kRtShift)
1282  | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
1283  emit(instr);
1284 }
1285 
1286 
1287 void Assembler::GenInstrRegister(Opcode opcode,
1288  SecondaryField fmt,
1289  Register rt,
1290  FPUControlRegister fs,
1291  SecondaryField func) {
1292  DCHECK(fs.is_valid() && rt.is_valid());
1293  Instr instr =
1294  opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
1295  emit(instr);
1296 }
1297 
1298 
1299 // Instructions with immediate value.
1300 // Registers are in the order of the instruction encoding, from left to right.
1301 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
1302  int32_t j,
1303  CompactBranchType is_compact_branch) {
1304  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
1305  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1306  | (j & kImm16Mask);
1307  emit(instr, is_compact_branch);
1308 }
1309 
1310 void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt,
1311  int32_t offset9, int bit6,
1312  SecondaryField func) {
1313  DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) &&
1314  is_uint1(bit6));
1315  Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) |
1316  ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift |
1317  func;
1318  emit(instr);
1319 }
1320 
1321 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
1322  int32_t j,
1323  CompactBranchType is_compact_branch) {
1324  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
1325  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
1326  emit(instr, is_compact_branch);
1327 }
1328 
1329 
1330 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
1331  int32_t j,
1332  CompactBranchType is_compact_branch) {
1333  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
1334  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
1335  | (j & kImm16Mask);
1336  emit(instr, is_compact_branch);
1337 }
1338 
1339 
1340 void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
1341  CompactBranchType is_compact_branch) {
1342  DCHECK(rs.is_valid() && (is_int21(offset21)));
1343  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
1344  emit(instr, is_compact_branch);
1345 }
1346 
1347 
1348 void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
1349  uint32_t offset21) {
1350  DCHECK(rs.is_valid() && (is_uint21(offset21)));
1351  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
1352  emit(instr);
1353 }
1354 
1355 
1356 void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
1357  CompactBranchType is_compact_branch) {
1358  DCHECK(is_int26(offset26));
1359  Instr instr = opcode | (offset26 & kImm26Mask);
1360  emit(instr, is_compact_branch);
1361 }
1362 
1363 
1364 void Assembler::GenInstrJump(Opcode opcode,
1365  uint32_t address) {
1366  BlockTrampolinePoolScope block_trampoline_pool(this);
1367  DCHECK(is_uint26(address));
1368  Instr instr = opcode | address;
1369  emit(instr);
1370  BlockTrampolinePoolFor(1); // For associated delay slot.
1371 }
1372 
1373 // MSA instructions
1374 void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
1375  MSARegister ws, MSARegister wd) {
1376  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1377  DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
1378  Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
1379  (ws.code() << kWsShift) | (wd.code() << kWdShift);
1380  emit(instr);
1381 }
1382 
1383 void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
1384  int32_t imm5, MSARegister ws, MSARegister wd) {
1385  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1386  DCHECK(ws.is_valid() && wd.is_valid());
1387  DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
1388  (operation == CEQI) || (operation == CLTI_S) ||
1389  (operation == CLEI_S)
1390  ? is_int5(imm5)
1391  : is_uint5(imm5));
1392  Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
1393  (ws.code() << kWsShift) | (wd.code() << kWdShift);
1394  emit(instr);
1395 }
1396 
1397 void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
1398  uint32_t m, MSARegister ws, MSARegister wd) {
1399  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1400  DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
1401  Instr instr = MSA | operation | df | (m << kWtShift) |
1402  (ws.code() << kWsShift) | (wd.code() << kWdShift);
1403  emit(instr);
1404 }
1405 
1406 void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
1407  int32_t imm10, MSARegister wd) {
1408  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1409  DCHECK(wd.is_valid() && is_int10(imm10));
1410  Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
1411  (wd.code() << kWdShift);
1412  emit(instr);
1413 }
1414 
1415 template <typename RegType>
1416 void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
1417  RegType t, MSARegister ws, MSARegister wd) {
1418  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1419  DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
1420  Instr instr = MSA | operation | df | (t.code() << kWtShift) |
1421  (ws.code() << kWsShift) | (wd.code() << kWdShift);
1422  emit(instr);
1423 }
1424 
1425 template <typename DstType, typename SrcType>
1426 void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
1427  uint32_t n, SrcType src, DstType dst) {
1428  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1429  DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
1430  Instr instr = MSA | operation | df | (n << kWtShift) |
1431  (src.code() << kWsShift) | (dst.code() << kWdShift) |
1432  MSA_ELM_MINOR;
1433  emit(instr);
1434 }
1435 
1436 void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
1437  MSARegister wt, MSARegister ws, MSARegister wd) {
1438  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1439  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
1440  DCHECK_LT(df, 2);
1441  Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
1442  (ws.code() << kWsShift) | (wd.code() << kWdShift);
1443  emit(instr);
1444 }
1445 
1446 void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
1447  MSARegister ws, MSARegister wd) {
1448  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1449  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
1450  Instr instr = MSA | operation | (wt.code() << kWtShift) |
1451  (ws.code() << kWsShift) | (wd.code() << kWdShift) |
1452  MSA_VEC_2R_2RF_MINOR;
1453  emit(instr);
1454 }
1455 
1456 void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
1457  Register rs, MSARegister wd) {
1458  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1459  DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
1460  Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
1461  (rs.code() << kWsShift) | (wd.code() << kWdShift);
1462  emit(instr);
1463 }
1464 
1465 void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
1466  MSARegister ws, MSARegister wd) {
1467  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1468  DCHECK(ws.is_valid() && wd.is_valid());
1469  Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
1470  (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
1471  emit(instr);
1472 }
1473 
1474 void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
1475  MSARegister ws, MSARegister wd) {
1476  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1477  DCHECK(ws.is_valid() && wd.is_valid());
1478  Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
1479  (ws.code() << kWsShift) | (wd.code() << kWdShift) |
1480  MSA_VEC_2R_2RF_MINOR;
1481  emit(instr);
1482 }
1483 
1484 void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
1485  int32_t offset16) {
1486  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
1487  DCHECK(wt.is_valid() && is_int16(offset16));
1488  BlockTrampolinePoolScope block_trampoline_pool(this);
1489  Instr instr =
1490  COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
1491  emit(instr);
1492  BlockTrampolinePoolFor(1); // For associated delay slot.
1493 }
1494 
1495 // Returns the next free trampoline entry.
1496 int32_t Assembler::get_trampoline_entry(int32_t pos) {
1497  int32_t trampoline_entry = kInvalidSlotPos;
1498 
1499  if (!internal_trampoline_exception_) {
1500  if (trampoline_.start() > pos) {
1501  trampoline_entry = trampoline_.take_slot();
1502  }
1503 
1504  if (kInvalidSlotPos == trampoline_entry) {
1505  internal_trampoline_exception_ = true;
1506  }
1507  }
1508  return trampoline_entry;
1509 }
1510 
1511 
1512 uint32_t Assembler::jump_address(Label* L) {
1513  int32_t target_pos;
1514 
1515  if (L->is_bound()) {
1516  target_pos = L->pos();
1517  } else {
1518  if (L->is_linked()) {
1519  target_pos = L->pos(); // L's link.
1520  L->link_to(pc_offset());
1521  } else {
1522  L->link_to(pc_offset());
1523  return kEndOfJumpChain;
1524  }
1525  }
1526 
1527  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
1528  DCHECK_EQ(imm & 3, 0);
1529 
1530  return imm;
1531 }
1532 
1533 uint32_t Assembler::branch_long_offset(Label* L) {
1534  int32_t target_pos;
1535 
1536  if (L->is_bound()) {
1537  target_pos = L->pos();
1538  } else {
1539  if (L->is_linked()) {
1540  target_pos = L->pos(); // L's link.
1541  L->link_to(pc_offset());
1542  } else {
1543  L->link_to(pc_offset());
1544  return kEndOfJumpChain;
1545  }
1546  }
1547 
1548  DCHECK(is_int32(static_cast<int64_t>(target_pos) -
1549  static_cast<int64_t>(pc_offset() + kLongBranchPCOffset)));
1550  int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
1551  DCHECK_EQ(offset & 3, 0);
1552 
1553  return offset;
1554 }
1555 
1556 int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
1557  int32_t target_pos;
1558  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1559 
1560  if (L->is_bound()) {
1561  target_pos = L->pos();
1562  } else {
1563  if (L->is_linked()) {
1564  target_pos = L->pos();
1565  L->link_to(pc_offset() + pad);
1566  } else {
1567  L->link_to(pc_offset() + pad);
1568  if (!trampoline_emitted_) {
1569  unbound_labels_count_++;
1570  next_buffer_check_ -= kTrampolineSlotsSize;
1571  }
1572  return kEndOfChain;
1573  }
1574  }
1575 
1576  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
1577  DCHECK(is_intn(offset, bits + 2));
1578  DCHECK_EQ(offset & 3, 0);
1579 
1580  return offset;
1581 }
1582 
1583 
1584 void Assembler::label_at_put(Label* L, int at_offset) {
1585  int target_pos;
1586  if (L->is_bound()) {
1587  target_pos = L->pos();
1588  instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1589  } else {
1590  if (L->is_linked()) {
1591  target_pos = L->pos(); // L's link.
1592  int32_t imm18 = target_pos - at_offset;
1593  DCHECK_EQ(imm18 & 3, 0);
1594  int32_t imm16 = imm18 >> 2;
1595  DCHECK(is_int16(imm16));
1596  instr_at_put(at_offset, (imm16 & kImm16Mask));
1597  } else {
1598  target_pos = kEndOfChain;
1599  instr_at_put(at_offset, 0);
1600  if (!trampoline_emitted_) {
1601  unbound_labels_count_++;
1602  next_buffer_check_ -= kTrampolineSlotsSize;
1603  }
1604  }
1605  L->link_to(at_offset);
1606  }
1607 }
1608 
1609 
1610 //------- Branch and jump instructions --------
1611 
1612 void Assembler::b(int16_t offset) {
1613  beq(zero_reg, zero_reg, offset);
1614 }
1615 
1616 
1617 void Assembler::bal(int16_t offset) {
1618  bgezal(zero_reg, offset);
1619 }
1620 
1621 
1622 void Assembler::bc(int32_t offset) {
1623  DCHECK(IsMipsArchVariant(kMips32r6));
1624  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
1625 }
1626 
1627 
1628 void Assembler::balc(int32_t offset) {
1629  DCHECK(IsMipsArchVariant(kMips32r6));
1630  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
1631 }
1632 
1633 
1634 void Assembler::beq(Register rs, Register rt, int16_t offset) {
1635  BlockTrampolinePoolScope block_trampoline_pool(this);
1636  GenInstrImmediate(BEQ, rs, rt, offset);
1637  BlockTrampolinePoolFor(1); // For associated delay slot.
1638 }
1639 
1640 
1641 void Assembler::bgez(Register rs, int16_t offset) {
1642  BlockTrampolinePoolScope block_trampoline_pool(this);
1643  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1644  BlockTrampolinePoolFor(1); // For associated delay slot.
1645 }
1646 
1647 
1648 void Assembler::bgezc(Register rt, int16_t offset) {
1649  DCHECK(IsMipsArchVariant(kMips32r6));
1650  DCHECK(rt != zero_reg);
1651  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1652 }
1653 
1654 
1655 void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
1656  DCHECK(IsMipsArchVariant(kMips32r6));
1657  DCHECK(rs != zero_reg);
1658  DCHECK(rt != zero_reg);
1659  DCHECK(rs.code() != rt.code());
1660  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1661 }
1662 
1663 
1664 void Assembler::bgec(Register rs, Register rt, int16_t offset) {
1665  DCHECK(IsMipsArchVariant(kMips32r6));
1666  DCHECK(rs != zero_reg);
1667  DCHECK(rt != zero_reg);
1668  DCHECK(rs.code() != rt.code());
1669  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1670 }
1671 
1672 
1673 void Assembler::bgezal(Register rs, int16_t offset) {
1674  DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
1675  DCHECK(rs != ra);
1676  BlockTrampolinePoolScope block_trampoline_pool(this);
1677  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1678  BlockTrampolinePoolFor(1); // For associated delay slot.
1679 }
1680 
1681 
1682 void Assembler::bgtz(Register rs, int16_t offset) {
1683  BlockTrampolinePoolScope block_trampoline_pool(this);
1684  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1685  BlockTrampolinePoolFor(1); // For associated delay slot.
1686 }
1687 
1688 
1689 void Assembler::bgtzc(Register rt, int16_t offset) {
1690  DCHECK(IsMipsArchVariant(kMips32r6));
1691  DCHECK(rt != zero_reg);
1692  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
1693  CompactBranchType::COMPACT_BRANCH);
1694 }
1695 
1696 
1697 void Assembler::blez(Register rs, int16_t offset) {
1698  BlockTrampolinePoolScope block_trampoline_pool(this);
1699  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1700  BlockTrampolinePoolFor(1); // For associated delay slot.
1701 }
1702 
1703 
1704 void Assembler::blezc(Register rt, int16_t offset) {
1705  DCHECK(IsMipsArchVariant(kMips32r6));
1706  DCHECK(rt != zero_reg);
1707  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
1708  CompactBranchType::COMPACT_BRANCH);
1709 }
1710 
1711 
1712 void Assembler::bltzc(Register rt, int16_t offset) {
1713  DCHECK(IsMipsArchVariant(kMips32r6));
1714  DCHECK(rt != zero_reg);
1715  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1716 }
1717 
1718 
1719 void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
1720  DCHECK(IsMipsArchVariant(kMips32r6));
1721  DCHECK(rs != zero_reg);
1722  DCHECK(rt != zero_reg);
1723  DCHECK(rs.code() != rt.code());
1724  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1725 }
1726 
1727 
1728 void Assembler::bltc(Register rs, Register rt, int16_t offset) {
1729  DCHECK(IsMipsArchVariant(kMips32r6));
1730  DCHECK(rs != zero_reg);
1731  DCHECK(rt != zero_reg);
1732  DCHECK(rs.code() != rt.code());
1733  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1734 }
1735 
1736 
1737 void Assembler::bltz(Register rs, int16_t offset) {
1738  BlockTrampolinePoolScope block_trampoline_pool(this);
1739  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1740  BlockTrampolinePoolFor(1); // For associated delay slot.
1741 }
1742 
1743 
1744 void Assembler::bltzal(Register rs, int16_t offset) {
1745  DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
1746  DCHECK(rs != ra);
1747  BlockTrampolinePoolScope block_trampoline_pool(this);
1748  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1749  BlockTrampolinePoolFor(1); // For associated delay slot.
1750 }
1751 
1752 
1753 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1754  BlockTrampolinePoolScope block_trampoline_pool(this);
1755  GenInstrImmediate(BNE, rs, rt, offset);
1756  BlockTrampolinePoolFor(1); // For associated delay slot.
1757 }
1758 
1759 
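// Note (editorial, inferred from the emitted opcodes below): on r6 the
// BOVC/BNVC branches reuse the ADDI/DADDI opcodes; which branch a word
// encodes is determined by the ordering of the register fields
// (rs >= rt selects the overflow variant), hence the operand swap below.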
1760 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
1761  DCHECK(IsMipsArchVariant(kMips32r6));
1762  if (rs.code() >= rt.code()) {
1763  GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1764  } else {
1765  GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1766  }
1767 }
1768 
1769 
1770 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
1771  DCHECK(IsMipsArchVariant(kMips32r6));
1772  if (rs.code() >= rt.code()) {
1773  GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1774  } else {
1775  GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1776  }
1777 }
1778 
1779 
1780 void Assembler::blezalc(Register rt, int16_t offset) {
1781  DCHECK(IsMipsArchVariant(kMips32r6));
1782  DCHECK(rt != zero_reg);
1783  DCHECK(rt != ra);
1784  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
1785  CompactBranchType::COMPACT_BRANCH);
1786 }
1787 
1788 
1789 void Assembler::bgezalc(Register rt, int16_t offset) {
1790  DCHECK(IsMipsArchVariant(kMips32r6));
1791  DCHECK(rt != zero_reg);
1792  DCHECK(rt != ra);
1793  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1794 }
1795 
1796 
1797 void Assembler::bgezall(Register rs, int16_t offset) {
1798  DCHECK(!IsMipsArchVariant(kMips32r6));
1799  DCHECK(rs != zero_reg);
1800  DCHECK(rs != ra);
1801  BlockTrampolinePoolScope block_trampoline_pool(this);
1802  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
1803  BlockTrampolinePoolFor(1); // For associated delay slot.
1804 }
1805 
1806 
1807 void Assembler::bltzalc(Register rt, int16_t offset) {
1808  DCHECK(IsMipsArchVariant(kMips32r6));
1809  DCHECK(rt != zero_reg);
1810  DCHECK(rt != ra);
1811  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
1812 }
1813 
1814 
1815 void Assembler::bgtzalc(Register rt, int16_t offset) {
1816  DCHECK(IsMipsArchVariant(kMips32r6));
1817  DCHECK(rt != zero_reg);
1818  DCHECK(rt != ra);
1819  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
1820  CompactBranchType::COMPACT_BRANCH);
1821 }
1822 
1823 
1824 void Assembler::beqzalc(Register rt, int16_t offset) {
1825  DCHECK(IsMipsArchVariant(kMips32r6));
1826  DCHECK(rt != zero_reg);
1827  DCHECK(rt != ra);
1828  GenInstrImmediate(ADDI, zero_reg, rt, offset,
1829  CompactBranchType::COMPACT_BRANCH);
1830 }
1831 
1832 
1833 void Assembler::bnezalc(Register rt, int16_t offset) {
1834  DCHECK(IsMipsArchVariant(kMips32r6));
1835  DCHECK(rt != zero_reg);
1836  DCHECK(rt != ra);
1837  GenInstrImmediate(DADDI, zero_reg, rt, offset,
1838  CompactBranchType::COMPACT_BRANCH);
1839 }
1840 
1841 
1842 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1843  DCHECK(IsMipsArchVariant(kMips32r6));
1844  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1845  if (rs.code() < rt.code()) {
1846  GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1847  } else {
1848  GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1849  }
1850 }
1851 
1852 
1853 void Assembler::beqzc(Register rs, int32_t offset) {
1854  DCHECK(IsMipsArchVariant(kMips32r6));
1855  DCHECK(rs != zero_reg);
1856  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
1857 }
1858 
1859 
1860 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1861  DCHECK(IsMipsArchVariant(kMips32r6));
1862  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1863  if (rs.code() < rt.code()) {
1864  GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1865  } else {
1866  GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1867  }
1868 }
1869 
1870 
1871 void Assembler::bnezc(Register rs, int32_t offset) {
1872  DCHECK(IsMipsArchVariant(kMips32r6));
1873  DCHECK(rs != zero_reg);
1874  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
1875 }
1876 
1877 
1878 void Assembler::j(int32_t target) {
1879 #ifdef DEBUG
1880  // Get pc of delay slot.
1881  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1882  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1883  (kImm26Bits + kImmFieldShift)) == 0;
1884  DCHECK(in_range && ((target & 3) == 0));
1885 #endif
1886  BlockTrampolinePoolScope block_trampoline_pool(this);
1887  GenInstrJump(J, (target >> 2) & kImm26Mask);
1888  BlockTrampolinePoolFor(1); // For associated delay slot.
1889 }
1890 
1891 
1892 void Assembler::jr(Register rs) {
1893  if (!IsMipsArchVariant(kMips32r6)) {
1894  BlockTrampolinePoolScope block_trampoline_pool(this);
1895  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1896  BlockTrampolinePoolFor(1); // For associated delay slot.
1897  } else {
1898  jalr(rs, zero_reg);
1899  }
1900 }
1901 
1902 
1903 void Assembler::jal(int32_t target) {
1904 #ifdef DEBUG
1905  // Get pc of delay slot.
1906  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1907  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1908  (kImm26Bits + kImmFieldShift)) == 0;
1909  DCHECK(in_range && ((target & 3) == 0));
1910 #endif
1911  BlockTrampolinePoolScope block_trampoline_pool(this);
1912  GenInstrJump(JAL, (target >> 2) & kImm26Mask);
1913  BlockTrampolinePoolFor(1); // For associated delay slot.
1914 }
1915 
1916 
1917 void Assembler::jalr(Register rs, Register rd) {
1918  DCHECK(rs.code() != rd.code());
1919  BlockTrampolinePoolScope block_trampoline_pool(this);
1920  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1921  BlockTrampolinePoolFor(1); // For associated delay slot.
1922 }
1923 
1924 
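// Note (editorial): JIC/JIALC share their opcodes (POP66/POP76) with
// beqzc/bnezc; rs == zero_reg in the encoding selects the jump-indexed
// variant, as emitted below.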
1925 void Assembler::jic(Register rt, int16_t offset) {
1926  DCHECK(IsMipsArchVariant(kMips32r6));
1927  GenInstrImmediate(POP66, zero_reg, rt, offset);
1928 }
1929 
1930 
1931 void Assembler::jialc(Register rt, int16_t offset) {
1932  DCHECK(IsMipsArchVariant(kMips32r6));
1933  GenInstrImmediate(POP76, zero_reg, rt, offset);
1934 }
1935 
1936 
1937 // -------Data-processing-instructions---------
1938 
1939 // Arithmetic.
1940 
1941 void Assembler::addu(Register rd, Register rs, Register rt) {
1942  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1943 }
1944 
1945 
1946 void Assembler::addiu(Register rd, Register rs, int32_t j) {
1947  GenInstrImmediate(ADDIU, rs, rd, j);
1948 }
1949 
1950 
1951 void Assembler::subu(Register rd, Register rs, Register rt) {
1952  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1953 }
1954 
1955 
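// Note (editorial): on r6, MUL and MUH share one function encoding
// (MUL_MUH); the sa field (MUL_OP vs. MUH_OP) selects the low or high
// half of the product.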
1956 void Assembler::mul(Register rd, Register rs, Register rt) {
1957  if (!IsMipsArchVariant(kMips32r6)) {
1958  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1959  } else {
1960  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1961  }
1962 }
1963 
1964 
1965 void Assembler::mulu(Register rd, Register rs, Register rt) {
1966  DCHECK(IsMipsArchVariant(kMips32r6));
1967  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1968 }
1969 
1970 
1971 void Assembler::muh(Register rd, Register rs, Register rt) {
1972  DCHECK(IsMipsArchVariant(kMips32r6));
1973  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1974 }
1975 
1976 
1977 void Assembler::muhu(Register rd, Register rs, Register rt) {
1978  DCHECK(IsMipsArchVariant(kMips32r6));
1979  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1980 }
1981 
1982 
1983 void Assembler::mod(Register rd, Register rs, Register rt) {
1984  DCHECK(IsMipsArchVariant(kMips32r6));
1985  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1986 }
1987 
1988 
1989 void Assembler::modu(Register rd, Register rs, Register rt) {
1990  DCHECK(IsMipsArchVariant(kMips32r6));
1991  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
1992 }
1993 
1994 
1995 void Assembler::mult(Register rs, Register rt) {
1996  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1997 }
1998 
1999 
2000 void Assembler::multu(Register rs, Register rt) {
2001  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
2002 }
2003 
2004 
2005 void Assembler::div(Register rs, Register rt) {
2006  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
2007 }
2008 
2009 
2010 void Assembler::div(Register rd, Register rs, Register rt) {
2011  DCHECK(IsMipsArchVariant(kMips32r6));
2012  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
2013 }
2014 
2015 
2016 void Assembler::divu(Register rs, Register rt) {
2017  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
2018 }
2019 
2020 
2021 void Assembler::divu(Register rd, Register rs, Register rt) {
2022  DCHECK(IsMipsArchVariant(kMips32r6));
2023  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
2024 }
2025 
2026 
2027 // Logical.
2028 
2029 void Assembler::and_(Register rd, Register rs, Register rt) {
2030  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
2031 }
2032 
2033 
2034 void Assembler::andi(Register rt, Register rs, int32_t j) {
2035  DCHECK(is_uint16(j));
2036  GenInstrImmediate(ANDI, rs, rt, j);
2037 }
2038 
2039 
2040 void Assembler::or_(Register rd, Register rs, Register rt) {
2041  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
2042 }
2043 
2044 
2045 void Assembler::ori(Register rt, Register rs, int32_t j) {
2046  DCHECK(is_uint16(j));
2047  GenInstrImmediate(ORI, rs, rt, j);
2048 }
2049 
2050 
2051 void Assembler::xor_(Register rd, Register rs, Register rt) {
2052  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
2053 }
2054 
2055 
2056 void Assembler::xori(Register rt, Register rs, int32_t j) {
2057  DCHECK(is_uint16(j));
2058  GenInstrImmediate(XORI, rs, rt, j);
2059 }
2060 
2061 
2062 void Assembler::nor(Register rd, Register rs, Register rt) {
2063  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
2064 }
2065 
2066 
2067 // Shifts.
2068 void Assembler::sll(Register rd,
2069  Register rt,
2070  uint16_t sa,
2071  bool coming_from_nop) {
2072  // Don't allow nops in the form 'sll zero_reg, zero_reg, 0' to be
2073  // generated with the sll instruction itself; they must be generated
2074  // via nop(int/NopMarkerTypes).
2075  DCHECK(coming_from_nop || !(rd == zero_reg && rt == zero_reg));
2076  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
2077 }
2078 
2079 
2080 void Assembler::sllv(Register rd, Register rt, Register rs) {
2081  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
2082 }
2083 
2084 
2085 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
2086  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
2087 }
2088 
2089 
2090 void Assembler::srlv(Register rd, Register rt, Register rs) {
2091  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
2092 }
2093 
2094 
2095 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
2096  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
2097 }
2098 
2099 
2100 void Assembler::srav(Register rd, Register rt, Register rs) {
2101  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
2102 }
2103 
2104 
2105 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
2106  // Should be called via MacroAssembler::Ror.
2107  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
2108  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
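 // ROTR shares the SRL function encoding; the 1 in the rs field below
 // selects the rotate variant (rotrv likewise places a 1 in the sa field
 // of the SRLV encoding).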
2109  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
2110  | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
2111  emit(instr);
2112 }
2113 
2114 
2115 void Assembler::rotrv(Register rd, Register rt, Register rs) {
2116  // Should be called via MacroAssembler::Ror.
2117  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2118  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2119  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
2120  | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
2121  emit(instr);
2122 }
2123 
2124 
2125 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
2126  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2127  DCHECK_LE(sa, 3);
2128  DCHECK(IsMipsArchVariant(kMips32r6));
2129  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
2130  rd.code() << kRdShift | sa << kSaShift | LSA;
2131  emit(instr);
2132 }
2133 
2134 
2135 // ------------Memory-instructions-------------
2136 
2137 void Assembler::AdjustBaseAndOffset(MemOperand& src,
2138  OffsetAccessType access_type,
2139  int second_access_add_to_offset) {
2140  // This method is used to adjust the base register and offset pair
2141  // for a load/store when the offset doesn't fit into int16_t.
2142  // It is assumed that 'base + offset' is sufficiently aligned for memory
2143  // operands that are machine word in size or smaller. For doubleword-sized
2144  // operands it's assumed that 'base' is a multiple of 8, while 'offset'
2145  // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
2146  // and spilled variables on the stack accessed relative to the stack
2147  // pointer register).
2148  // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
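 // For example, an offset of 0x20000 exceeds all of the short-adjustment
 // ranges handled below, so (pre-r6) it is materialized in full, roughly:
 //   lui   scratch, 0x0002
 //   ori   scratch, scratch, 0x0000
 //   addu  scratch, scratch, base
 //   lw    reg, 0(scratch)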
2149 
2150  bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
2151  bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
2152  DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
2153 
2154  // is_int16 must be passed a signed value, hence the static cast below.
2155  if (is_int16(src.offset()) &&
2156  (!two_accesses || is_int16(static_cast<int32_t>(
2157  src.offset() + second_access_add_to_offset)))) {
2158  // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
2159  // value) fits into int16_t.
2160  return;
2161  }
2162  UseScratchRegisterScope temps(this);
2163  Register scratch = temps.Acquire();
2164  DCHECK(src.rm() != scratch); // Must not overwrite the register 'base'
2165  // while loading 'offset'.
2166 
2167 #ifdef DEBUG
2168  // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
2169  uint32_t misalignment = src.offset() & (kDoubleSize - 1);
2170 #endif
2171 
2172  // Do not load the whole 32-bit 'offset' if it can be represented as
2173  // a sum of two 16-bit signed offsets. This can save an instruction or two.
2174  // To simplify matters, only do this for a symmetric range of offsets from
2175  // about -64KB to about +64KB, allowing further addition of 4 when accessing
2176  // 64-bit variables with two 32-bit accesses.
2177  constexpr int32_t kMinOffsetForSimpleAdjustment =
2178  0x7FF8; // Max int16_t that's a multiple of 8.
2179  constexpr int32_t kMaxOffsetForSimpleAdjustment =
2180  2 * kMinOffsetForSimpleAdjustment;
2181  if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
2182  addiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment);
2183  src.offset_ -= kMinOffsetForSimpleAdjustment;
2184  } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
2185  src.offset() < 0) {
2186  addiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment);
2187  src.offset_ += kMinOffsetForSimpleAdjustment;
2188  } else if (IsMipsArchVariant(kMips32r6)) {
2189  // On r6 take advantage of the aui instruction, e.g.:
2190  // aui at, base, offset_high
2191  // lw reg_lo, offset_low(at)
2192  // lw reg_hi, (offset_low+4)(at)
2193  // or when offset_low+4 overflows int16_t:
2194  // aui at, base, offset_high
2195  // addiu at, at, 8
2196  // lw reg_lo, (offset_low-8)(at)
2197  // lw reg_hi, (offset_low-4)(at)
2198  int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
2199  int16_t offset_low = static_cast<uint16_t>(src.offset());
2200  offset_high += (offset_low < 0)
2201  ? 1
2202  : 0; // Account for offset sign extension in load/store.
2203  aui(scratch, src.rm(), static_cast<uint16_t>(offset_high));
2204  if (two_accesses && !is_int16(static_cast<int32_t>(
2205  offset_low + second_access_add_to_offset))) {
2206  // Avoid overflow in the 16-bit offset of the load/store instruction when
2207  // adding 4.
2208  addiu(scratch, scratch, kDoubleSize);
2209  offset_low -= kDoubleSize;
2210  }
2211  src.offset_ = offset_low;
2212  } else {
2213  // Do not load the whole 32-bit 'offset' if it can be represented as
2214  // a sum of three 16-bit signed offsets. This can save an instruction.
2215  // To simplify matters, only do this for a symmetric range of offsets from
2216  // about -96KB to about +96KB, allowing further addition of 4 when accessing
2217  // 64-bit variables with two 32-bit accesses.
2218  constexpr int32_t kMinOffsetForMediumAdjustment =
2219  2 * kMinOffsetForSimpleAdjustment;
2220  constexpr int32_t kMaxOffsetForMediumAdjustment =
2221  3 * kMinOffsetForSimpleAdjustment;
2222  if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
2223  addiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2);
2224  addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
2225  src.offset_ -= kMinOffsetForMediumAdjustment;
2226  } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
2227  src.offset() < 0) {
2228  addiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2);
2229  addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
2230  src.offset_ += kMinOffsetForMediumAdjustment;
2231  } else {
2232  // Now that all shorter options have been exhausted, load the full 32-bit
2233  // offset.
2234  int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
2235  lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
2236  ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
2237  addu(scratch, scratch, src.rm());
2238  src.offset_ -= loaded_offset;
2239  }
2240  }
2241  src.rm_ = scratch;
2242 
2243  DCHECK(is_int16(src.offset()));
2244  if (two_accesses) {
2245  DCHECK(is_int16(
2246  static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
2247  }
2248  DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
2249 }
2250 
2251 void Assembler::lb(Register rd, const MemOperand& rs) {
2252  MemOperand source = rs;
2253  AdjustBaseAndOffset(source);
2254  GenInstrImmediate(LB, source.rm(), rd, source.offset());
2255 }
2256 
2257 
2258 void Assembler::lbu(Register rd, const MemOperand& rs) {
2259  MemOperand source = rs;
2260  AdjustBaseAndOffset(source);
2261  GenInstrImmediate(LBU, source.rm(), rd, source.offset());
2262 }
2263 
2264 
2265 void Assembler::lh(Register rd, const MemOperand& rs) {
2266  MemOperand source = rs;
2267  AdjustBaseAndOffset(source);
2268  GenInstrImmediate(LH, source.rm(), rd, source.offset());
2269 }
2270 
2271 
2272 void Assembler::lhu(Register rd, const MemOperand& rs) {
2273  MemOperand source = rs;
2274  AdjustBaseAndOffset(source);
2275  GenInstrImmediate(LHU, source.rm(), rd, source.offset());
2276 }
2277 
2278 
2279 void Assembler::lw(Register rd, const MemOperand& rs) {
2280  MemOperand source = rs;
2281  AdjustBaseAndOffset(source);
2282  GenInstrImmediate(LW, source.rm(), rd, source.offset());
2283 }
2284 
2285 
2286 void Assembler::lwl(Register rd, const MemOperand& rs) {
2287  DCHECK(is_int16(rs.offset_));
2288  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2289  IsMipsArchVariant(kMips32r2));
2290  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
2291 }
2292 
2293 
2294 void Assembler::lwr(Register rd, const MemOperand& rs) {
2295  DCHECK(is_int16(rs.offset_));
2296  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2297  IsMipsArchVariant(kMips32r2));
2298  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
2299 }
2300 
2301 
2302 void Assembler::sb(Register rd, const MemOperand& rs) {
2303  MemOperand source = rs;
2304  AdjustBaseAndOffset(source);
2305  GenInstrImmediate(SB, source.rm(), rd, source.offset());
2306 }
2307 
2308 
2309 void Assembler::sh(Register rd, const MemOperand& rs) {
2310  MemOperand source = rs;
2311  AdjustBaseAndOffset(source);
2312  GenInstrImmediate(SH, source.rm(), rd, source.offset());
2313 }
2314 
2315 
2316 void Assembler::sw(Register rd, const MemOperand& rs) {
2317  MemOperand source = rs;
2318  AdjustBaseAndOffset(source);
2319  GenInstrImmediate(SW, source.rm(), rd, source.offset());
2320 }
2321 
2322 
2323 void Assembler::swl(Register rd, const MemOperand& rs) {
2324  DCHECK(is_int16(rs.offset_));
2325  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2326  IsMipsArchVariant(kMips32r2));
2327  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
2328 }
2329 
2330 
2331 void Assembler::swr(Register rd, const MemOperand& rs) {
2332  DCHECK(is_int16(rs.offset_));
2333  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2334  IsMipsArchVariant(kMips32r2));
2335  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
2336 }
2337 
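// Note (editorial): r6 moved LL/SC under the SPECIAL3 opcode with a 9-bit
// signed offset; the earlier variants use dedicated opcodes with 16-bit
// offsets, hence the two paths below.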
2338 void Assembler::ll(Register rd, const MemOperand& rs) {
2339  if (IsMipsArchVariant(kMips32r6)) {
2340  DCHECK(is_int9(rs.offset_));
2341  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
2342  } else {
2343  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2344  IsMipsArchVariant(kMips32r2));
2345  DCHECK(is_int16(rs.offset_));
2346  GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
2347  }
2348 }
2349 
2350 void Assembler::sc(Register rd, const MemOperand& rs) {
2351  if (IsMipsArchVariant(kMips32r6)) {
2352  DCHECK(is_int9(rs.offset_));
2353  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
2354  } else {
2355  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2356  IsMipsArchVariant(kMips32r2));
2357  GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
2358  }
2359 }
2360 
2361 void Assembler::llx(Register rd, const MemOperand& rs) {
2362  DCHECK(IsMipsArchVariant(kMips32r6));
2363  DCHECK(is_int9(rs.offset_));
2364  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, LL_R6);
2365 }
2366 
2367 void Assembler::scx(Register rd, const MemOperand& rs) {
2368  DCHECK(IsMipsArchVariant(kMips32r6));
2369  DCHECK(is_int9(rs.offset_));
2370  GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, SC_R6);
2371 }
2372 
2373 void Assembler::lui(Register rd, int32_t j) {
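 // The 16-bit immediate may be given as either a zero- or a sign-extended
 // value; only its low 16 bits are encoded.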
2374  DCHECK(is_uint16(j) || is_int16(j));
2375  GenInstrImmediate(LUI, zero_reg, rd, j);
2376 }
2377 
2378 
2379 void Assembler::aui(Register rt, Register rs, int32_t j) {
2380  // This instruction uses the same opcode as 'lui'; the difference in
2381  // encoding is that 'lui' has zero_reg in the rs field.
2382  DCHECK(IsMipsArchVariant(kMips32r6));
2383  DCHECK(rs != zero_reg);
2384  DCHECK(is_uint16(j));
2385  GenInstrImmediate(LUI, rs, rt, j);
2386 }
2387 
2388 // ---------PC-Relative instructions-----------
2389 
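// Note (editorial): the PC-relative instructions share the PCREL major
// opcode; the minor opcode is packed into the upper bits of the 21-bit
// immediate field, as constructed below.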
2390 void Assembler::addiupc(Register rs, int32_t imm19) {
2391  DCHECK(IsMipsArchVariant(kMips32r6));
2392  DCHECK(rs.is_valid() && is_int19(imm19));
2393  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
2394  GenInstrImmediate(PCREL, rs, imm21);
2395 }
2396 
2397 
2398 void Assembler::lwpc(Register rs, int32_t offset19) {
2399  DCHECK(IsMipsArchVariant(kMips32r6));
2400  DCHECK(rs.is_valid() && is_int19(offset19));
2401  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
2402  GenInstrImmediate(PCREL, rs, imm21);
2403 }
2404 
2405 
2406 void Assembler::auipc(Register rs, int16_t imm16) {
2407  DCHECK(IsMipsArchVariant(kMips32r6));
2408  DCHECK(rs.is_valid());
2409  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
2410  GenInstrImmediate(PCREL, rs, imm21);
2411 }
2412 
2413 
2414 void Assembler::aluipc(Register rs, int16_t imm16) {
2415  DCHECK(IsMipsArchVariant(kMips32r6));
2416  DCHECK(rs.is_valid());
2417  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
2418  GenInstrImmediate(PCREL, rs, imm21);
2419 }
2420 
2421 
2422 // -------------Misc-instructions--------------
2423 
2424 // Break / Trap instructions.
2425 void Assembler::break_(uint32_t code, bool break_as_stop) {
2426  DCHECK_EQ(code & ~0xFFFFF, 0);
2427  // We need to invalidate breaks that could be stops as well because the
2428  // simulator expects a char pointer after the stop instruction.
2429  // See constants-mips.h for explanation.
2430  DCHECK((break_as_stop &&
2431  code <= kMaxStopCode &&
2432  code > kMaxWatchpointCode) ||
2433  (!break_as_stop &&
2434  (code > kMaxStopCode ||
2435  code <= kMaxWatchpointCode)));
2436  Instr break_instr = SPECIAL | BREAK | (code << 6);
2437  emit(break_instr);
2438 }
2439 
2440 
2441 void Assembler::stop(const char* msg, uint32_t code) {
2442  DCHECK_GT(code, kMaxWatchpointCode);
2443  DCHECK_LE(code, kMaxStopCode);
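 // On a real MIPS host there is no simulator to interpret the embedded
 // stop code (see break_ above), so a plain break is emitted instead.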
2444 #if V8_HOST_ARCH_MIPS
2445  break_(0x54321);
2446 #else // V8_HOST_ARCH_MIPS
2447  break_(code, true);
2448 #endif
2449 }
2450 
2451 
2452 void Assembler::tge(Register rs, Register rt, uint16_t code) {
2453  DCHECK(is_uint10(code));
2454  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
2455  | rt.code() << kRtShift | code << 6;
2456  emit(instr);
2457 }
2458 
2459 
2460 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2461  DCHECK(is_uint10(code));
2462  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
2463  | rt.code() << kRtShift | code << 6;
2464  emit(instr);
2465 }
2466 
2467 
2468 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2469  DCHECK(is_uint10(code));
2470  Instr instr =
2471  SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2472  emit(instr);
2473 }
2474 
2475 
2476 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2477  DCHECK(is_uint10(code));
2478  Instr instr =
2479  SPECIAL | TLTU | rs.code() << kRsShift
2480  | rt.code() << kRtShift | code << 6;
2481  emit(instr);
2482 }
2483 
2484 
2485 void Assembler::teq(Register rs, Register rt, uint16_t code) {
2486  DCHECK(is_uint10(code));
2487  Instr instr =
2488  SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2489  emit(instr);
2490 }
2491 
2492 
2493 void Assembler::tne(Register rs, Register rt, uint16_t code) {
2494  DCHECK(is_uint10(code));
2495  Instr instr =
2496  SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2497  emit(instr);
2498 }
2499 
2500 void Assembler::sync() {
2501  Instr sync_instr = SPECIAL | SYNC;
2502  emit(sync_instr);
2503 }
2504 
2505 // Move from HI/LO register.
2506 
2507 void Assembler::mfhi(Register rd) {
2508  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
2509 }
2510 
2511 
2512 void Assembler::mflo(Register rd) {
2513  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
2514 }
2515 
2516 
2517 // Set on less than instructions.
2518 void Assembler::slt(Register rd, Register rs, Register rt) {
2519  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
2520 }
2521 
2522 
2523 void Assembler::sltu(Register rd, Register rs, Register rt) {
2524  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
2525 }
2526 
2527 
2528 void Assembler::slti(Register rt, Register rs, int32_t j) {
2529  GenInstrImmediate(SLTI, rs, rt, j);
2530 }
2531 
2532 
2533 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
2534  GenInstrImmediate(SLTIU, rs, rt, j);
2535 }
2536 
2537 
2538 // Conditional move.
2539 void Assembler::movz(Register rd, Register rs, Register rt) {
2540  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
2541 }
2542 
2543 
2544 void Assembler::movn(Register rd, Register rs, Register rt) {
2545  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
2546 }
2547 
2548 
2549 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
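 // The rt field encodes the condition code 'cc' together with the tf bit
 // (1 = move when the FP condition is true; movf below uses tf = 0).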
2550  Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
2551  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2552 }
2553 
2554 
2555 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2556  Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
2557  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2558 }
2559 
2560 
2561 void Assembler::seleqz(Register rd, Register rs, Register rt) {
2562  DCHECK(IsMipsArchVariant(kMips32r6));
2563  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
2564 }
2565 
2566 
2567 // Bit twiddling.
2568 void Assembler::clz(Register rd, Register rs) {
2569  if (!IsMipsArchVariant(kMips32r6)) {
2570  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
2571  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2572  } else {
2573  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2574  }
2575 }
2576 
2577 
2578 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2579  // Should be called via MacroAssembler::Ins.
2580  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2581  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2582  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2583 }
2584 
2585 
2586 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2587  // Should be called via MacroAssembler::Ext.
2588  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
2589  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2590  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2591 }
2592 
2593 
2594 void Assembler::bitswap(Register rd, Register rt) {
2595  DCHECK(IsMipsArchVariant(kMips32r6));
2596  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
2597 }
2598 
2599 
2600 void Assembler::pref(int32_t hint, const MemOperand& rs) {
2601  DCHECK(!IsMipsArchVariant(kLoongson));
2602  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2603  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
2604  | (rs.offset_);
2605  emit(instr);
2606 }
2607 
2608 
2609 void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
2610  DCHECK(IsMipsArchVariant(kMips32r6));
2611  DCHECK(is_uint3(bp));
2612  uint16_t sa = (ALIGN << kBp2Bits) | bp;
2613  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
2614 }
2615 
2616 // Byte swap.
2617 void Assembler::wsbh(Register rd, Register rt) {
2618  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2619  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
2620 }
2621 
2622 void Assembler::seh(Register rd, Register rt) {
2623  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2624  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
2625 }
2626 
2627 void Assembler::seb(Register rd, Register rt) {
2628  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2629  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2630 }
2631 
2632 // --------Coprocessor-instructions----------------
2633 
2634 // Load, store, move.
2635 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2636  MemOperand tmp = src;
2637  AdjustBaseAndOffset(tmp);
2638  GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
2639 }
2640 
2641 
2642 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
2643  MemOperand tmp = src;
2644  AdjustBaseAndOffset(tmp);
2645  GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
2646 }
2647 
2648 
2649 void Assembler::mtc1(Register rt, FPURegister fs) {
2650  GenInstrRegister(COP1, MTC1, rt, fs, f0);
2651 }
2652 
2653 
2654 void Assembler::mthc1(Register rt, FPURegister fs) {
2655  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2656 }
2657 
2658 
2659 void Assembler::mfc1(Register rt, FPURegister fs) {
2660  GenInstrRegister(COP1, MFC1, rt, fs, f0);
2661 }
2662 
2663 
2664 void Assembler::mfhc1(Register rt, FPURegister fs) {
2665  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2666 }
2667 
2668 
2669 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2670  GenInstrRegister(COP1, CTC1, rt, fs);
2671 }
2672 
2673 
2674 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2675  GenInstrRegister(COP1, CFC1, rt, fs);
2676 }
2677 
2678 
2679 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
2680  DCHECK(!IsMipsArchVariant(kMips32r6));
2681  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
2682 }
2683 
2684 
2685 void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
2686  DCHECK(!IsMipsArchVariant(kMips32r6));
2687  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
2688 }
2689 
2690 
2691 void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
2692  FPURegister ft) {
2693  DCHECK(IsMipsArchVariant(kMips32r6));
2694  DCHECK((fmt == D) || (fmt == S));
2695 
2696  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
2697 }
2698 
2699 
2700 void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2701  sel(S, fd, fs, ft);
2702 }
2703 
2704 
2705 void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2706  sel(D, fd, fs, ft);
2707 }
2708 
2709 
2710 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
2711  FPURegister ft) {
2712  DCHECK(IsMipsArchVariant(kMips32r6));
2713  DCHECK((fmt == D) || (fmt == S));
2714  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
2715 }
2716 
2717 
2718 void Assembler::selnez(Register rd, Register rs, Register rt) {
2719  DCHECK(IsMipsArchVariant(kMips32r6));
2720  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
2721 }
2722 
2723 
2724 void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
2725  FPURegister ft) {
2726  DCHECK(IsMipsArchVariant(kMips32r6));
2727  DCHECK((fmt == D) || (fmt == S));
2728  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
2729 }
2730 
2731 
2732 void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2733  seleqz(D, fd, fs, ft);
2734 }
2735 
2736 
2737 void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2738  seleqz(S, fd, fs, ft);
2739 }
2740 
2741 
2742 void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2743  selnez(D, fd, fs, ft);
2744 }
2745 
2746 
2747 void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2748  selnez(S, fd, fs, ft);
2749 }
2750 
2751 
2752 void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
2753  DCHECK(!IsMipsArchVariant(kMips32r6));
2754  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
2755 }
2756 
2757 
2758 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
2759  DCHECK(!IsMipsArchVariant(kMips32r6));
2760  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
2761 }
2762 
2763 
2764 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2765  DCHECK(!IsMipsArchVariant(kMips32r6));
2766  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
2767  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2768 }
2769 
2770 
2771 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2772  DCHECK(!IsMipsArchVariant(kMips32r6));
2773  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
2774  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2775 }
2776 
2777 
2778 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2779  DCHECK(!IsMipsArchVariant(kMips32r6));
2780  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
2781  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2782 }
2783 
2784 
2785 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2786  DCHECK(!IsMipsArchVariant(kMips32r6));
2787  FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
2788  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2789 }
2790 
2791 
2792 // Arithmetic.
2793 
2794 void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2795  GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
2796 }
2797 
2798 
2799 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2800  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2801 }
2802 
2803 
2804 void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2805  GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
2806 }
2807 
2808 
2809 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2810  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2811 }
2812 
2813 
2814 void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2815  GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
2816 }
2817 
2818 
2819 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2820  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2821 }
2822 
2823 void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2824  FPURegister ft) {
2825  DCHECK(IsMipsArchVariant(kMips32r2));
2826  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
2827 }
2828 
2829 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2830  FPURegister ft) {
2831  DCHECK(IsMipsArchVariant(kMips32r2));
2832  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2833 }
2834 
2835 void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2836  FPURegister ft) {
2837  DCHECK(IsMipsArchVariant(kMips32r2));
2838  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
2839 }
2840 
2841 void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2842  FPURegister ft) {
2843  DCHECK(IsMipsArchVariant(kMips32r2));
2844  GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
2845 }
2846 
2847 void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2848  DCHECK(IsMipsArchVariant(kMips32r6));
2849  GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
2850 }
2851 
2852 void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2853  DCHECK(IsMipsArchVariant(kMips32r6));
2854  GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
2855 }
2856 
2857 void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2858  DCHECK(IsMipsArchVariant(kMips32r6));
2859  GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
2860 }
2861 
2862 void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2863  DCHECK(IsMipsArchVariant(kMips32r6));
2864  GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
2865 }
2866 
2867 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2868  GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
2869 }
2870 
2871 
2872 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2873  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2874 }
2875 
2876 
2877 void Assembler::abs_s(FPURegister fd, FPURegister fs) {
2878  GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
2879 }
2880 
2881 
2882 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2883  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2884 }
2885 
2886 
2887 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2888  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2889 }
2890 
2891 
2892 void Assembler::mov_s(FPURegister fd, FPURegister fs) {
2893  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
2894 }
2895 
2896 
2897 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
2898  GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
2899 }
2900 
2901 
2902 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2903  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2904 }
2905 
2906 
2907 void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
2908  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
2909 }
2910 
2911 
2912 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2913  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2914 }
2915 
2916 
2917 void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
2918  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2919  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
2920 }
2921 
2922 
2923 void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
2924  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2925  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
2926 }
2927 
2928 
2929 void Assembler::recip_d(FPURegister fd, FPURegister fs) {
2930  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2931  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
2932 }
2933 
2934 
2935 void Assembler::recip_s(FPURegister fd, FPURegister fs) {
2936  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2937  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
2938 }
2939 
2940 
2941 // Conversions.
2942 
2943 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2944  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2945 }
2946 
2947 
2948 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2949  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2950 }
2951 
2952 
2953 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2954  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2955 }
2956 
2957 
2958 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2959  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2960 }
2961 
2962 
2963 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2964  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2965 }
2966 
2967 
2968 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2969  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2970 }
2971 
2972 
2973 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2974  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2975 }
2976 
2977 
2978 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2979  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2980 }
2981 
2982 
2983 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2984  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2985 }
2986 
2987 
2988 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2989  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2990 }
2991 
2992 
2993 void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2994 
2995 
2996 void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
2997  DCHECK(IsMipsArchVariant(kMips32r6));
2998  DCHECK((fmt == D) || (fmt == S));
2999  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
3000 }
3001 
3002 
3003 void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
3004 
3005 
3006 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
3007  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3008  IsFp64Mode());
3009  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
3010 }
3011 
3012 
3013 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
3014  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3015  IsFp64Mode());
3016  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
3017 }
3018 
3019 
3020 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
3021  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3022  IsFp64Mode());
3023  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
3024 }
3025 
3026 
3027 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
3028  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3029  IsFp64Mode());
3030  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
3031 }
3032 
3033 
3034 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
3035  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3036  IsFp64Mode());
3037  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
3038 }
3039 
3040 
3041 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
3042  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3043  IsFp64Mode());
3044  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
3045 }
3046 
3047 
3048 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
3049  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3050  IsFp64Mode());
3051  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
3052 }
3053 
3054 
3055 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
3056  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3057  IsFp64Mode());
3058  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
3059 }
3060 
3061 
3062 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
3063  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3064  IsFp64Mode());
3065  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
3066 }
3067 
3068 
3069 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
3070  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3071  IsFp64Mode());
3072  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
3073 }
3074 
3075 
3076 void Assembler::class_s(FPURegister fd, FPURegister fs) {
3077  DCHECK(IsMipsArchVariant(kMips32r6));
3078  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
3079 }
3080 
3081 
3082 void Assembler::class_d(FPURegister fd, FPURegister fs) {
3083  DCHECK(IsMipsArchVariant(kMips32r6));
3084  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
3085 }
3086 
3087 
3088 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
3089  FPURegister ft) {
3090  DCHECK(IsMipsArchVariant(kMips32r6));
3091  DCHECK((fmt == D) || (fmt == S));
3092  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
3093 }
3094 
3095 
3096 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
3097  FPURegister ft) {
3098  DCHECK(IsMipsArchVariant(kMips32r6));
3099  DCHECK((fmt == D) || (fmt == S));
3100  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
3101 }
3102 
3103 
3104 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
3105  FPURegister ft) {
3106  DCHECK(IsMipsArchVariant(kMips32r6));
3107  DCHECK((fmt == D) || (fmt == S));
3108  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
3109 }
3110 
3111 
3112 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
3113  FPURegister ft) {
3114  DCHECK(IsMipsArchVariant(kMips32r6));
3115  DCHECK((fmt == D) || (fmt == S));
3116  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
3117 }
3118 
3119 
3120 void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3121  min(S, fd, fs, ft);
3122 }
3123 
3124 
3125 void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3126  min(D, fd, fs, ft);
3127 }
3128 
3129 
3130 void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3131  max(S, fd, fs, ft);
3132 }
3133 
3134 
3135 void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3136  max(D, fd, fs, ft);
3137 }
3138 
3139 
3140 void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3141  mina(S, fd, fs, ft);
3142 }
3143 
3144 
3145 void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3146  mina(D, fd, fs, ft);
3147 }
3148 
3149 
3150 void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
3151  maxa(S, fd, fs, ft);
3152 }
3153 
3154 
3155 void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
3156  maxa(D, fd, fs, ft);
3157 }
3158 
3159 
3160 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
3161  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
3162 }
3163 
3164 
3165 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
3166  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3167  IsFp64Mode());
3168  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
3169 }
3170 
3171 
3172 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
3173  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
3174 }
3175 
3176 
3177 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
3178  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
3179 }
3180 
3181 
3182 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
3183  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
3184  IsFp64Mode());
3185  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
3186 }
3187 
3188 
3189 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
3190  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
3191 }
3192 
3193 
3194 // Conditions for >= MIPSr6.
3195 void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
3196  FPURegister fd, FPURegister fs, FPURegister ft) {
3197  DCHECK(IsMipsArchVariant(kMips32r6));
3198  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
3199  Instr instr = COP1 | fmt | ft.code() << kFtShift |
3200  fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
3201  emit(instr);
3202 }
3203 
3204 
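// Note (editorial): the r6 CMP.condn.fmt encodings use the W and L values
// in the fmt field for single and double precision, hence W/L below.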
3205 void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
3206  FPURegister ft) {
3207  cmp(cond, W, fd, fs, ft);
3208 }
3209 
3210 void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
3211  FPURegister ft) {
3212  cmp(cond, L, fd, fs, ft);
3213 }
3214 
3215 
3216 void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
3217  DCHECK(IsMipsArchVariant(kMips32r6));
3218  BlockTrampolinePoolScope block_trampoline_pool(this);
3219  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
3220  emit(instr);
3221  BlockTrampolinePoolFor(1); // For associated delay slot.
3222 }
3223 
3224 
3225 void Assembler::bc1nez(int16_t offset, FPURegister ft) {
3226  DCHECK(IsMipsArchVariant(kMips32r6));
3227  BlockTrampolinePoolScope block_trampoline_pool(this);
3228  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
3229  emit(instr);
3230  BlockTrampolinePoolFor(1); // For associated delay slot.
3231 }
3232 
3233 
3234 // Conditions for < MIPSr6.
3235 void Assembler::c(FPUCondition cond, SecondaryField fmt,
3236  FPURegister fs, FPURegister ft, uint16_t cc) {
3237  DCHECK(is_uint3(cc));
3238  DCHECK(fmt == S || fmt == D);
3239  DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
3240  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
3241  | cc << 8 | 3 << 4 | cond;
3242  emit(instr);
3243 }
3244 
3245 
3246 void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
3247  uint16_t cc) {
3248  c(cond, S, fs, ft, cc);
3249 }
3250 
3251 
3252 void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
3253  uint16_t cc) {
3254  c(cond, D, fs, ft, cc);
3255 }
3256 
3257 
3258 void Assembler::fcmp(FPURegister src1, const double src2,
3259  FPUCondition cond) {
3260  DCHECK_EQ(src2, 0.0);
3261  mtc1(zero_reg, f14);
3262  cvt_d_w(f14, f14);
3263  c(cond, D, src1, f14, 0);
3264 }
3265 
3266 
3267 void Assembler::bc1f(int16_t offset, uint16_t cc) {
3268  BlockTrampolinePoolScope block_trampoline_pool(this);
3269  DCHECK(is_uint3(cc));
3270  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
3271  emit(instr);
3272  BlockTrampolinePoolFor(1); // For associated delay slot.
3273 }
3274 
3275 
3276 void Assembler::bc1t(int16_t offset, uint16_t cc) {
3277  BlockTrampolinePoolScope block_trampoline_pool(this);
3278  DCHECK(is_uint3(cc));
3279  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
3280  emit(instr);
3281  BlockTrampolinePoolFor(1); // For associated delay slot.
3282 }
3283 
3284 // ---------- MSA instructions ------------
3285 #define MSA_BRANCH_LIST(V) \
3286  V(bz_v, BZ_V) \
3287  V(bz_b, BZ_B) \
3288  V(bz_h, BZ_H) \
3289  V(bz_w, BZ_W) \
3290  V(bz_d, BZ_D) \
3291  V(bnz_v, BNZ_V) \
3292  V(bnz_b, BNZ_B) \
3293  V(bnz_h, BNZ_H) \
3294  V(bnz_w, BNZ_W) \
3295  V(bnz_d, BNZ_D)
3296 
3297 #define MSA_BRANCH(name, opcode) \
3298  void Assembler::name(MSARegister wt, int16_t offset) { \
3299  GenInstrMsaBranch(opcode, wt, offset); \
3300  }
3301 
3302 MSA_BRANCH_LIST(MSA_BRANCH)
3303 #undef MSA_BRANCH
3304 #undef MSA_BRANCH_LIST
3305 
3306 #define MSA_LD_ST_LIST(V) \
3307  V(ld_b, LD_B) \
3308  V(ld_h, LD_H) \
3309  V(ld_w, LD_W) \
3310  V(ld_d, LD_D) \
3311  V(st_b, ST_B) \
3312  V(st_h, ST_H) \
3313  V(st_w, ST_W) \
3314  V(st_d, ST_D)
3315 
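// The MSA MI10 load/store format holds a signed 10-bit offset; larger
// offsets are first materialized into a scratch register, as below.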
3316 #define MSA_LD_ST(name, opcode) \
3317  void Assembler::name(MSARegister wd, const MemOperand& rs) { \
3318  MemOperand source = rs; \
3319  AdjustBaseAndOffset(source); \
3320  if (is_int10(source.offset())) { \
3321  GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
3322  } else { \
3323  UseScratchRegisterScope temps(this); \
3324  Register scratch = temps.Acquire(); \
3325  DCHECK(rs.rm() != scratch); \
3326  addiu(scratch, source.rm(), source.offset()); \
3327  GenInstrMsaMI10(opcode, 0, scratch, wd); \
3328  } \
3329  }
3330 
3331 MSA_LD_ST_LIST(MSA_LD_ST)
3332 #undef MSA_LD_ST
3333 #undef MSA_LD_ST_LIST
3334 
3335 #define MSA_I10_LIST(V) \
3336  V(ldi_b, I5_DF_b) \
3337  V(ldi_h, I5_DF_h) \
3338  V(ldi_w, I5_DF_w) \
3339  V(ldi_d, I5_DF_d)
3340 
3341 #define MSA_I10(name, format) \
3342  void Assembler::name(MSARegister wd, int32_t imm10) { \
3343  GenInstrMsaI10(LDI, format, imm10, wd); \
3344  }
3345 MSA_I10_LIST(MSA_I10)
3346 #undef MSA_I10
3347 #undef MSA_I10_LIST
3348 
3349 #define MSA_I5_LIST(V) \
3350  V(addvi, ADDVI) \
3351  V(subvi, SUBVI) \
3352  V(maxi_s, MAXI_S) \
3353  V(maxi_u, MAXI_U) \
3354  V(mini_s, MINI_S) \
3355  V(mini_u, MINI_U) \
3356  V(ceqi, CEQI) \
3357  V(clti_s, CLTI_S) \
3358  V(clti_u, CLTI_U) \
3359  V(clei_s, CLEI_S) \
3360  V(clei_u, CLEI_U)
3361 
3362 #define MSA_I5_FORMAT(name, opcode, format) \
3363  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3364  uint32_t imm5) { \
3365  GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd); \
3366  }
3367 
3368 #define MSA_I5(name, opcode) \
3369  MSA_I5_FORMAT(name, opcode, b) \
3370  MSA_I5_FORMAT(name, opcode, h) \
3371  MSA_I5_FORMAT(name, opcode, w) \
3372  MSA_I5_FORMAT(name, opcode, d)
3373 
3374 MSA_I5_LIST(MSA_I5)
3375 #undef MSA_I5
3376 #undef MSA_I5_FORMAT
3377 #undef MSA_I5_LIST
3378 
3379 #define MSA_I8_LIST(V) \
3380  V(andi_b, ANDI_B) \
3381  V(ori_b, ORI_B) \
3382  V(nori_b, NORI_B) \
3383  V(xori_b, XORI_B) \
3384  V(bmnzi_b, BMNZI_B) \
3385  V(bmzi_b, BMZI_B) \
3386  V(bseli_b, BSELI_B) \
3387  V(shf_b, SHF_B) \
3388  V(shf_h, SHF_H) \
3389  V(shf_w, SHF_W)
3390 
3391 #define MSA_I8(name, opcode) \
3392  void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \
3393  GenInstrMsaI8(opcode, imm8, ws, wd); \
3394  }
3395 
3396 MSA_I8_LIST(MSA_I8)
3397 #undef MSA_I8
3398 #undef MSA_I8_LIST
3399 
3400 #define MSA_VEC_LIST(V) \
3401  V(and_v, AND_V) \
3402  V(or_v, OR_V) \
3403  V(nor_v, NOR_V) \
3404  V(xor_v, XOR_V) \
3405  V(bmnz_v, BMNZ_V) \
3406  V(bmz_v, BMZ_V) \
3407  V(bsel_v, BSEL_V)
3408 
3409 #define MSA_VEC(name, opcode) \
3410  void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \
3411  GenInstrMsaVec(opcode, wt, ws, wd); \
3412  }
3413 
3414 MSA_VEC_LIST(MSA_VEC)
3415 #undef MSA_VEC
3416 #undef MSA_VEC_LIST
3417 
3418 #define MSA_2R_LIST(V) \
3419  V(pcnt, PCNT) \
3420  V(nloc, NLOC) \
3421  V(nlzc, NLZC)
3422 
3423 #define MSA_2R_FORMAT(name, opcode, format) \
3424  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
3425  GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd); \
3426  }
3427 
3428 #define MSA_2R(name, opcode) \
3429  MSA_2R_FORMAT(name, opcode, b) \
3430  MSA_2R_FORMAT(name, opcode, h) \
3431  MSA_2R_FORMAT(name, opcode, w) \
3432  MSA_2R_FORMAT(name, opcode, d)
3433 
3434 MSA_2R_LIST(MSA_2R)
3435 #undef MSA_2R
3436 #undef MSA_2R_FORMAT
3437 #undef MSA_2R_LIST
3438 
3439 #define MSA_FILL(format) \
3440  void Assembler::fill_##format(MSARegister wd, Register rs) { \
3441  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); \
3442  DCHECK(rs.is_valid() && wd.is_valid()); \
3443  Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format | \
3444  (rs.code() << kWsShift) | (wd.code() << kWdShift) | \
3445  MSA_VEC_2R_2RF_MINOR; \
3446  emit(instr); \
3447  }
3448 
3449 MSA_FILL(b)
3450 MSA_FILL(h)
3451 MSA_FILL(w)
3452 #undef MSA_FILL
3453 
3454 #define MSA_2RF_LIST(V) \
3455  V(fclass, FCLASS) \
3456  V(ftrunc_s, FTRUNC_S) \
3457  V(ftrunc_u, FTRUNC_U) \
3458  V(fsqrt, FSQRT) \
3459  V(frsqrt, FRSQRT) \
3460  V(frcp, FRCP) \
3461  V(frint, FRINT) \
3462  V(flog2, FLOG2) \
3463  V(fexupl, FEXUPL) \
3464  V(fexupr, FEXUPR) \
3465  V(ffql, FFQL) \
3466  V(ffqr, FFQR) \
3467  V(ftint_s, FTINT_S) \
3468  V(ftint_u, FTINT_U) \
3469  V(ffint_s, FFINT_S) \
3470  V(ffint_u, FFINT_U)
3471 
3472 #define MSA_2RF_FORMAT(name, opcode, format) \
3473  void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
3474  GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd); \
3475  }
3476 
3477 #define MSA_2RF(name, opcode) \
3478  MSA_2RF_FORMAT(name, opcode, w) \
3479  MSA_2RF_FORMAT(name, opcode, d)
3480 
3481 MSA_2RF_LIST(MSA_2RF)
3482 #undef MSA_2RF
3483 #undef MSA_2RF_FORMAT
3484 #undef MSA_2RF_LIST
3485 
3486 #define MSA_3R_LIST(V) \
3487  V(sll, SLL_MSA) \
3488  V(sra, SRA_MSA) \
3489  V(srl, SRL_MSA) \
3490  V(bclr, BCLR) \
3491  V(bset, BSET) \
3492  V(bneg, BNEG) \
3493  V(binsl, BINSL) \
3494  V(binsr, BINSR) \
3495  V(addv, ADDV) \
3496  V(subv, SUBV) \
3497  V(max_s, MAX_S) \
3498  V(max_u, MAX_U) \
3499  V(min_s, MIN_S) \
3500  V(min_u, MIN_U) \
3501  V(max_a, MAX_A) \
3502  V(min_a, MIN_A) \
3503  V(ceq, CEQ) \
3504  V(clt_s, CLT_S) \
3505  V(clt_u, CLT_U) \
3506  V(cle_s, CLE_S) \
3507  V(cle_u, CLE_U) \
3508  V(add_a, ADD_A) \
3509  V(adds_a, ADDS_A) \
3510  V(adds_s, ADDS_S) \
3511  V(adds_u, ADDS_U) \
3512  V(ave_s, AVE_S) \
3513  V(ave_u, AVE_U) \
3514  V(aver_s, AVER_S) \
3515  V(aver_u, AVER_U) \
3516  V(subs_s, SUBS_S) \
3517  V(subs_u, SUBS_U) \
3518  V(subsus_u, SUBSUS_U) \
3519  V(subsuu_s, SUBSUU_S) \
3520  V(asub_s, ASUB_S) \
3521  V(asub_u, ASUB_U) \
3522  V(mulv, MULV) \
3523  V(maddv, MADDV) \
3524  V(msubv, MSUBV) \
3525  V(div_s, DIV_S_MSA) \
3526  V(div_u, DIV_U) \
3527  V(mod_s, MOD_S) \
3528  V(mod_u, MOD_U) \
3529  V(dotp_s, DOTP_S) \
3530  V(dotp_u, DOTP_U) \
3531  V(dpadd_s, DPADD_S) \
3532  V(dpadd_u, DPADD_U) \
3533  V(dpsub_s, DPSUB_S) \
3534  V(dpsub_u, DPSUB_U) \
3535  V(pckev, PCKEV) \
3536  V(pckod, PCKOD) \
3537  V(ilvl, ILVL) \
3538  V(ilvr, ILVR) \
3539  V(ilvev, ILVEV) \
3540  V(ilvod, ILVOD) \
3541  V(vshf, VSHF) \
3542  V(srar, SRAR) \
3543  V(srlr, SRLR) \
3544  V(hadd_s, HADD_S) \
3545  V(hadd_u, HADD_U) \
3546  V(hsub_s, HSUB_S) \
3547  V(hsub_u, HSUB_U)
3548 
3549 #define MSA_3R_FORMAT(name, opcode, format) \
3550  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3551  MSARegister wt) { \
3552  GenInstrMsa3R<MSARegister>(opcode, MSA_3R_DF_##format, wt, ws, wd); \
3553  }
3554 
3555 #define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format) \
3556  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3557  Register rt) { \
3558  GenInstrMsa3R<Register>(opcode, MSA_3R_DF_##format, rt, ws, wd); \
3559  }
3560 
3561 #define MSA_3R(name, opcode) \
3562  MSA_3R_FORMAT(name, opcode, b) \
3563  MSA_3R_FORMAT(name, opcode, h) \
3564  MSA_3R_FORMAT(name, opcode, w) \
3565  MSA_3R_FORMAT(name, opcode, d)
3566 
3567 #define MSA_3R_SLD_SPLAT(name, opcode) \
3568  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \
3569  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \
3570  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \
3571  MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d)
3572 
3573 MSA_3R_LIST(MSA_3R)
3574 MSA_3R_SLD_SPLAT(sld, SLD)
3575 MSA_3R_SLD_SPLAT(splat, SPLAT)
3576 
3577 #undef MSA_3R
3578 #undef MSA_3R_FORMAT
3579 #undef MSA_3R_FORMAT_SLD_SPLAT
3580 #undef MSA_3R_SLD_SPLAT
3581 #undef MSA_3R_LIST
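// The sld and splat entries use the MSA_3R_FORMAT_SLD_SPLAT variant because
// their third operand is a general-purpose register rather than an MSA
// register; e.g. for the word format the expansion is:
//   void Assembler::splat_w(MSARegister wd, MSARegister ws, Register rt) {
//     GenInstrMsa3R<Register>(SPLAT, MSA_3R_DF_w, rt, ws, wd);
//   }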
3582 
3583 #define MSA_3RF_LIST1(V) \
3584  V(fcaf, FCAF) \
3585  V(fcun, FCUN) \
3586  V(fceq, FCEQ) \
3587  V(fcueq, FCUEQ) \
3588  V(fclt, FCLT) \
3589  V(fcult, FCULT) \
3590  V(fcle, FCLE) \
3591  V(fcule, FCULE) \
3592  V(fsaf, FSAF) \
3593  V(fsun, FSUN) \
3594  V(fseq, FSEQ) \
3595  V(fsueq, FSUEQ) \
3596  V(fslt, FSLT) \
3597  V(fsult, FSULT) \
3598  V(fsle, FSLE) \
3599  V(fsule, FSULE) \
3600  V(fadd, FADD) \
3601  V(fsub, FSUB) \
3602  V(fmul, FMUL) \
3603  V(fdiv, FDIV) \
3604  V(fmadd, FMADD) \
3605  V(fmsub, FMSUB) \
3606  V(fexp2, FEXP2) \
3607  V(fmin, FMIN) \
3608  V(fmin_a, FMIN_A) \
3609  V(fmax, FMAX) \
3610  V(fmax_a, FMAX_A) \
3611  V(fcor, FCOR) \
3612  V(fcune, FCUNE) \
3613  V(fcne, FCNE) \
3614  V(fsor, FSOR) \
3615  V(fsune, FSUNE) \
3616  V(fsne, FSNE)
3617 
3618 #define MSA_3RF_LIST2(V) \
3619  V(fexdo, FEXDO) \
3620  V(ftq, FTQ) \
3621  V(mul_q, MUL_Q) \
3622  V(madd_q, MADD_Q) \
3623  V(msub_q, MSUB_Q) \
3624  V(mulr_q, MULR_Q) \
3625  V(maddr_q, MADDR_Q) \
3626  V(msubr_q, MSUBR_Q)
3627 
3628 #define MSA_3RF_FORMAT(name, opcode, df, df_c) \
3629  void Assembler::name##_##df(MSARegister wd, MSARegister ws, \
3630  MSARegister wt) { \
3631  GenInstrMsa3RF(opcode, df_c, wt, ws, wd); \
3632  }
3633 
3634 #define MSA_3RF_1(name, opcode) \
3635  MSA_3RF_FORMAT(name, opcode, w, 0) \
3636  MSA_3RF_FORMAT(name, opcode, d, 1)
3637 
3638 #define MSA_3RF_2(name, opcode) \
3639  MSA_3RF_FORMAT(name, opcode, h, 0) \
3640  MSA_3RF_FORMAT(name, opcode, w, 1)
3641 
3642 MSA_3RF_LIST1(MSA_3RF_1)
3643 MSA_3RF_LIST2(MSA_3RF_2)
3644 #undef MSA_3RF_1
3645 #undef MSA_3RF_2
3646 #undef MSA_3RF_FORMAT
3647 #undef MSA_3RF_LIST1
3648 #undef MSA_3RF_LIST2
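// In the 3RF group the data format is encoded in a single bit (df_c above):
// the floating-point operations in MSA_3RF_LIST1 are instantiated for the w
// (single) and d (double) formats, while the fixed-point operations in
// MSA_3RF_LIST2 use the h and w formats.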
3649 
3650 void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) {
3651  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_B, n, ws, wd);
3652 }
3653 
3654 void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) {
3655  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_H, n, ws, wd);
3656 }
3657 
3658 void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) {
3659  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_W, n, ws, wd);
3660 }
3661 
3662 void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) {
3663  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_D, n, ws, wd);
3664 }
3665 
3666 void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) {
3667  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_B, n, ws, wd);
3668 }
3669 
3670 void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) {
3671  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_H, n, ws, wd);
3672 }
3673 
3674 void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) {
3675  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_W, n, ws, wd);
3676 }
3677 
3678 void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) {
3679  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_D, n, ws, wd);
3680 }
3681 
3682 void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) {
3683  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_B, n, ws, rd);
3684 }
3685 
3686 void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) {
3687  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_H, n, ws, rd);
3688 }
3689 
3690 void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) {
3691  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_W, n, ws, rd);
3692 }
3693 
3694 void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) {
3695  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_B, n, ws, rd);
3696 }
3697 
3698 void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) {
3699  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_H, n, ws, rd);
3700 }
3701 
3702 void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) {
3703  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_W, n, ws, rd);
3704 }
3705 
3706 void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) {
3707  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_B, n, rs, wd);
3708 }
3709 
3710 void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) {
3711  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_H, n, rs, wd);
3712 }
3713 
3714 void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) {
3715  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_W, n, rs, wd);
3716 }
3717 
3718 void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) {
3719  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_B, n, ws, wd);
3720 }
3721 
3722 void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
3723  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_H, n, ws, wd);
3724 }
3725 
3726 void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
3727  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_W, n, ws, wd);
3728 }
3729 
3730 void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
3731  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_D, n, ws, wd);
3732 }
3733 
3734 void Assembler::move_v(MSARegister wd, MSARegister ws) {
3735  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
3736  DCHECK(ws.is_valid() && wd.is_valid());
3737  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
3738  (wd.code() << kWdShift) | MSA_ELM_MINOR;
3739  emit(instr);
3740 }
3741 
3742 void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
3743  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
3744  DCHECK(cd.is_valid() && rs.is_valid());
3745  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
3746  (cd.code() << kWdShift) | MSA_ELM_MINOR;
3747  emit(instr);
3748 }
3749 
3750 void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
3751  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
3752  DCHECK(rd.is_valid() && cs.is_valid());
3753  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
3754  (rd.code() << kWdShift) | MSA_ELM_MINOR;
3755  emit(instr);
3756 }
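// Usage sketch for the control-register moves (illustrative, assuming the
// MSACSR constant names the MSA control/status register):
//   cfcmsa(t0, MSACSR);  // read the MSA control/status register into t0
//   ctcmsa(MSACSR, t0);  // write t0 back to the MSA control/status register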
3757 
3758 #define MSA_BIT_LIST(V) \
3759  V(slli, SLLI) \
3760  V(srai, SRAI) \
3761  V(srli, SRLI) \
3762  V(bclri, BCLRI) \
3763  V(bseti, BSETI) \
3764  V(bnegi, BNEGI) \
3765  V(binsli, BINSLI) \
3766  V(binsri, BINSRI) \
3767  V(sat_s, SAT_S) \
3768  V(sat_u, SAT_U) \
3769  V(srari, SRARI) \
3770  V(srlri, SRLRI)
3771 
3772 #define MSA_BIT_FORMAT(name, opcode, format) \
3773  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3774  uint32_t m) { \
3775  GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd); \
3776  }
3777 
3778 #define MSA_BIT(name, opcode) \
3779  MSA_BIT_FORMAT(name, opcode, b) \
3780  MSA_BIT_FORMAT(name, opcode, h) \
3781  MSA_BIT_FORMAT(name, opcode, w) \
3782  MSA_BIT_FORMAT(name, opcode, d)
3783 
3784 MSA_BIT_LIST(MSA_BIT)
3785 #undef MSA_BIT
3786 #undef MSA_BIT_FORMAT
3787 #undef MSA_BIT_LIST
3788 
3789 int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
3790  intptr_t pc_delta) {
3791  Instr instr = instr_at(pc);
3792 
3793  if (RelocInfo::IsInternalReference(rmode)) {
3794  int32_t* p = reinterpret_cast<int32_t*>(pc);
3795  if (*p == 0) {
3796  return 0; // Number of instructions patched.
3797  }
3798  *p += pc_delta;
3799  return 1; // Number of instructions patched.
3800  } else {
3801  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
3802  if (IsLui(instr)) {
3803  Instr instr1 = instr_at(pc + 0 * kInstrSize);
3804  Instr instr2 = instr_at(pc + 1 * kInstrSize);
3805  DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
3806  int32_t imm;
3807  if (IsJicOrJialc(instr2)) {
3808  imm = CreateTargetAddress(instr1, instr2);
3809  } else {
3810  imm = GetLuiOriImmediate(instr1, instr2);
3811  }
3812 
3813  if (imm == kEndOfJumpChain) {
3814  return 0; // Number of instructions patched.
3815  }
3816  imm += pc_delta;
3817  DCHECK_EQ(imm & 3, 0);
3818  instr1 &= ~kImm16Mask;
3819  instr2 &= ~kImm16Mask;
3820 
3821  if (IsJicOrJialc(instr2)) {
3822  uint32_t lui_offset_u, jic_offset_u;
3823  Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
3824  instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
3825  instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
3826  } else {
3827  PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
3828  1 * kInstrSize);
3829  }
3830  return 2; // Number of instructions patched.
3831  } else {
3832  UNREACHABLE();
3833  }
3834  }
3835 }
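// Worked example for the lui/ori case above (illustrative values): if the
// encoded reference is 0x12345678, instr1 carries the imm16 0x1234 and
// instr2 carries 0x5678; relocating by pc_delta = 0x100 patches the pair so
// that GetLuiOriImmediate() afterwards yields 0x12345778.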
3836 
3837 void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
3838  intptr_t pc_delta) {
3839  Instr instr = instr_at(pc);
3840 
3841  DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
3842  if (IsLui(instr)) {
3843  Instr instr1 = instr_at(pc + 0 * kInstrSize);
3844  Instr instr2 = instr_at(pc + 1 * kInstrSize);
3845  Instr instr3 = instr_at(pc + 2 * kInstrSize);
3846  int32_t imm;
3847  Address ori_offset;
3848  if (IsNal(instr2)) {
3849  instr2 = instr3;
3850  ori_offset = 2 * kInstrSize;
3851  } else {
3852  ori_offset = 1 * kInstrSize;
3853  }
3854  DCHECK(IsOri(instr2));
3855  imm = GetLuiOriImmediate(instr1, instr2);
3856  instr1 &= ~kImm16Mask;
3857  instr2 &= ~kImm16Mask;
3858 
3859  if (imm == kEndOfJumpChain) {
3860  return;
3861  }
3862  imm += pc_delta;
3863  DCHECK_EQ(imm & 3, 0);
3864  PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, ori_offset);
3865  return;
3866  } else {
3867  UNREACHABLE();
3868  }
3869 }
3870 
3871 void Assembler::GrowBuffer() {
3872  if (!own_buffer_) FATAL("external code buffer is too small");
3873 
3874  // Compute new buffer size.
3875  CodeDesc desc; // the new buffer
3876  if (buffer_size_ < 1 * MB) {
3877  desc.buffer_size = 2 * buffer_size_;
3878  } else {
3879  desc.buffer_size = buffer_size_ + 1 * MB;
3880  }
3881 
3882  // Some internal data structures overflow for very large buffers, so
3883  // kMaximalBufferSize must be kept small enough to avoid that.
3884  if (desc.buffer_size > kMaximalBufferSize) {
3885  V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
3886  }
3887 
3888  // Set up new buffer.
3889  desc.buffer = NewArray<byte>(desc.buffer_size);
3890  desc.origin = this;
3891 
3892  desc.instr_size = pc_offset();
3893  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
3894 
3895  // Copy the data.
3896  int pc_delta = desc.buffer - buffer_;
3897  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
3898  MemMove(desc.buffer, buffer_, desc.instr_size);
3899  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
3900  desc.reloc_size);
3901 
3902  // Switch buffers.
3903  DeleteArray(buffer_);
3904  buffer_ = desc.buffer;
3905  buffer_size_ = desc.buffer_size;
3906  pc_ += pc_delta;
3907  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
3908  reloc_info_writer.last_pc() + pc_delta);
3909 
3910  // Relocate runtime entries.
3911  for (RelocIterator it(desc); !it.done(); it.next()) {
3912  RelocInfo::Mode rmode = it.rinfo()->rmode();
3913  if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
3914  rmode == RelocInfo::INTERNAL_REFERENCE) {
3915  RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
3916  }
3917  }
3918  DCHECK(!overflow());
3919 }
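// Growth policy example: a 256KB buffer is doubled to 512KB, while a 3MB
// buffer grows by the fixed 1MB step to 4MB; doubling keeps reallocation
// amortized for small buffers, and the fixed step bounds over-allocation
// for large ones.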
3920 
3921 
3922 void Assembler::db(uint8_t data) {
3923  CheckForEmitInForbiddenSlot();
3924  EmitHelper(data);
3925 }
3926 
3927 
3928 void Assembler::dd(uint32_t data) {
3929  CheckForEmitInForbiddenSlot();
3930  EmitHelper(data);
3931 }
3932 
3933 
3934 void Assembler::dq(uint64_t data) {
3935  CheckForEmitInForbiddenSlot();
3936  EmitHelper(data);
3937 }
3938 
3939 
3940 void Assembler::dd(Label* label) {
3941  uint32_t data;
3942  CheckForEmitInForbiddenSlot();
3943  if (label->is_bound()) {
3944  data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
3945  } else {
3946  data = jump_address(label);
3947  unbound_labels_count_++;
3948  internal_reference_positions_.insert(label->pos());
3949  }
3950  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
3951  EmitHelper(data);
3952 }
3953 
3954 
3955 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
3956  if (!ShouldRecordRelocInfo(rmode)) return;
3957  // We do not try to reuse pool constants.
3958  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
3959  DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
3960  reloc_info_writer.Write(&rinfo);
3961 }
3962 
3963 void Assembler::BlockTrampolinePoolFor(int instructions) {
3964  CheckTrampolinePoolQuick(instructions);
3965  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
3966 }
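// For example, BlockTrampolinePoolFor(2) guarantees that the next two
// instructions are emitted contiguously, with no trampoline pool in between.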
3967 
3968 
3969 void Assembler::CheckTrampolinePool() {
3970  // Some small sequences of instructions must not be broken up by the
3971  // insertion of a trampoline pool; such sequences are protected by setting
3972  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
3973  // which are both checked here. Also, recursive calls to CheckTrampolinePool
3974  // are blocked by trampoline_pool_blocked_nesting_.
3975  if ((trampoline_pool_blocked_nesting_ > 0) ||
3976  (pc_offset() < no_trampoline_pool_before_)) {
3977  // Emission is currently blocked; make sure we try again as soon as
3978  // possible.
3979  if (trampoline_pool_blocked_nesting_ > 0) {
3980  next_buffer_check_ = pc_offset() + kInstrSize;
3981  } else {
3982  next_buffer_check_ = no_trampoline_pool_before_;
3983  }
3984  return;
3985  }
3986 
3987  DCHECK(!trampoline_emitted_);
3988  DCHECK_GE(unbound_labels_count_, 0);
3989  if (unbound_labels_count_ > 0) {
3990  // First we emit a jump (2 instructions), then we emit the trampoline pool.
3991  { BlockTrampolinePoolScope block_trampoline_pool(this);
3992  Label after_pool;
3993  if (IsMipsArchVariant(kMips32r6)) {
3994  bc(&after_pool);
3995  } else {
3996  b(&after_pool);
3997  }
3998  nop();
3999 
4000  int pool_start = pc_offset();
4001  for (int i = 0; i < unbound_labels_count_; i++) {
4002  {
4003  if (IsMipsArchVariant(kMips32r6)) {
4004  bc(&after_pool);
4005  nop();
4006  } else {
4007  GenPCRelativeJump(t8, t9, 0, RelocInfo::NONE,
4008  BranchDelaySlot::PROTECT);
4009  }
4010  }
4011  }
4012  bind(&after_pool);
4013  trampoline_ = Trampoline(pool_start, unbound_labels_count_);
4014 
4015  trampoline_emitted_ = true;
4016  // As we are only going to emit the trampoline once, we need to prevent
4017  // any further emission.
4018  next_buffer_check_ = kMaxInt;
4019  }
4020  } else {
4021  // The number of branches to unbound labels is zero at this point, so we
4022  // can move the next buffer check out as far as possible.
4023  next_buffer_check_ =
4024  pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
4025  }
4026  return;
4027 }
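// In short: while unbound labels exist, a pool of long-jump slots is emitted
// before pc_offset() can drift more than roughly kMaxBranchOffset bytes, so
// every short branch (16-bit signed, word-scaled offset) can still reach a
// trampoline slot that forwards to its final target.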
4028 
4029 
4030 Address Assembler::target_address_at(Address pc) {
4031  Instr instr1 = instr_at(pc);
4032  Instr instr2 = instr_at(pc + kInstrSize);
4033  Instr instr3 = instr_at(pc + 2 * kInstrSize);
4034  // Interpret the two instructions generated by li (lui/ori) or by one of
4035  // the optimized pairs lui/jic, aui/jic or lui/jialc.
4036  if (IsLui(instr1)) {
4037  if (IsOri(instr2)) {
4038  Address target_address;
4039  // Assemble the 32-bit value.
4040  target_address = GetLuiOriImmediate(instr1, instr2);
4041  if (IsAddu(instr3, t9, ra, t9)) {
4042  target_address += pc + kRelativeJumpForBuiltinsOffset;
4043  }
4044  return target_address;
4045  } else if (IsJicOrJialc(instr2)) {
4046  // Assemble the 32-bit value.
4047  return static_cast<Address>(CreateTargetAddress(instr1, instr2));
4048  } else if (IsNal(instr2)) {
4049  DCHECK(IsOri(instr3));
4050  Address target_address;
4051  target_address = GetLuiOriImmediate(instr1, instr3);
4052  return target_address + pc + kRelativeCallForBuiltinsOffset;
4053  }
4054  }
4055 
4056  // We should never get here; force a bad address if we do.
4057  UNREACHABLE();
4058 }
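// Decoding example for the plain lui/ori case (illustrative values):
//   lui  t9, 0x1234       // instr1: upper 16 bits
//   ori  t9, t9, 0x5678   // instr2: lower 16 bits
// GetLuiOriImmediate() reassembles these into the address 0x12345678.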
4059 
4060 
4061 // MIPS and ia32 use opposite encodings for qNaN and sNaN, so an ia32 qNaN
4062 // is a MIPS sNaN and an ia32 sNaN is a MIPS qNaN. When running from a heap
4063 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
4064 // std::numeric_limits<double>::quiet_NaN() (used below) returns a qNaN.
4065 void Assembler::QuietNaN(HeapObject* object) {
4066  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
4067 }
4068 
4069 
4070 // On MIPS, a target address is stored in a lui/ori instruction pair, each
4071 // of which loads 16 bits of the 32-bit address into a register.
4072 // Patching the address must replace both instructions and flush the i-cache.
4073 // On r6, the target address is stored in a lui/jic pair, and both
4074 // instructions have to be patched.
4075 void Assembler::set_target_value_at(Address pc, uint32_t target,
4076  ICacheFlushMode icache_flush_mode) {
4077  Instr instr1 = instr_at(pc);
4078  Instr instr2 = instr_at(pc + kInstrSize);
4079 
4080 #ifdef DEBUG
4081  // Check that we have the result of a li macro-instruction, i.e. an instruction pair.
4082  DCHECK(IsLui(instr1) &&
4083  (IsOri(instr2) || IsJicOrJialc(instr2) || IsNal(instr2)));
4084 #endif
4085 
4086  if (IsJicOrJialc(instr2)) {
4087  // Must use 2 instructions to ensure patchable code => use lui and jic.
4088  uint32_t lui_offset, jic_offset;
4089  Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
4090 
4091  instr1 &= ~kImm16Mask;
4092  instr2 &= ~kImm16Mask;
4093 
4094  instr1 |= lui_offset;
4095  instr2 |= jic_offset;
4096 
4097  instr_at_put(pc, instr1);
4098  instr_at_put(pc + kInstrSize, instr2);
4099  } else {
4100  Instr instr3 = instr_at(pc + 2 * kInstrSize);
4101  // Handle the case where relative calls/jumps are used for builtins.
4102  if (IsNal(instr2)) {
4103  target -= pc + kRelativeCallForBuiltinsOffset;
4104  }
4105  if (IsAddu(instr3, t9, ra, t9)) {
4106  target -= pc + kRelativeJumpForBuiltinsOffset;
4107  }
4108  // Must use 2 instructions to ensure patchable code => just use lui and ori.
4109  // lui rt, upper-16.
4110  // ori rt, rt, lower-16.
4111  if (IsNal(instr2)) {
4112  instr1 &= ~kImm16Mask;
4113  instr3 &= ~kImm16Mask;
4114  PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr3,
4115  2 * kInstrSize);
4116  } else {
4117  instr1 &= ~kImm16Mask;
4118  instr2 &= ~kImm16Mask;
4119  PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr2,
4120  1 * kInstrSize);
4121  }
4122  }
4123 
4124  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
4125  Assembler::FlushICache(pc, 2 * sizeof(int32_t));
4126  }
4127 }
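// For the lui/jic form, UnpackTargetAddressUnsigned compensates for jic's
// sign-extended 16-bit immediate. Illustrative values: target 0x12348765 is
// split into lui 0x1235 and jic 0x8765, since 0x12350000 plus the
// sign-extended 0x8765 wraps back to 0x12348765.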
4128 
4129 void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
4130  RelocInfo::Mode rmode,
4131  BranchDelaySlot bdslot) {
4132  // The order of these instructions is relied upon when patching them
4133  // or when changing the imm32 that the lui/ori pair loads.
4134  or_(tf, ra, zero_reg);
4135  nal(); // The position of this nal determines kLongBranchPCOffset.
4136  if (!RelocInfo::IsNone(rmode)) {
4137  RecordRelocInfo(rmode);
4138  }
4139  lui(ts, (imm32 & kHiMask) >> kLuiShift);
4140  ori(ts, ts, (imm32 & kImm16Mask));
4141  addu(ts, ra, ts);
4142  if (bdslot == USE_DELAY_SLOT) {
4143  or_(ra, tf, zero_reg);
4144  }
4145  jr(ts);
4146  if (bdslot == PROTECT) {
4147  or_(ra, tf, zero_reg);
4148  }
4149 }
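// The sequence above works as follows: nal writes its own pc + 8 into ra
// without branching, lui/ori materialize the pc-relative offset in ts, addu
// converts it into an absolute target, and jr performs the jump; tf holds
// the caller's ra across the clobbering nal and is restored either just
// before the jump (USE_DELAY_SLOT, leaving the delay slot for the caller)
// or in the delay slot itself (PROTECT).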
4150 
4151 void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
4152  RelocInfo::Mode rmode,
4153  BranchDelaySlot bdslot) {
4154  if (!RelocInfo::IsNone(rmode)) {
4155  RecordRelocInfo(rmode);
4156  }
4157  // The order of these instructions is relied upon when patching them
4158  // or when changing the imm32 that the lui/ori pair loads.
4159  lui(t, (imm32 & kHiMask) >> kLuiShift);
4160  nal(); // The position of this nal determines kLongBranchPCOffset.
4161  ori(t, t, (imm32 & kImm16Mask));
4162  addu(t, ra, t);
4163  jalr(t);
4164  if (bdslot == PROTECT) nop();
4165 }
4166 
4167 UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
4168  : available_(assembler->GetScratchRegisterList()),
4169  old_available_(*available_) {}
4170 
4171 UseScratchRegisterScope::~UseScratchRegisterScope() {
4172  *available_ = old_available_;
4173 }
4174 
4175 Register UseScratchRegisterScope::Acquire() {
4176  DCHECK_NOT_NULL(available_);
4177  DCHECK_NE(*available_, 0);
4178  int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
4179  *available_ &= ~(1UL << index);
4180 
4181  return Register::from_code(index);
4182 }
4183 
4184 bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
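// Typical usage of the scratch-register scope (illustrative):
//   {
//     UseScratchRegisterScope temps(&assm);
//     Register scratch = temps.Acquire();
//     // ... scratch is reserved until the end of this block ...
//   }  // the destructor restores the previous available-register list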
4185 
4186 } // namespace internal
4187 } // namespace v8
4188 
4189 #endif // V8_TARGET_ARCH_MIPS