V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Pages
assembler-s390.cc
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
36 
37 #include "src/s390/assembler-s390.h"
38 #include <sys/auxv.h>
39 #include <set>
40 #include <string>
41 
42 #if V8_TARGET_ARCH_S390
43 
44 #if V8_HOST_ARCH_S390
45 #include <elf.h> // Required for auxv checks for STFLE support
46 #endif
47 
48 #include "src/base/bits.h"
49 #include "src/base/cpu.h"
50 #include "src/code-stubs.h"
51 #include "src/deoptimizer.h"
52 #include "src/macro-assembler.h"
53 #include "src/s390/assembler-s390-inl.h"
54 #include "src/string-constants.h"
55 
56 namespace v8 {
57 namespace internal {
58 
// Get the CPU features enabled by the build.
// No CPU features are implied by compiler flags on S390, so the set is
// always empty.
static unsigned CpuFeaturesImpliedByCompiler() { return 0u; }
64 
// Returns true if |feature| (a kernel hwcap-style name such as "vx" or
// "stfle") was advertised by the kernel through the AT_HWCAP auxiliary
// vector entry. On non-S390 hosts (simulator builds) every known feature
// is reported as available. The two std::set statics are deliberately
// leaked (constructed with new) to avoid static-destruction-order issues
// at process exit.
static bool supportsCPUFeature(const char* feature) {
  static std::set<std::string>& features = *new std::set<std::string>();
  static std::set<std::string>& all_available_features =
      *new std::set<std::string>({"iesan3", "zarch", "stfle", "msa", "ldisp",
                                  "eimm", "dfp", "etf3eh", "highgprs", "te",
                                  "vx"});
  if (features.empty()) {
#if V8_HOST_ARCH_S390

// Older glibc headers may not define HWCAP_S390_VX; the kernel value is
// fixed, so supply it ourselves.
#ifndef HWCAP_S390_VX
#define HWCAP_S390_VX 2048
#endif
#define CHECK_AVAILABILITY_FOR(mask, value) \
  if (f & mask) features.insert(value);

    // initialize feature vector from the kernel-reported hardware
    // capability bits.
    uint64_t f = getauxval(AT_HWCAP);
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ESAN3, "iesan3")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ZARCH, "zarch")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_STFLE, "stfle")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_MSA, "msa")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_LDISP, "ldisp")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_EIMM, "eimm")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_DFP, "dfp")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ETF3EH, "etf3eh")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_HIGH_GPRS, "highgprs")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_TE, "te")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_VX, "vx")
#else
    // import all features: the simulator can emulate everything.
    features.insert(all_available_features.begin(),
                    all_available_features.end());
#endif
  }
  USE(all_available_features);
  return features.find(feature) != features.end();
}
102 
// Check whether the Store Facility List Extended (STFLE) instruction is
// available on the platform. The instruction stores a bit vector of the
// installed hardware facilities. Availability is advertised by the kernel
// via the HWCAP_S390_STFLE bit of the AT_HWCAP auxiliary-vector entry,
// which we read from /proc/self/auxv.
static bool supportsSTFLE() {
#if V8_HOST_ARCH_S390
  static bool read_tried = false;
  static uint32_t auxv_hwcap = 0;

  if (!read_tried) {
    // Open the AUXV (auxiliary vector) pseudo-file
    int fd = open("/proc/self/auxv", O_RDONLY);

    read_tried = true;
    if (fd != -1) {
#if V8_TARGET_ARCH_S390X
      static Elf64_auxv_t buffer[16];
      Elf64_auxv_t* auxv_element;
#else
      static Elf32_auxv_t buffer[16];
      Elf32_auxv_t* auxv_element;
#endif
      int bytes_read;
      do {
        // Read a chunk of the AUXV.
        bytes_read = read(fd, buffer, sizeof(buffer));
        // read() returns a byte count; convert it to a count of complete
        // auxv entries before doing element-unit pointer arithmetic.
        // (The previous code compared `auxv_element + sizeof(auxv_element)`
        // against `buffer + bytes_read`, which both mixed units and ran
        // past the 16-entry buffer.)
        int entries =
            bytes_read > 0 ? bytes_read / static_cast<int>(sizeof(buffer[0]))
                           : 0;
        // Locate the HWCAP entry if it is in this chunk; AT_NULL marks the
        // end of the auxiliary vector.
        for (auxv_element = buffer;
             auxv_element < buffer + entries &&
             auxv_element->a_type != AT_NULL;
             auxv_element++) {
          if (auxv_element->a_type == AT_HWCAP) {
            /* Note: Both auxv_hwcap and buffer are static */
            auxv_hwcap = auxv_element->a_un.a_val;
            goto done_reading;
          }
        }
        // Stop at EOF (0) or error (<0); the old `bytes_read >= 0` loop
        // never terminated once read() hit end-of-file.
      } while (bytes_read > 0);
    done_reading:
      close(fd);
    }
  }

  // Did not find result
  if (0 == auxv_hwcap) {
    return false;
  }

  // HWCAP_S390_STFLE is defined to be 4 in include/asm/elf.h. Currently
  // hardcoded in case that include file does not exist.
  const uint32_t _HWCAP_S390_STFLE = 4;
  return (auxv_hwcap & _HWCAP_S390_STFLE) != 0;
#else
  // STFLE is not available on non-s390 hosts
  return false;
#endif
}
159 
// Probe the host CPU (or assume full support under the simulator) and
// populate the |supported_| feature bitmask. For cross compiles only the
// statically-determined features are used so the snapshot stays portable.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 256;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

#ifdef DEBUG
  initialized_ = true;
#endif

  static bool performSTFLE = supportsSTFLE();

// Need to define host, as we are generating inlined S390 assembly to test
// for facilities.
#if V8_HOST_ARCH_S390
  if (performSTFLE) {
    // STFLE D(B) requires:
    //  GPR0 = (# of doublewords to store) - 1; we set GPR0 = 2 below, i.e.
    //  request 3 doublewords, because the Vector Facility bit (129) lives
    //  in the third doubleword of the facility list.
    //  D(B) addresses the memory area receiving the facility bits.
    int64_t facilities[3] = {0L};
    int16_t reg0;
    // LHI sets up GPR0
    // STFLE is specified as .insn, as opcode is not recognized.
    // We register the instructions kill r0 (LHI) and the CC (STFLE).
    asm volatile(
        "lhi %%r0,2\n"
        ".insn s,0xb2b00000,%0\n"
        : "=Q"(facilities), "=r"(reg0)
        :
        : "cc", "r0");

    uint64_t one = static_cast<uint64_t>(1);
    // Facility bits are numbered from the MSB of the list, hence the
    // (63 - bit) shifts below.
    // Test for Distinct Operands Facility - Bit 45
    if (facilities[0] & (one << (63 - 45))) {
      supported_ |= (1u << DISTINCT_OPS);
    }
    // Test for General Instruction Extension Facility - Bit 34
    if (facilities[0] & (one << (63 - 34))) {
      supported_ |= (1u << GENERAL_INSTR_EXT);
    }
    // Test for Floating Point Extension Facility - Bit 37
    if (facilities[0] & (one << (63 - 37))) {
      supported_ |= (1u << FLOATING_POINT_EXT);
    }
    // Test for Vector Facility - Bit 129 (doubleword 2, bit 1); also
    // require the kernel to report "vx" so vector registers are saved.
    if (facilities[2] & (one << (63 - (129 - 128))) &&
        supportsCPUFeature("vx")) {
      supported_ |= (1u << VECTOR_FACILITY);
    }
    // Test for Miscellaneous Instruction Extension Facility - Bit 58
    if (facilities[0] & (1lu << (63 - 58))) {
      supported_ |= (1u << MISC_INSTR_EXT2);
    }
  }
#else
  // All distinct ops instructions can be simulated
  supported_ |= (1u << DISTINCT_OPS);
  // RISBG can be simulated
  supported_ |= (1u << GENERAL_INSTR_EXT);
  supported_ |= (1u << FLOATING_POINT_EXT);
  supported_ |= (1u << MISC_INSTR_EXT2);
  USE(performSTFLE);  // To avoid assert
  USE(supportsCPUFeature);
  supported_ |= (1u << VECTOR_FACILITY);
#endif
  supported_ |= (1u << FPU);
}
232 
233 void CpuFeatures::PrintTarget() {
234  const char* s390_arch = nullptr;
235 
236 #if V8_TARGET_ARCH_S390X
237  s390_arch = "s390x";
238 #else
239  s390_arch = "s390";
240 #endif
241 
242  printf("target %s\n", s390_arch);
243 }
244 
245 void CpuFeatures::PrintFeatures() {
246  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
247  printf("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
248  printf("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
249  printf("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
250  printf("VECTOR_FACILITY=%d\n", CpuFeatures::IsSupported(VECTOR_FACILITY));
251  printf("MISC_INSTR_EXT2=%d\n", CpuFeatures::IsSupported(MISC_INSTR_EXT2));
252 }
253 
254 Register ToRegister(int num) {
255  DCHECK(num >= 0 && num < kNumRegisters);
256  const Register kRegisters[] = {r0, r1, r2, r3, r4, r5, r6, r7,
257  r8, r9, r10, fp, ip, r13, r14, sp};
258  return kRegisters[num];
259 }
260 
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// Relocation modes whose targets must be fixed up when code moves:
// pc-relative code targets and internal references.
const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
267 
268 bool RelocInfo::IsCodedSpecially() {
269  // The deserializer needs to know whether a pointer is specially
270  // coded. Being specially coded on S390 means that it is an iihf/iilf
271  // instruction sequence, and that is always the case inside code
272  // objects.
273  return true;
274 }
275 
276 bool RelocInfo::IsInConstantPool() { return false; }
277 
// Translate this reloc entry's runtime-entry target address into a
// deoptimizer table id of the given |kind|.
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
  DCHECK(IsRuntimeEntry(rmode_));
  return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
282 
// The tag of a wasm (stub) call is the low 32 bits of the call target
// encoded at this reloc's pc.
uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}
288 
289 // -----------------------------------------------------------------------------
290 // Implementation of Operand and MemOperand
291 // See assembler-s390-inl.h for inlined constructors
292 
// Build an immediate operand holding the (raw) address of a heap object;
// tagged EMBEDDED_OBJECT so the GC/serializer can fix it up later.
Operand::Operand(Handle<HeapObject> handle) {
  AllowHandleDereference using_location;
  rm_ = no_reg;
  value_.immediate = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
299 
// Build an operand for |value|: a plain Smi immediate when the double is
// Smi-representable, otherwise a deferred heap-number request that is
// allocated and patched in at GetCode time.
Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}
308 
// Build an operand for a string constant; the actual string object is
// allocated and patched in when the Code object is created.
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}
315 
// D(B) form: base register plus displacement, no index (r0 means "no
// index" in S390 addressing).
MemOperand::MemOperand(Register rn, int32_t offset)
    : baseRegister(rn), indexRegister(r0), offset_(offset) {}

// D(X,B) form: index + base + displacement.
MemOperand::MemOperand(Register rx, Register rb, int32_t offset)
    : baseRegister(rb), indexRegister(rx), offset_(offset) {}
321 
// Resolve every deferred heap-object request recorded during assembly:
// allocate the object (heap number / code stub / string constant) and
// patch its address or code-target index into the instruction stream.
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    Address pc = reinterpret_cast<Address>(buffer_ + request.offset());
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber: {
        object =
            isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
        // Patch the placeholder load with the allocated object's address.
        set_target_address_at(pc, kNullAddress, object.address(),
                              SKIP_ICACHE_FLUSH);
        break;
      }
      case HeapObjectRequest::kCodeStub: {
        request.code_stub()->set_isolate(isolate);
        // The code-target index was emitted in the low 32 bits of the
        // instruction at pc (see Assembler::call(CodeStub*)).
        SixByteInstr instr =
            Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
        int index = instr & 0xFFFFFFFF;
        UpdateCodeTarget(index, request.code_stub()->GetCode());
        break;
      }
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        set_target_address_at(pc, kNullAddress,
                              str->AllocateStringConstant(isolate).address());
        break;
      }
    }
  }
}
353 
354 // -----------------------------------------------------------------------------
355 // Specific instructions, constants, and masks.
356 
// Set up an assembler over |buffer|: relocation info grows downward from
// the end of the buffer while instructions grow upward from the start.
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
                     int buffer_size)
    : AssemblerBase(options, buffer, buffer_size) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  ReserveCodeTargetSpace(100);  // pre-size the code-target vector
  last_bound_pos_ = 0;
  relocations_.reserve(128);  // typical upper bound; avoids early regrowth
}
365 
// Finalize assembly: flush deferred relocations, resolve heap-object
// requests, and fill in |desc| describing the generated code and its
// relocation information.
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  EmitRelocations();

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written downward from the buffer end.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->constant_pool_size = 0;
  desc->origin = this;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
381 
382 void Assembler::Align(int m) {
383  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
384  while ((pc_offset() & (m - 1)) != 0) {
385  nop(0);
386  }
387 }
388 
389 void Assembler::CodeTargetAlign() { Align(8); }
390 
391 Condition Assembler::GetCondition(Instr instr) {
392  switch (instr & kCondMask) {
393  case BT:
394  return eq;
395  case BF:
396  return ne;
397  default:
398  UNIMPLEMENTED();
399  }
400  return al;
401 }
402 
#if V8_TARGET_ARCH_S390X
// This code assumes a FIXED_SEQUENCE for 64bit loads (iihf/iilf)
bool Assembler::Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2) {
  // Check the instructions are the iihf/iilf load into ip.
  // 0xC0C8 = iihf ip (r12), 0xC0C9 = iilf ip (r12); the opcode and register
  // field occupy the top 16 bits of each 48-bit instruction.
  return (((instr1 >> 32) == 0xC0C8) && ((instr2 >> 32) == 0xC0C9));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (iilf)
bool Assembler::Is32BitLoadIntoIP(SixByteInstr instr) {
  // Check the instruction is an iilf load into ip/r12.
  return ((instr >> 32) == 0xC0C9);
}
#endif
416 
417 // Labels refer to positions in the (to be) generated code.
418 // There are bound, linked, and unused labels.
419 //
420 // Bound labels refer to known positions in the already
421 // generated code. pos() is the position the label refers to.
422 //
423 // Linked labels refer to unknown positions in the code
424 // to be generated; pos() is the position of the last
425 // instruction using the label.
426 
// The link chain is terminated by a negative code position (must be
// aligned); real code positions are always non-negative, so -4 is a safe
// halfword-aligned sentinel.
const int kEndOfChain = -4;
429 
// Returns the target address of the relative instructions, typically
// of the form: pos + imm (where immediate is in # of halfwords for
// BR* and LARL). Returns kEndOfChain when the encoded displacement is 0,
// i.e. this instruction is the last entry of a label's link chain.
int Assembler::target_at(int pos) {
  SixByteInstr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);

  if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
    // 16-bit relative formats: displacement lives in the low 16 bits.
    int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
    imm16 <<= 1;  // immediate is in # of halfwords
    if (imm16 == 0) return kEndOfChain;
    return pos + imm16;
  } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
             BRASL == opcode) {
    // 32-bit payload in the low 32 bits; LLILF carries a raw value
    // (label constant), the others a halfword displacement.
    int32_t imm32 =
        static_cast<int32_t>(instr & (static_cast<uint64_t>(0xFFFFFFFF)));
    if (LLILF != opcode)
      imm32 <<= 1;  // BR* + LARL treat immediate in # of halfwords
    if (imm32 == 0) return kEndOfChain;
    return pos + imm32;
  } else if (BRXHG == opcode) {
    // offset is in bits 16-31 of 48 bit instruction
    instr = instr >> 16;
    int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
    imm16 <<= 1;  // immediate is in # of halfwords
    if (imm16 == 0) return kEndOfChain;
    return pos + imm16;
  }

  // Unknown condition
  DCHECK(false);
  return -1;
}
464 
// Update the target address of the relative instruction at |pos| so it
// refers to |target_pos|. If |is_branch| is non-null it is set to whether
// the patched instruction is a branch (vs. LARL/LLILF address material).
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  SixByteInstr instr = instr_at(pos);
  Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);

  if (is_branch != nullptr) {
    *is_branch = (opcode == BRC || opcode == BRCT || opcode == BRCTG ||
                  opcode == BRCL || opcode == BRASL || opcode == BRXH ||
                  opcode == BRXHG);
  }

  if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
    // 16-bit relative branch; displacement encoded in halfwords.
    int16_t imm16 = target_pos - pos;
    instr &= (~0xFFFF);
    DCHECK(is_int16(imm16));
    instr_at_put<FourByteInstr>(pos, instr | (imm16 >> 1));
    return;
  } else if (BRCL == opcode || LARL == opcode || BRASL == opcode) {
    // Immediate is in # of halfwords
    int32_t imm32 = target_pos - pos;
    instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
    instr_at_put<SixByteInstr>(pos, instr | (imm32 >> 1));
    return;
  } else if (LLILF == opcode) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code pointer of generated Code object.
    int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
    instr_at_put<SixByteInstr>(pos, instr | imm32);
    return;
  } else if (BRXHG == opcode) {
    // Immediate is in bits 16-31 of 48 bit instruction
    int32_t imm16 = target_pos - pos;
    instr &= (0xFFFF0000FFFF);  // clear bits 16-31
    imm16 &= 0xFFFF;            // clear high halfword
    imm16 <<= 16;
    // Immediate is in # of halfwords
    instr_at_put<SixByteInstr>(pos, instr | (imm16 >> 1));
    return;
  }
  DCHECK(false);
}
508 
509 // Returns the maximum number of bits given instruction can address.
510 int Assembler::max_reach_from(int pos) {
511  Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
512  // Check which type of instr. In theory, we can return
513  // the values below + 1, given offset is # of halfwords
514  if (BRC == opcode || BRCT == opcode || BRCTG == opcode|| BRXH == opcode ||
515  BRXHG == opcode) {
516  return 16;
517  } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
518  BRASL == opcode) {
519  return 31; // Using 31 as workaround instead of 32 as
520  // is_intn(x,32) doesn't work on 32-bit platforms.
521  // llilf: Emitted label constant, not part of
522  // a branch (regexp PushBacktrack).
523  }
524  DCHECK(false);
525  return 16;
526 }
527 
// Bind label |L| to code position |pos|, patching every instruction on
// the label's link chain to branch/refer to |pos|.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
#ifdef DEBUG
    // Only used by the DCHECK below; guard so release builds don't warn.
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
#endif
    next(L);  // call next before overwriting link with target at fixup_pos
    DCHECK(is_intn(offset, maxReach));
    target_at_put(fixup_pos, pos, &is_branch);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
547 
// Bind |L| to the current pc offset.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
552 
553 void Assembler::next(Label* L) {
554  DCHECK(L->is_linked());
555  int link = target_at(L->pos());
556  if (link == kEndOfChain) {
557  L->Unuse();
558  } else {
559  DCHECK_GE(link, 0);
560  L->link_to(link);
561  }
562 }
563 
564 bool Assembler::is_near(Label* L, Condition cond) {
565  DCHECK(L->is_bound());
566  if (L->is_bound() == false) return false;
567 
568  int maxReach = ((cond == al) ? 26 : 16);
569  int offset = L->pos() - pc_offset();
570 
571  return is_intn(offset, maxReach);
572 }
573 
// Returns the position a branch at the current pc should encode for |L|:
// the real target when bound, otherwise the previous link-chain entry
// (and the current pc becomes the new chain head).
int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow. See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}
593 
// Load into |r1| the offset of label |L| relative to the Code object
// header (via llilf). For unbound labels a link-chain entry is emitted
// and later patched by target_at_put (LLILF case).
void Assembler::load_label_offset(Register r1, Label* L) {
  int target_pos;
  int constant;
  if (L->is_bound()) {
    target_pos = L->pos();
    constant = target_pos + (Code::kHeaderSize - kHeapObjectTag);
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using branch to self to mark the first reference
      // should avoid most instances of branch offset overflow. See
      // target_at() for where this is converted back to kEndOfChain.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());

    // Encode the chain link as a (negative or zero) delta; fixed up later.
    constant = target_pos - pc_offset();
  }
  llilf(r1, Operand(constant));
}
616 
617 // Pseudo op - branch on condition
618 void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
619  int offset_in_halfwords = branch_offset / 2;
620  if (is_bound && is_int16(offset_in_halfwords)) {
621  brc(c, Operand(offset_in_halfwords)); // short jump
622  } else {
623  brcl(c, Operand(offset_in_halfwords)); // long jump
624  }
625 }
626 
627 // Exception-generating instructions and debugging support.
628 // Stops with a non-negative code less than kNumOfWatchedStops support
629 // enabling/disabling and a counter feature. See simulator-s390.h .
630 void Assembler::stop(const char* msg, Condition cond, int32_t code,
631  CRegister cr) {
632  if (cond != al) {
633  Label skip;
634  b(NegateCondition(cond), &skip, Label::kNear);
635  bkpt(0);
636  bind(&skip);
637  } else {
638  bkpt(0);
639  }
640 }
641 
// Emit a software breakpoint recognized by GDB. |imm16| is accepted for
// interface compatibility but is not encoded: the emitted instruction is
// always the 2-byte pattern 0x0001.
void Assembler::bkpt(uint32_t imm16) {
  // GDB software breakpoint instruction
  emit2bytes(0x0001);
}
646 
647 // Pseudo instructions.
648 void Assembler::nop(int type) {
649  switch (type) {
650  case 0:
651  lr(r0, r0);
652  break;
653  case DEBUG_BREAK_NOP:
654  // TODO(john.yan): Use a better NOP break
655  oill(r3, Operand::Zero());
656  break;
657  default:
658  UNIMPLEMENTED();
659  }
660 }
661 
// -------------------------
// Load Address Instructions
// -------------------------
// Load Address Relative Long against a label: encodes the current
// displacement to |l| (links the label when unbound).
void Assembler::larl(Register r1, Label* l) {
  larl(r1, Operand(branch_offset(l)));
}
669 
670 void Assembler::EnsureSpaceFor(int space_needed) {
671  if (buffer_space() <= (kGap + space_needed)) {
672  GrowBuffer(space_needed);
673  }
674 }
675 
// Emit a relative call (BRASL) to |target|. The operand emitted is the
// code-target table index, later resolved via the recorded reloc info.
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  EnsureSpace ensure_space(this);

  RecordRelocInfo(rmode);
  int32_t target_index = AddCodeTarget(target);
  brasl(r14, Operand(target_index));
}
684 
// Emit a call to a code stub whose Code object does not exist yet: a
// placeholder code-target index is emitted now and patched in
// AllocateAndInstallRequestedHeapObjects once the stub is materialized.
void Assembler::call(CodeStub* stub) {
  EnsureSpace ensure_space(this);
  RequestHeapObject(HeapObjectRequest(stub));
  RecordRelocInfo(RelocInfo::CODE_TARGET);
  int32_t target_index = AddCodeTarget(Handle<Code>());
  brasl(r14, Operand(target_index));
}
692 
// Emit a conditional relative jump (BRCL) to |target|, encoded as a
// code-target table index like call().
void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
                     Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  EnsureSpace ensure_space(this);

  RecordRelocInfo(rmode);
  int32_t target_index = AddCodeTarget(target);
  brcl(cond, Operand(target_index));
}
702 
703 // end of S390instructions
704 
705 bool Assembler::IsNop(SixByteInstr instr, int type) {
706  DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
707  if (DEBUG_BREAK_NOP == type) {
708  return ((instr & 0xFFFFFFFF) == 0xA53B0000); // oill r3, 0
709  }
710  return ((instr & 0xFFFF) == 0x1800); // lr r0,r0
711 }
712 
// dummy instruction reserved for special use.
// Emitted only under the simulator (6-byte encoding built around opcode
// 0xE353 with the given register/displacement fields); on real hardware
// this emits nothing.
void Assembler::dumy(int r1, int x2, int b2, int d2) {
#if defined(USE_SIMULATOR)
  int op = 0xE353;
  // Assemble the RXY-style fields: opcode halves at both ends, r1/x2/b2
  // nibbles, and the 20-bit displacement split into DL (12) / DH (8).
  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
                  (static_cast<uint64_t>(r1) & 0xF) * B36 |
                  (static_cast<uint64_t>(x2) & 0xF) * B32 |
                  (static_cast<uint64_t>(b2) & 0xF) * B28 |
                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
                  (static_cast<uint64_t>(op & 0x00FF));
  emit6bytes(code);
#endif
}
727 
// Grow the code buffer so at least |needed| more bytes fit, copying both
// the instructions (at the front) and the reloc info (at the back) into
// the new allocation and re-basing all internal pointers.
void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: 4KB minimum, doubling up to 1MB, then linear
  // 1MB increments.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (desc.buffer_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data: instructions stay at the same offset from the start,
  // reloc info stays at the same offset from the end.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.
}
778 
// Emit a raw byte into the instruction stream.
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

// Emit a raw 32-bit value.
void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}

// Emit a raw 64-bit value.
void Assembler::dq(uint64_t value) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}

// Emit a raw pointer-sized value.
void Assembler::dp(uintptr_t data) {
  CheckBuffer();
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}
802 
803 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
804  if (!ShouldRecordRelocInfo(rmode)) return;
805  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
806  relocations_.push_back(rinfo);
807 }
808 
// Emit the address of a bound label as a pointer-sized internal
// reference (e.g. a jump-table entry).
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  DCHECK(label->is_bound());
  // Keep internal references relative until EmitRelocations.
  dp(position);
}
817 
// Flush the deferred relocation queue into the reloc-info stream, and
// convert buffer-relative internal references into absolute addresses
// now that all labels are bound.
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = reinterpret_cast<Address>(buffer_) + it->position();
    RelocInfo rinfo(pc, rmode, it->data(), Code());

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry: stored value is a buffer offset; rebase it.
      Address pos = Memory<Address>(pc);
      Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence: the encoded target is a buffer offset; rebase it.
      Address pos = target_address_at(pc, 0);
      set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }
}
842 
843 } // namespace internal
844 } // namespace v8
845 #endif // V8_TARGET_ARCH_S390
Definition: libplatform.h:13