#include "src/compiler/memory-optimizer.h"

#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"

namespace v8 {
namespace internal {
namespace compiler {

MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
                                 PoisoningMitigationLevel poisoning_level,
                                 AllocationFolding allocation_folding)
    : jsgraph_(jsgraph),
      empty_state_(AllocationState::Empty(zone)),
      pending_(zone),
      tokens_(zone),
      zone_(zone),
      graph_assembler_(jsgraph, nullptr, nullptr, zone),
      poisoning_level_(poisoning_level),
      allocation_folding_(allocation_folding) {}
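
// Drives the pass: seeds the worklist with the effect uses of the graph's
// start node and then processes (node, state) tokens until none are left.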
void MemoryOptimizer::Optimize() {
  EnqueueUses(graph()->start(), empty_state());
  while (!tokens_.empty()) {
    Token const token = tokens_.front();
    tokens_.pop();
    VisitNode(token.node, token.state);
  }
  DCHECK(pending_.empty());
  DCHECK(tokens_.empty());
}
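
// An AllocationGroup tracks the nodes that refer to objects carved out of one
// (possibly folded) allocation. ComputeWriteBarrierKind() uses Contains() to
// elide write barriers for stores into objects of a new-space group.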
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  PretenureFlag pretenure,
                                                  Zone* zone)
    : node_ids_(zone), pretenure_(pretenure), size_(nullptr) {
  node_ids_.insert(node->id());
}

MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
                                                  PretenureFlag pretenure,
                                                  Node* size, Zone* zone)
    : node_ids_(zone), pretenure_(pretenure), size_(size) {
  node_ids_.insert(node->id());
}

void MemoryOptimizer::AllocationGroup::Add(Node* node) {
  node_ids_.insert(node->id());
}

bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
  return node_ids_.find(node->id()) != node_ids_.end();
}

MemoryOptimizer::AllocationState::AllocationState()
    : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
    : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}

MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
                                                  intptr_t size, Node* top)
    : group_(group), size_(size), top_(top) {}

bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
  return group() && group()->IsNewSpaceAllocation();
}
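
// Dispatches each effectful node to the matching Visit* handler; any opcode
// that falls through to the default case must not produce an effect output.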
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
  DCHECK(!node->IsDead());
  DCHECK_LT(0, node->op()->EffectInputCount());
  switch (node->opcode()) {
    case IrOpcode::kAllocate:
      // Allocate nodes were purged from the graph before this pass runs.
      UNREACHABLE();
    case IrOpcode::kAllocateRaw:
      return VisitAllocateRaw(node, state);
    case IrOpcode::kCall:
      return VisitCall(node, state);
    case IrOpcode::kCallWithCallerSavedRegisters:
      return VisitCallWithCallerSavedRegisters(node, state);
    case IrOpcode::kLoadElement:
      return VisitLoadElement(node, state);
    case IrOpcode::kLoadField:
      return VisitLoadField(node, state);
    case IrOpcode::kStoreElement:
      return VisitStoreElement(node, state);
    case IrOpcode::kStoreField:
      return VisitStoreField(node, state);
    case IrOpcode::kDeoptimizeIf:
    case IrOpcode::kDeoptimizeUnless:
    case IrOpcode::kIfException:
    case IrOpcode::kLoad:
    case IrOpcode::kProtectedLoad:
    case IrOpcode::kUnalignedLoad:
    case IrOpcode::kStore:
    case IrOpcode::kProtectedStore:
    case IrOpcode::kUnalignedStore:
    case IrOpcode::kRetain:
    case IrOpcode::kUnsafePointerAdd:
    case IrOpcode::kDebugBreak:
    case IrOpcode::kUnreachable:
    case IrOpcode::kWord32PoisonOnSpeculation:
    case IrOpcode::kWord64PoisonOnSpeculation:
      return VisitOtherEffect(node, state);
    default:
      break;
  }
  DCHECK_EQ(0, node->op()->EffectOutputCount());
}

#define __ gasm()->
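
// Lowers a kAllocateRaw node to an inline bump-pointer allocation in the
// chosen space, with an out-of-line call to the allocation stub as the slow
// path. Small constant-size allocations may additionally be folded into the
// allocation group of the dominating allocation recorded in {state}.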
void MemoryOptimizer::VisitAllocateRaw(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  Node* value;
  Node* size = node->InputAt(0);
  Node* effect = node->InputAt(1);
  Node* control = node->InputAt(2);

  gasm()->Reset(effect, control);

  PretenureFlag pretenure = PretenureFlagOf(node->op());

  // Propagate tenuring between allocations: when an old-space object stores a
  // newly allocated child object, the child allocation is also pretenured to
  // old space.
  if (pretenure == TENURED) {
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
        Node* const child = user->InputAt(1);
        if (child->opcode() == IrOpcode::kAllocateRaw &&
            PretenureFlagOf(child->op()) == NOT_TENURED) {
          NodeProperties::ChangeOp(child, node->op());
          break;
        }
      }
    }
  } else {
    DCHECK_EQ(NOT_TENURED, pretenure);
    for (Edge const edge : node->use_edges()) {
      Node* const user = edge.from();
      if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
        Node* const parent = user->InputAt(0);
        if (parent->opcode() == IrOpcode::kAllocateRaw &&
            PretenureFlagOf(parent->op()) == TENURED) {
          pretenure = TENURED;
          break;
        }
      }
    }
  }
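
  // Determine the top/limit addresses of the space we allocate in.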
  Node* top_address = __ ExternalConstant(
      pretenure == NOT_TENURED
          ? ExternalReference::new_space_allocation_top_address(isolate())
          : ExternalReference::old_space_allocation_top_address(isolate()));
  Node* limit_address = __ ExternalConstant(
      pretenure == NOT_TENURED
          ? ExternalReference::new_space_allocation_limit_address(isolate())
          : ExternalReference::old_space_allocation_limit_address(isolate()));
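
  // Check if we can fold this allocation into a previous allocation
  // represented by the incoming {state}.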
  IntPtrMatcher m(size);
  if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
    intptr_t const object_size = m.Value();
    if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
        state->size() <= kMaxRegularHeapObjectSize - object_size &&
        state->group()->pretenure() == pretenure) {
      // We can fold this allocation into the group represented by {state}.
      // Compute the upper bound for the new state.
      intptr_t const state_size = state->size() + object_size;

      // Update the group's reservation check to the new maximum upper bound.
      AllocationGroup* const group = state->group();
      if (machine()->Is64()) {
        if (OpParameter<int64_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(group->size(),
                                   common()->Int64Constant(state_size));
        }
      } else {
        if (OpParameter<int32_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(
              group->size(),
              common()->Int32Constant(static_cast<int32_t>(state_size)));
        }
      }

      // Update the allocation top with the new object allocation.
      Node* top = __ IntAdd(state->top(), size);
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the effective inner allocated address and extend the group.
      value = __ BitcastWordToTagged(
          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
      group->Add(value);
      state = AllocationState::Open(group, state_size, top, zone());
    } else {
      auto call_runtime = __ MakeDeferredLabel();
      auto done = __ MakeLabel(MachineType::PointerRepresentation());

      // Setup a mutable reservation size node; it gets patched whenever
      // another allocation is folded into this group.
      Node* size = __ UniqueIntPtrConstant(object_size);

      // Load allocation top and limit and check whether the reservation still
      // fits below the limit.
      Node* top =
          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
      Node* limit =
          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
      Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
      __ GotoIfNot(check, &call_runtime);
      __ Goto(&done, top);

      __ Bind(&call_runtime);
      Node* target = pretenure == NOT_TENURED
                         ? __ AllocateInNewSpaceStubConstant()
                         : __ AllocateInOldSpaceStubConstant();
      if (!allocate_operator_.is_set()) {
        auto descriptor = AllocateDescriptor{};
        auto call_descriptor = Linkage::GetStubCallDescriptor(
            graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
            CallDescriptor::kCanUseRoots, Operator::kNoThrow);
        allocate_operator_.set(common()->Call(call_descriptor));
      }
      Node* vfalse = __ Call(allocate_operator_.get(), target, size);
      vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
      __ Goto(&done, vfalse);

      __ Bind(&done);

      // Bump the top pointer past the new object and compute its tagged
      // address.
      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);
      value = __ BitcastWordToTagged(
          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));

      // Start a new allocation group that later allocations may be folded
      // into.
      AllocationGroup* group =
          new (zone()) AllocationGroup(value, pretenure, size, zone());
      state = AllocationState::Open(group, object_size, top, zone());
    }
  } else {
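    // {size} is not a small compile-time constant, so emit a plain
    // bump-pointer allocation with a runtime fallback; the resulting group is
    // closed immediately and nothing can be folded into it.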
    auto call_runtime = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);

    // Load allocation top and limit.
    Node* top =
        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
    Node* limit =
        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

    // Compute the new top and check whether the bump pointer allocation fits.
    Node* new_top = __ IntAdd(top, size);
    Node* check = __ UintLessThan(new_top, limit);
    __ GotoIfNot(check, &call_runtime);
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             top_address, __ IntPtrConstant(0), new_top);
    __ Goto(&done, __ BitcastWordToTagged(
                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

    __ Bind(&call_runtime);
    Node* target = pretenure == NOT_TENURED
                       ? __ AllocateInNewSpaceStubConstant()
                       : __ AllocateInOldSpaceStubConstant();
    if (!allocate_operator_.is_set()) {
      auto descriptor = AllocateDescriptor{};
      auto call_descriptor = Linkage::GetStubCallDescriptor(
          graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
          CallDescriptor::kCanUseRoots, Operator::kNoThrow);
      allocate_operator_.set(common()->Call(call_descriptor));
    }
    __ Goto(&done, __ Call(allocate_operator_.get(), target, size));

    __ Bind(&done);
    value = done.PhiAt(0);

    // This allocation can never be folded into, so close the group right away.
    AllocationGroup* group =
        new (zone()) AllocationGroup(value, pretenure, zone());
    state = AllocationState::Closed(group, zone());
  }
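
  // Extract the resulting effect and control chains from the graph assembler.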
  effect = __ ExtractCurrentEffect();
  control = __ ExtractCurrentControl();

  // Replace all effect uses of {node} with {effect} (enqueueing them for
  // further processing) and all value/control uses with {value}/{control}.
  for (Edge edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
      edge.UpdateTo(effect);
    } else if (NodeProperties::IsValueEdge(edge)) {
      edge.UpdateTo(value);
    } else {
      DCHECK(NodeProperties::IsControlEdge(edge));
      edge.UpdateTo(control);
    }
  }

  // Kill the {node} to make sure no dangling dead uses are left behind.
  node->Kill();
}
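
// Calls that are not marked kNoAllocate may allocate (and thus trigger a GC),
// so the allocation state propagated past them is reset to the empty state.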
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCall, node->opcode());
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitCallWithCallerSavedRegisters(
    Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kCallWithCallerSavedRegisters, node->opcode());
  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
    state = empty_state();
  }
  EnqueueUses(node, state);
}
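
// Element and field loads are lowered to raw machine loads: the index/offset
// is turned into a byte offset, and, depending on the poisoning level and the
// access's load sensitivity, the load may be emitted as a PoisonedLoad.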
void MemoryOptimizer::VisitLoadElement(Node* node,
                                       AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* index = node->InputAt(1);
  node->ReplaceInput(1, ComputeIndex(access, index));
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  if (NeedsPoisoning(access.load_sensitivity) &&
      access.machine_type.representation() !=
          MachineRepresentation::kTaggedPointer) {
    NodeProperties::ChangeOp(node,
                             machine()->PoisonedLoad(access.machine_type));
  } else {
    NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  }
  EnqueueUses(node, state);
}
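
// Stores are lowered to raw machine stores; the write barrier requirement is
// recomputed from the allocation state of the object being written into.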
void MemoryOptimizer::VisitStoreElement(Node* node,
                                        AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  node->ReplaceInput(1, ComputeIndex(access, index));
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitStoreField(Node* node,
                                      AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* object = node->InputAt(0);
  WriteBarrierKind write_barrier_kind =
      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph()->zone(), 1, offset);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  EnqueueUses(node, state);
}

void MemoryOptimizer::VisitOtherEffect(Node* node,
                                       AllocationState const* state) {
  EnqueueUses(node, state);
}
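
// Converts an element index into a byte offset, scaling by the element size
// and adding the (untagged) header size of the backing store.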
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
  int const element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = graph()->NewNode(machine()->WordShl(), index,
                             jsgraph()->IntPtrConstant(element_size_shift));
  }
  int const fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = graph()->NewNode(machine()->IntAdd(), index,
                             jsgraph()->IntPtrConstant(fixed_offset));
  }
  return index;
}
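
// The write barrier can be elided for stores into an object that this pass
// just allocated in new space, i.e. an object in the current new-space group.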
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
    Node* object, AllocationState const* state,
    WriteBarrierKind write_barrier_kind) {
  if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) {
    write_barrier_kind = kNoWriteBarrier;
  }
  return write_barrier_kind;
}
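
// Merges the allocation states that flow into an EffectPhi: if all
// predecessors agree on the state it is kept; if they only agree on the
// allocation group, the group is kept but closed; otherwise the merge
// conservatively restarts from the empty state.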
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
    AllocationStates const& states) {
  // Check if all states are the same, or at least if all allocation states
  // belong to the same allocation group.
  AllocationState const* state = states.front();
  AllocationGroup* group = state->group();
  for (size_t i = 1; i < states.size(); ++i) {
    if (states[i] != state) state = nullptr;
    if (states[i]->group() != group) group = nullptr;
  }
  if (state == nullptr) {
    if (group != nullptr) {
      // We cannot fold any more allocations into this group, but we can still
      // eliminate write barriers on stores into this group.
      state = AllocationState::Closed(group, zone());
    } else {
      // The states stem from different allocation groups.
      state = empty_state();
    }
  }
  return state;
}
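
// EffectPhi inputs arrive one at a time; pending_ collects the incoming
// allocation states until all inputs of the corresponding merge are known.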
void MemoryOptimizer::EnqueueMerge(Node* node, int index,
                                   AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
  int const input_count = node->InputCount() - 1;
  DCHECK_LT(0, input_count);
  Node* const control = node->InputAt(input_count);
  if (control->opcode() == IrOpcode::kLoop) {
    // For loops we always start with an empty state at the beginning of the
    // loop body.
    if (index == 0) EnqueueUses(node, empty_state());
  } else {
    DCHECK_EQ(IrOpcode::kMerge, control->opcode());
    // Look up (or create) the pending merge for this EffectPhi and record the
    // state for the current input.
    NodeId const id = node->id();
    auto it = pending_.find(id);
    if (it == pending_.end()) {
      it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
    }
    it->second.push_back(state);
    // Once states for all inputs are available, merge them, drop the pending
    // entry and enqueue the uses of the EffectPhi {node}.
    if (it->second.size() == static_cast<size_t>(input_count)) {
      state = MergeStates(it->second);
      EnqueueUses(node, state);
      pending_.erase(it);
    }
  }
}
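
// Effect uses are enqueued directly as tokens; uses that are EffectPhis are
// routed through EnqueueMerge so their incoming states can be combined first.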
void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
  for (Edge const edge : node->use_edges()) {
    if (NodeProperties::IsEffectEdge(edge)) {
      EnqueueUse(edge.from(), edge.index(), state);
    }
  }
}

void MemoryOptimizer::EnqueueUse(Node* node, int index,
                                 AllocationState const* state) {
  if (node->opcode() == IrOpcode::kEffectPhi) {
    // An EffectPhi represents a merge of different effect chains, which needs
    // special handling depending on whether the merge is part of a loop or a
    // normal control join.
    EnqueueMerge(node, index, state);
  } else {
    Token token = {node, state};
    tokens_.push(token);
  }
}

Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }

Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }

CommonOperatorBuilder* MemoryOptimizer::common() const {
  return jsgraph()->common();
}

MachineOperatorBuilder* MemoryOptimizer::machine() const {
  return jsgraph()->machine();
}
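
// Decides whether a load of the given sensitivity must be poisoned under the
// configured mitigation level; "safe" loads are never poisoned.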
bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
  if (load_sensitivity == LoadSensitivity::kSafe) return false;
  switch (poisoning_level_) {
    case PoisoningMitigationLevel::kDontPoison:
      return false;
    case PoisoningMitigationLevel::kPoisonAll:
      return true;
    case PoisoningMitigationLevel::kPoisonCriticalOnly:
      return load_sensitivity == LoadSensitivity::kCritical;
  }
  UNREACHABLE();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8