1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_COMPILER_BACKEND_IL_H_
6#define RUNTIME_VM_COMPILER_BACKEND_IL_H_
7
8#if defined(DART_PRECOMPILED_RUNTIME)
9#error "AOT runtime should not use compiler sources (including header files)"
10#endif // defined(DART_PRECOMPILED_RUNTIME)
11
12#include <memory>
13#include <tuple>
14#include <type_traits>
15#include <utility>
16
17#include "vm/allocation.h"
18#include "vm/code_descriptors.h"
19#include "vm/compiler/backend/compile_type.h"
20#include "vm/compiler/backend/il_serializer.h"
21#include "vm/compiler/backend/locations.h"
22#include "vm/compiler/backend/slot.h"
23#include "vm/compiler/compiler_pass.h"
24#include "vm/compiler/compiler_state.h"
25#include "vm/compiler/ffi/marshaller.h"
26#include "vm/compiler/ffi/native_calling_convention.h"
27#include "vm/compiler/ffi/native_location.h"
28#include "vm/compiler/ffi/native_type.h"
29#include "vm/compiler/method_recognizer.h"
30#include "vm/dart_entry.h"
31#include "vm/flags.h"
32#include "vm/growable_array.h"
33#include "vm/native_entry.h"
34#include "vm/object.h"
35#include "vm/parser.h"
36#include "vm/runtime_entry.h"
37#include "vm/static_type_exactness_state.h"
38#include "vm/token_position.h"
39
40namespace dart {
41
42class BaseTextBuffer;
43class BinaryFeedback;
44class BitVector;
45class BlockEntryInstr;
46class BlockEntryWithInitialDefs;
47class BoxIntegerInstr;
48class CallTargets;
49class CatchBlockEntryInstr;
50class CheckBoundBase;
51class ComparisonInstr;
52class Definition;
53class Environment;
54class FlowGraph;
55class FlowGraphCompiler;
56class FlowGraphVisitor;
57class ForwardInstructionIterator;
58class Instruction;
59class InstructionVisitor;
60class LocalVariable;
61class LoopInfo;
62class MoveSchedule;
63class ParsedFunction;
64class Range;
65class RangeAnalysis;
66class RangeBoundary;
67class TypeUsageInfo;
68class UnboxIntegerInstr;
69
70namespace compiler {
71class BlockBuilder;
72struct TableSelector;
73} // namespace compiler
74
// A use of a definition as an input of another instruction. All uses of the
// same definition are linked together into an intrusive use list through
// previous_use_/next_use_.
class Value : public ZoneAllocated {
 public:
  // A forward iterator that allows removing the current value from the
  // underlying use list during iteration.
  class Iterator {
   public:
    explicit Iterator(Value* head) : next_(head) { Advance(); }
    Value* Current() const { return current_; }
    bool Done() const { return current_ == nullptr; }
    void Advance() {
      // Pre-fetch next on advance and cache it.
      current_ = next_;
      if (next_ != nullptr) next_ = next_->next_use();
    }

   private:
    Value* current_;
    Value* next_;
  };

  explicit Value(Definition* definition)
      : definition_(definition),
        previous_use_(nullptr),
        next_use_(nullptr),
        instruction_(nullptr),
        use_index_(-1),
        reaching_type_(nullptr) {}

  // The definition this value is a use of.
  Definition* definition() const { return definition_; }
  void set_definition(Definition* definition) {
    definition_ = definition;
    // Clone the reaching type if there was one and the owner no longer matches
    // this value's definition.
    SetReachingType(reaching_type_);
  }

  // Neighbors in the definition's intrusive use list.
  Value* previous_use() const { return previous_use_; }
  void set_previous_use(Value* previous) { previous_use_ = previous; }

  Value* next_use() const { return next_use_; }
  void set_next_use(Value* next) { next_use_ = next; }

  // True if this value is the only element of its use list.
  bool IsSingleUse() const {
    return (next_use_ == nullptr) && (previous_use_ == nullptr);
  }

  // The instruction that uses this value as an input, and the index of this
  // value among that instruction's inputs (-1 until SetInputAt assigns it).
  Instruction* instruction() const { return instruction_; }
  void set_instruction(Instruction* instruction) { instruction_ = instruction; }

  intptr_t use_index() const { return use_index_; }
  void set_use_index(intptr_t index) { use_index_ = index; }

  // Adds [value] to the use list headed by [*list].
  static void AddToList(Value* value, Value** list);
  // Unlinks this value from its definition's use list.
  void RemoveFromUseList();

  // Change the definition after use lists have been computed.
  inline void BindTo(Definition* definition);
  inline void BindToEnvironment(Definition* definition);

  // Creates a fresh use of the same definition; the reaching type is
  // deliberately not carried over (contrast with CopyWithType below).
  Value* Copy(Zone* zone) { return new (zone) Value(definition_); }

  // CopyWithType() must only be used when the new Value is dominated by
  // the original Value.
  Value* CopyWithType(Zone* zone) {
    Value* copy = new (zone) Value(definition_);
    copy->reaching_type_ = reaching_type_;
    return copy;
  }
  Value* CopyWithType() { return CopyWithType(Thread::Current()->zone()); }

  CompileType* Type();

  // Type refinement established at this particular use site, if any.
  CompileType* reaching_type() const { return reaching_type_; }
  void SetReachingType(CompileType* type);
  void RefineReachingType(CompileType* type);

#if defined(INCLUDE_IL_PRINTER)
  void PrintTo(BaseTextBuffer* f) const;
#endif  // defined(INCLUDE_IL_PRINTER)

  const char* ToCString() const;

  // True if the compile type of this use is exactly Smi.
  bool IsSmiValue() { return Type()->ToCid() == kSmiCid; }

  // Return true if the value represents a constant.
  bool BindsToConstant() const;

  // Return true if the value represents the constant null.
  bool BindsToConstantNull() const;

  // Assert if BindsToConstant() is false, otherwise returns the constant value.
  const Object& BoundConstant() const;

  // Return true if the value represents Smi constant.
  bool BindsToSmiConstant() const;

  // Return value of represented Smi constant.
  intptr_t BoundSmiConstant() const;

  // Return true if storing the value into a heap object requires applying the
  // write barrier. Can change the reaching type of the Value or other Values
  // in the same chain of redefinitions.
  bool NeedsWriteBarrier();

  bool Equals(const Value& other) const;

  // Returns true if this |Value| can evaluate to the given |value| during
  // execution.
  inline bool CanBe(const Object& value);

 private:
  friend class FlowGraphPrinter;

  Definition* definition_;
  Value* previous_use_;
  Value* next_use_;
  Instruction* instruction_;
  intptr_t use_index_;

  CompileType* reaching_type_;

  DISALLOW_COPY_AND_ASSIGN(Value);
};
198
199// Represents a range of class-ids for use in class checks and polymorphic
200// dispatches. The range includes both ends, i.e. it is [cid_start, cid_end].
201struct CidRange : public ZoneAllocated {
202 CidRange(intptr_t cid_start_arg, intptr_t cid_end_arg)
203 : cid_start(cid_start_arg), cid_end(cid_end_arg) {}
204 CidRange() : cid_start(kIllegalCid), cid_end(kIllegalCid) {}
205
206 bool IsSingleCid() const { return cid_start == cid_end; }
207 bool Contains(intptr_t cid) const {
208 return cid_start <= cid && cid <= cid_end;
209 }
210 int32_t Extent() const { return cid_end - cid_start; }
211
212 // The number of class ids this range covers.
213 intptr_t size() const { return cid_end - cid_start + 1; }
214
215 bool IsIllegalRange() const {
216 return cid_start == kIllegalCid && cid_end == kIllegalCid;
217 }
218
219 intptr_t cid_start;
220 intptr_t cid_end;
221
222 DISALLOW_COPY_AND_ASSIGN(CidRange);
223};
224
225struct CidRangeValue {
226 CidRangeValue(intptr_t cid_start_arg, intptr_t cid_end_arg)
227 : cid_start(cid_start_arg), cid_end(cid_end_arg) {}
228 CidRangeValue(const CidRange& other) // NOLINT
229 : cid_start(other.cid_start), cid_end(other.cid_end) {}
230
231 bool IsSingleCid() const { return cid_start == cid_end; }
232 bool Contains(intptr_t cid) const {
233 return cid_start <= cid && cid <= cid_end;
234 }
235 int32_t Extent() const { return cid_end - cid_start; }
236
237 // The number of class ids this range covers.
238 intptr_t size() const { return cid_end - cid_start + 1; }
239
240 bool IsIllegalRange() const {
241 return cid_start == kIllegalCid && cid_end == kIllegalCid;
242 }
243
244 bool Equals(const CidRangeValue& other) const {
245 return cid_start == other.cid_start && cid_end == other.cid_end;
246 }
247
248 intptr_t cid_start;
249 intptr_t cid_end;
250};
251
252typedef MallocGrowableArray<CidRangeValue> CidRangeVector;
253
254class CidRangeVectorUtils : public AllStatic {
255 public:
256 static bool ContainsCid(const CidRangeVector& ranges, intptr_t cid) {
257 for (const CidRangeValue& range : ranges) {
258 if (range.Contains(cid)) {
259 return true;
260 }
261 }
262 return false;
263 }
264};
265
// Computes and caches class-id range information about the class hierarchy.
// Registers itself as the active hierarchy info of the current thread on
// construction and unregisters on destruction.
class HierarchyInfo : public ThreadStackResource {
 public:
  explicit HierarchyInfo(Thread* thread)
      : ThreadStackResource(thread),
        cid_subtype_ranges_nullable_(),
        cid_subtype_ranges_abstract_nullable_(),
        cid_subtype_ranges_nonnullable_(),
        cid_subtype_ranges_abstract_nonnullable_() {
    // Make this instance discoverable via Thread::hierarchy_info().
    thread->set_hierarchy_info(this);
  }

  ~HierarchyInfo() { thread()->set_hierarchy_info(nullptr); }

  // Returned from FindBestTAVOffset and SplitOnConsistentTypeArguments
  // to denote a failure to find a compatible concrete, finalized class.
  static constexpr intptr_t kNoCompatibleTAVOffset = 0;

  const CidRangeVector& SubtypeRangesForClass(const Class& klass,
                                              bool include_abstract,
                                              bool exclude_null);

  bool InstanceOfHasClassRange(const AbstractType& type,
                               intptr_t* lower_limit,
                               intptr_t* upper_limit);

  // Returns `true` if a simple [CidRange]-based subtype-check can be used to
  // determine if a given instance's type is a subtype of [type].
  //
  // This is the case for [type]s without type arguments or where the type
  // arguments are all dynamic (known as "rare type").
  bool CanUseSubtypeRangeCheckFor(const AbstractType& type);

  // Returns `true` if a combination of [CidRange]-based checks can be used to
  // determine if a given instance's type is a subtype of [type].
  //
  // This is the case for [type]s with type arguments where we are able to do a
  // [CidRange]-based subclass-check against the class and [CidRange]-based
  // subtype-checks against the type arguments.
  //
  // This method should only be called if [CanUseSubtypeRangecheckFor] returned
  // false.
  bool CanUseGenericSubtypeRangeCheckFor(const AbstractType& type);

  // Returns `true` if [type] is a record type which fields can be tested using
  // simple [CidRange]-based subtype-check.
  bool CanUseRecordSubtypeRangeCheckFor(const AbstractType& type);

 private:
  // Does not use any hierarchy information available in the system but computes
  // it via O(n) class table traversal.
  //
  // The boolean parameters denote:
  //   include_abstract : if set, include abstract types (don't care otherwise)
  //   exclude_null     : if set, exclude null types (don't care otherwise)
  void BuildRangesUsingClassTableFor(ClassTable* table,
                                     CidRangeVector* ranges,
                                     const Class& klass,
                                     bool include_abstract,
                                     bool exclude_null);

  // Uses hierarchy information stored in the [Class]'s direct_subclasses() and
  // direct_implementors() arrays, unless that information is not available
  // in which case we fall back to the class table.
  //
  // The boolean parameters denote:
  //   include_abstract : if set, include abstract types (don't care otherwise)
  //   exclude_null     : if set, exclude null types (don't care otherwise)
  void BuildRangesFor(ClassTable* table,
                      CidRangeVector* ranges,
                      const Class& klass,
                      bool include_abstract,
                      bool exclude_null);

  // One cache per (include_abstract, exclude_null) combination.
  std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_nullable_;
  std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_abstract_nullable_;
  std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_nonnullable_;
  std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_abstract_nonnullable_;
};
344
// An embedded container with N elements of type T. Used (with partial
// specialization for N=0) because embedded arrays cannot have size 0.
template <typename T, intptr_t N>
class EmbeddedArray {
 public:
  EmbeddedArray() : elements_() {}

  // Number of elements; fixed at compile time.
  intptr_t length() const { return N; }

  const T& operator[](intptr_t i) const {
    ASSERT(i < length());
    return elements_[i];
  }

  T& operator[](intptr_t i) {
    ASSERT(i < length());
    return elements_[i];
  }

  // Named accessors, expressed in terms of the (asserting) operator[].
  const T& At(intptr_t i) const { return operator[](i); }

  void SetAt(intptr_t i, const T& val) { operator[](i) = val; }

 private:
  T elements_[N];
};
371
// Specialization for N=0: C++ disallows zero-length member arrays, so this
// variant stores nothing and its accessors must never be reached.
template <typename T>
class EmbeddedArray<T, 0> {
 public:
  intptr_t length() const { return 0; }
  const T& operator[](intptr_t i) const {
    UNREACHABLE();
    // Only present so the function can still return a reference.
    // NOTE(review): requires T to be initializable from nullptr (e.g. a
    // pointer type) — confirm if instantiated with non-pointer T.
    static T sentinel = nullptr;
    return sentinel;
  }
  T& operator[](intptr_t i) {
    UNREACHABLE();
    static T sentinel = nullptr;
    return sentinel;
  }
};
387
388// Instructions.
389
390// M is a two argument macro. It is applied to each concrete instruction type
391// name. The concrete instruction classes are the name with Instr concatenated.
392
// Attribute flags attached to each concrete instruction in the
// FOR_EACH_INSTRUCTION list below.
struct InstrAttrs {
  enum Attributes {
    _ = 0,  // No special attributes.
    //
    // The instruction is guaranteed to not trigger GC on a non-exceptional
    // path. If the conditions depend on parameters of the instruction, do not
    // use this attribute but overload CanTriggerGC() instead.
    kNoGC = 1,
  };
};
403
// M is applied as M(Name, Attributes) for every concrete instruction, where
// Attributes is drawn from InstrAttrs above (kNoGC when the instruction is
// guaranteed not to trigger GC on a non-exceptional path, _ otherwise).
#define FOR_EACH_INSTRUCTION(M)                                                \
  M(GraphEntry, kNoGC)                                                         \
  M(JoinEntry, kNoGC)                                                          \
  M(TargetEntry, kNoGC)                                                        \
  M(FunctionEntry, kNoGC)                                                      \
  M(NativeEntry, kNoGC)                                                        \
  M(OsrEntry, kNoGC)                                                           \
  M(IndirectEntry, kNoGC)                                                      \
  M(CatchBlockEntry, kNoGC)                                                    \
  M(Phi, kNoGC)                                                                \
  M(Redefinition, kNoGC)                                                       \
  M(ReachabilityFence, kNoGC)                                                  \
  M(Parameter, kNoGC)                                                          \
  M(NativeParameter, kNoGC)                                                    \
  M(LoadIndexedUnsafe, kNoGC)                                                  \
  M(StoreIndexedUnsafe, kNoGC)                                                 \
  M(MemoryCopy, kNoGC)                                                         \
  M(TailCall, kNoGC)                                                           \
  M(ParallelMove, kNoGC)                                                       \
  M(MoveArgument, kNoGC)                                                       \
  M(Return, kNoGC)                                                             \
  M(NativeReturn, kNoGC)                                                       \
  M(Throw, kNoGC)                                                              \
  M(ReThrow, kNoGC)                                                            \
  M(Stop, kNoGC)                                                               \
  M(Goto, kNoGC)                                                               \
  M(IndirectGoto, kNoGC)                                                       \
  M(Branch, kNoGC)                                                             \
  M(AssertAssignable, _)                                                       \
  M(AssertSubtype, _)                                                          \
  M(AssertBoolean, _)                                                          \
  M(SpecialParameter, kNoGC)                                                   \
  M(ClosureCall, _)                                                            \
  M(FfiCall, _)                                                                \
  M(CCall, kNoGC)                                                              \
  M(RawStoreField, kNoGC)                                                      \
  M(InstanceCall, _)                                                           \
  M(PolymorphicInstanceCall, _)                                                \
  M(DispatchTableCall, _)                                                      \
  M(StaticCall, _)                                                             \
  M(LoadLocal, kNoGC)                                                          \
  M(DropTemps, kNoGC)                                                          \
  M(MakeTemp, kNoGC)                                                           \
  M(StoreLocal, kNoGC)                                                         \
  M(StrictCompare, kNoGC)                                                      \
  M(EqualityCompare, kNoGC)                                                    \
  M(RelationalOp, kNoGC)                                                       \
  M(NativeCall, _)                                                             \
  M(DebugStepCheck, _)                                                         \
  M(RecordCoverage, kNoGC)                                                     \
  M(LoadIndexed, kNoGC)                                                        \
  M(LoadCodeUnits, _)                                                          \
  M(StoreIndexed, kNoGC)                                                       \
  M(StoreField, _)                                                             \
  M(LoadStaticField, _)                                                        \
  M(StoreStaticField, kNoGC)                                                   \
  M(BooleanNegate, kNoGC)                                                      \
  M(InstanceOf, _)                                                             \
  M(CreateArray, _)                                                            \
  M(AllocateObject, _)                                                         \
  M(AllocateClosure, _)                                                        \
  M(AllocateRecord, _)                                                         \
  M(AllocateSmallRecord, _)                                                    \
  M(AllocateTypedData, _)                                                      \
  M(LoadField, _)                                                              \
  M(LoadUntagged, kNoGC)                                                       \
  M(LoadClassId, kNoGC)                                                        \
  M(InstantiateType, _)                                                        \
  M(InstantiateTypeArguments, _)                                               \
  M(AllocateContext, _)                                                        \
  M(AllocateUninitializedContext, _)                                           \
  M(CloneContext, _)                                                           \
  M(BinarySmiOp, kNoGC)                                                        \
  M(BinaryInt32Op, kNoGC)                                                      \
  M(HashDoubleOp, kNoGC)                                                       \
  M(HashIntegerOp, kNoGC)                                                      \
  M(UnarySmiOp, kNoGC)                                                         \
  M(UnaryDoubleOp, kNoGC)                                                      \
  M(CheckStackOverflow, _)                                                     \
  M(SmiToDouble, kNoGC)                                                        \
  M(Int32ToDouble, kNoGC)                                                      \
  M(Int64ToDouble, kNoGC)                                                      \
  M(DoubleToInteger, _)                                                        \
  M(DoubleToSmi, kNoGC)                                                        \
  M(DoubleToDouble, kNoGC)                                                     \
  M(DoubleToFloat, kNoGC)                                                      \
  M(FloatToDouble, kNoGC)                                                      \
  M(CheckClass, kNoGC)                                                         \
  M(CheckClassId, kNoGC)                                                       \
  M(CheckSmi, kNoGC)                                                           \
  M(CheckNull, kNoGC)                                                          \
  M(CheckCondition, kNoGC)                                                     \
  M(Constant, kNoGC)                                                           \
  M(UnboxedConstant, kNoGC)                                                    \
  M(CheckEitherNonSmi, kNoGC)                                                  \
  M(BinaryDoubleOp, kNoGC)                                                     \
  M(DoubleTestOp, kNoGC)                                                       \
  M(MathUnary, kNoGC)                                                          \
  M(MathMinMax, kNoGC)                                                         \
  M(Box, _)                                                                    \
  M(Unbox, kNoGC)                                                              \
  M(BoxInt64, _)                                                               \
  M(UnboxInt64, kNoGC)                                                         \
  M(CaseInsensitiveCompare, kNoGC)                                             \
  M(BinaryInt64Op, kNoGC)                                                      \
  M(ShiftInt64Op, kNoGC)                                                       \
  M(SpeculativeShiftInt64Op, kNoGC)                                            \
  M(UnaryInt64Op, kNoGC)                                                       \
  M(CheckArrayBound, kNoGC)                                                    \
  M(GenericCheckBound, kNoGC)                                                  \
  M(CheckWritable, kNoGC)                                                      \
  M(Constraint, kNoGC)                                                         \
  M(StringToCharCode, kNoGC)                                                   \
  M(OneByteStringFromCharCode, kNoGC)                                          \
  M(Utf8Scan, kNoGC)                                                           \
  M(InvokeMathCFunction, kNoGC)                                                \
  M(TruncDivMod, kNoGC)                                                        \
  /*We could be more precise about when these instructions can trigger GC.*/   \
  M(GuardFieldClass, _)                                                        \
  M(GuardFieldLength, _)                                                       \
  M(GuardFieldType, _)                                                         \
  M(IfThenElse, kNoGC)                                                         \
  M(MaterializeObject, _)                                                      \
  M(TestSmi, kNoGC)                                                            \
  M(TestCids, kNoGC)                                                           \
  M(ExtractNthOutput, kNoGC)                                                   \
  M(MakePair, kNoGC)                                                           \
  M(BinaryUint32Op, kNoGC)                                                     \
  M(ShiftUint32Op, kNoGC)                                                      \
  M(SpeculativeShiftUint32Op, kNoGC)                                           \
  M(UnaryUint32Op, kNoGC)                                                      \
  M(BoxUint32, _)                                                              \
  M(UnboxUint32, kNoGC)                                                        \
  M(BoxInt32, _)                                                               \
  M(UnboxInt32, kNoGC)                                                         \
  M(BoxSmallInt, kNoGC)                                                        \
  M(IntConverter, kNoGC)                                                       \
  M(BitCast, kNoGC)                                                            \
  M(Call1ArgStub, _)                                                           \
  M(LoadThread, kNoGC)                                                         \
  M(Deoptimize, kNoGC)                                                         \
  M(SimdOp, kNoGC)                                                             \
  M(Suspend, _)
547
// M is applied as M(Name, Attributes) for every abstract instruction class.
#define FOR_EACH_ABSTRACT_INSTRUCTION(M)                                       \
  M(Allocation, _)                                                             \
  M(ArrayAllocation, _)                                                        \
  M(BinaryIntegerOp, _)                                                        \
  M(BlockEntry, _)                                                             \
  M(BoxInteger, _)                                                             \
  M(Comparison, _)                                                             \
  M(InstanceCallBase, _)                                                       \
  M(ShiftIntegerOp, _)                                                         \
  M(UnaryIntegerOp, _)                                                         \
  M(UnboxInteger, _)
559
// Forward-declares the class for each (concrete and abstract) instruction,
// e.g. "class PhiInstr;" for the Phi entry.
#define FORWARD_DECLARATION(type, attrs) class type##Instr;
FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
FOR_EACH_ABSTRACT_INSTRUCTION(FORWARD_DECLARATION)
#undef FORWARD_DECLARATION
564
// Declares the As##type() downcast helpers and DebugName() for [type].
#define DEFINE_INSTRUCTION_TYPE_CHECK(type)                                    \
  virtual type##Instr* As##type() { return this; }                             \
  virtual const type##Instr* As##type() const { return this; }                 \
  virtual const char* DebugName() const { return #type; }

// Functions required in all concrete instruction classes.
#define DECLARE_INSTRUCTION_NO_BACKEND(type)                                   \
  virtual Tag tag() const { return k##type; }                                  \
  virtual void Accept(InstructionVisitor* visitor);                            \
  DEFINE_INSTRUCTION_TYPE_CHECK(type)

// Backend entry points: location-summary construction and native code
// emission (implemented per architecture).
#define DECLARE_INSTRUCTION_BACKEND()                                          \
  virtual LocationSummary* MakeLocationSummary(Zone* zone, bool optimizing)    \
      const;                                                                   \
  virtual void EmitNativeCode(FlowGraphCompiler* compiler);

// Functions required in all concrete instruction classes.
#define DECLARE_INSTRUCTION(type)                                              \
  DECLARE_INSTRUCTION_NO_BACKEND(type)                                         \
  DECLARE_INSTRUCTION_BACKEND()

// Functions required in all abstract instruction classes.
#define DECLARE_ABSTRACT_INSTRUCTION(type)                                     \
  /* Prevents allocating an instance of abstract instruction */                \
  /* even if it has a concrete base class. */                                  \
  virtual Tag tag() const = 0;                                                 \
  DEFINE_INSTRUCTION_TYPE_CHECK(type)

// Comparison instructions declare EmitComparisonCode instead of a plain
// EmitNativeCode backend entry point.
#define DECLARE_COMPARISON_METHODS                                             \
  virtual LocationSummary* MakeLocationSummary(Zone* zone, bool optimizing)    \
      const;                                                                   \
  virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler,            \
                                       BranchLabels labels);

#define DECLARE_COMPARISON_INSTRUCTION(type)                                   \
  DECLARE_INSTRUCTION_NO_BACKEND(type)                                         \
  DECLARE_COMPARISON_METHODS
602
// Maps a (possibly cv-qualified) type to the type used when serializing it:
// enums are serialized via their underlying integral type, every other type
// is used as-is.
template <typename T, bool is_enum>
struct unwrap_enum {};

// Non-enum case: the type itself is already serializable.
template <typename U>
struct unwrap_enum<U, false> {
  using type = U;
};

// Enum case: serialize through the underlying integral type.
template <typename E>
struct unwrap_enum<E, true> {
  using type = std::underlying_type_t<E>;
};

template <typename T>
using serializable_type_t =
    typename unwrap_enum<std::remove_cv_t<T>, std::is_enum<T>::value>::type;
619
// Helpers used by DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS below to write,
// read and declare a single serialized field.
#define WRITE_INSTRUCTION_FIELD(type, name)                                    \
  s->Write<serializable_type_t<type>>(                                         \
      static_cast<serializable_type_t<type>>(name));
#define READ_INSTRUCTION_FIELD(type, name)                                     \
  , name(static_cast<std::remove_cv_t<type>>(                                  \
        d->Read<serializable_type_t<type>>()))
#define DECLARE_INSTRUCTION_FIELD(type, name) type name;

// Every instruction class should declare its serialization via
// DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS, DECLARE_EMPTY_SERIALIZATION
// or DECLARE_CUSTOM_SERIALIZATION.
// If instruction class has fields which reference other instructions,
// then it should also use DECLARE_EXTRA_SERIALIZATION and serialize
// those references in WriteExtra/ReadExtra methods.
#define DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Instr, BaseClass, FieldList)   \
 public:                                                                       \
  virtual void WriteTo(FlowGraphSerializer* s) {                               \
    BaseClass::WriteTo(s);                                                     \
    FieldList(WRITE_INSTRUCTION_FIELD)                                         \
  }                                                                            \
  explicit Instr(FlowGraphDeserializer* d)                                     \
      : BaseClass(d) FieldList(READ_INSTRUCTION_FIELD) {}                      \
                                                                               \
 private:                                                                      \
  FieldList(DECLARE_INSTRUCTION_FIELD)

// For instructions whose serialization cannot be expressed as a simple field
// list: WriteTo and the deserializing constructor are defined manually.
#define DECLARE_CUSTOM_SERIALIZATION(Instr)                                    \
 public:                                                                       \
  virtual void WriteTo(FlowGraphSerializer* s);                                \
  explicit Instr(FlowGraphDeserializer* d);

// For instructions that add no serialized state beyond their base class.
#define DECLARE_EMPTY_SERIALIZATION(Instr, BaseClass)                          \
 public:                                                                       \
  explicit Instr(FlowGraphDeserializer* d) : BaseClass(d) {}

// Declares WriteExtra/ReadExtra for serializing references to other
// instructions (see comment above).
#define DECLARE_EXTRA_SERIALIZATION                                            \
 public:                                                                       \
  virtual void WriteExtra(FlowGraphSerializer* s);                             \
  virtual void ReadExtra(FlowGraphDeserializer* d);
659
// IL printing support: when INCLUDE_IL_PRINTER is not defined these macros
// expand to nothing so the declarations disappear entirely.
#if defined(INCLUDE_IL_PRINTER)
#define PRINT_TO_SUPPORT virtual void PrintTo(BaseTextBuffer* f) const;
#define PRINT_OPERANDS_TO_SUPPORT                                              \
  virtual void PrintOperandsTo(BaseTextBuffer* f) const;
// Exposes the given expressions as a tuple of attribute values
// (GetAttributes) and their stringized names (GetAttributeNames).
#define DECLARE_ATTRIBUTES(...)                                                \
  auto GetAttributes() const { return std::make_tuple(__VA_ARGS__); }          \
  static auto GetAttributeNames() { return std::make_tuple(#__VA_ARGS__); }
#else
#define PRINT_TO_SUPPORT
#define PRINT_OPERANDS_TO_SUPPORT
#define DECLARE_ATTRIBUTES(...)
#endif  // defined(INCLUDE_IL_PRINTER)
672
// Together with CidRange, this represents a mapping from a range of class-ids
// to a method for a given selector (method name). Also can contain an
// indication of how frequently a given method has been called at a call site.
// This information can be harvested from the inline caches (ICs).
struct TargetInfo : public CidRange {
  TargetInfo(intptr_t cid_start_arg,
             intptr_t cid_end_arg,
             const Function* target_arg,
             intptr_t count_arg,
             StaticTypeExactnessState exactness)
      : CidRange(cid_start_arg, cid_end_arg),
        target(target_arg),
        count(count_arg),
        exactness(exactness) {
    // The target must outlive this TargetInfo; temporary handles would
    // dangle once their scope is left.
    DEBUG_ASSERT(target->IsNotTemporaryScopedHandle());
  }
  // Method invoked for receivers whose cid falls into this range.
  const Function* target;
  // Call frequency indication for this target (see class comment).
  intptr_t count;
  StaticTypeExactnessState exactness;

  DISALLOW_COPY_AND_ASSIGN(TargetInfo);
};
695
// A set of class-ids, arranged in ranges. Used for the CheckClass
// and PolymorphicInstanceCall instructions.
class Cids : public ZoneAllocated {
 public:
  explicit Cids(Zone* zone) : cid_ranges_(zone, 6) {}
  // Creates the off-heap Cids object that reflects the contents
  // of the on-VM-heap IC data.
  // Ranges of Cids are merged if there is only one target function and
  // it is used for all cids in the gaps between ranges.
  static Cids* CreateForArgument(Zone* zone,
                                 const BinaryFeedback& binary_feedback,
                                 int argument_number);
  static Cids* CreateMonomorphic(Zone* zone, intptr_t cid);

  bool Equals(const Cids& other) const;

  bool HasClassId(intptr_t cid) const;

  // Appends a range; ownership stays with the zone allocator.
  void Add(CidRange* target) { cid_ranges_.Add(target); }

  CidRange& operator[](intptr_t index) const { return *cid_ranges_[index]; }

  CidRange* At(int index) const { return cid_ranges_[index]; }

  // Number of ranges (not the number of covered cids).
  intptr_t length() const { return cid_ranges_.length(); }

  void SetLength(intptr_t len) { cid_ranges_.SetLength(len); }

  bool is_empty() const { return cid_ranges_.is_empty(); }

  void Sort(int compare(CidRange* const* a, CidRange* const* b)) {
    cid_ranges_.Sort(compare);
  }

  bool IsMonomorphic() const;
  intptr_t MonomorphicReceiverCid() const;
  intptr_t ComputeLowestCid() const;
  intptr_t ComputeHighestCid() const;

 protected:
  GrowableArray<CidRange*> cid_ranges_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Cids);
};
741
// The set of call targets for a polymorphic call site, organized as cid
// ranges (see TargetInfo).
class CallTargets : public Cids {
 public:
  explicit CallTargets(Zone* zone) : Cids(zone) {}

  static const CallTargets* CreateMonomorphic(Zone* zone,
                                              intptr_t receiver_cid,
                                              const Function& target);

  // Creates the off-heap CallTargets object that reflects the contents
  // of the on-VM-heap IC data.
  static const CallTargets* Create(Zone* zone, const ICData& ic_data);

  // This variant also expands the class-ids to neighbouring classes that
  // inherit the same method.
  static const CallTargets* CreateAndExpand(Zone* zone, const ICData& ic_data);

  // The stored ranges are assumed to be TargetInfo instances (ensured by the
  // Create* factories above).
  TargetInfo* TargetAt(int i) const { return static_cast<TargetInfo*>(At(i)); }

  intptr_t AggregateCallCount() const;

  StaticTypeExactnessState MonomorphicExactness() const;
  bool HasSingleTarget() const;
  bool HasSingleRecognizedTarget() const;
  const Function& FirstTarget() const;
  const Function& MostPopularTarget() const;

  void Print() const;

  // Whether the call site is monomorphic with receiver cid [cid].
  bool ReceiverIs(intptr_t cid) const {
    return IsMonomorphic() && MonomorphicReceiverCid() == cid;
  }
  // Whether every cid covered by every range is either Smi or Mint.
  // Returns false when there are no ranges at all.
  bool ReceiverIsSmiOrMint() const {
    if (cid_ranges_.is_empty()) {
      return false;
    }
    for (intptr_t i = 0, n = cid_ranges_.length(); i < n; i++) {
      for (intptr_t j = cid_ranges_[i]->cid_start; j <= cid_ranges_[i]->cid_end;
           j++) {
        if (j != kSmiCid && j != kMintCid) {
          return false;
        }
      }
    }
    return true;
  }

  void Write(FlowGraphSerializer* s) const;
  explicit CallTargets(FlowGraphDeserializer* d);

 private:
  void CreateHelper(Zone* zone, const ICData& ic_data);
  void MergeIntoRanges();
};
795
796// Represents type feedback for the binary operators, and a few recognized
797// static functions (see MethodRecognizer::NumArgsCheckedForStaticCall).
798class BinaryFeedback : public ZoneAllocated {
799 public:
800 explicit BinaryFeedback(Zone* zone) : feedback_(zone, 2) {}
801
802 static const BinaryFeedback* Create(Zone* zone, const ICData& ic_data);
803 static const BinaryFeedback* CreateMonomorphic(Zone* zone,
804 intptr_t receiver_cid,
805 intptr_t argument_cid);
806
807 bool ArgumentIs(intptr_t cid) const {
808 if (feedback_.is_empty()) {
809 return false;
810 }
811 for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
812 if (feedback_[i].second != cid) {
813 return false;
814 }
815 }
816 return true;
817 }
818
819 bool OperandsAreEither(intptr_t cid_a, intptr_t cid_b) const {
820 if (feedback_.is_empty()) {
821 return false;
822 }
823 for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
824 if ((feedback_[i].first != cid_a) && (feedback_[i].first != cid_b)) {
825 return false;
826 }
827 if ((feedback_[i].second != cid_a) && (feedback_[i].second != cid_b)) {
828 return false;
829 }
830 }
831 return true;
832 }
833 bool OperandsAreSmiOrNull() const {
834 return OperandsAreEither(cid_a: kSmiCid, cid_b: kNullCid);
835 }
836 bool OperandsAreSmiOrMint() const {
837 return OperandsAreEither(cid_a: kSmiCid, cid_b: kMintCid);
838 }
839 bool OperandsAreSmiOrDouble() const {
840 return OperandsAreEither(cid_a: kSmiCid, cid_b: kDoubleCid);
841 }
842
843 bool OperandsAre(intptr_t cid) const {
844 if (feedback_.length() != 1) return false;
845 return (feedback_[0].first == cid) && (feedback_[0].second == cid);
846 }
847
848 bool IncludesOperands(intptr_t cid) const {
849 for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
850 if ((feedback_[i].first == cid) && (feedback_[i].second == cid)) {
851 return true;
852 }
853 }
854 return false;
855 }
856
857 private:
858 GrowableArray<std::pair<intptr_t, intptr_t>> feedback_;
859
860 friend class Cids;
861};
862
863typedef GrowableArray<Value*> InputsArray;
864typedef ZoneGrowableArray<MoveArgumentInstr*> MoveArgumentsArray;
865
866template <typename Trait>
867class InstructionIndexedPropertyIterable {
868 public:
869 struct Iterator {
870 const Instruction* instr;
871 intptr_t index;
872
873 decltype(Trait::At(instr, index)) operator*() const {
874 return Trait::At(instr, index);
875 }
876 Iterator& operator++() {
877 index++;
878 return *this;
879 }
880
881 bool operator==(const Iterator& other) {
882 return instr == other.instr && index == other.index;
883 }
884
885 bool operator!=(const Iterator& other) { return !(*this == other); }
886 };
887
888 explicit InstructionIndexedPropertyIterable(const Instruction* instr)
889 : instr_(instr) {}
890
891 Iterator begin() const { return {instr_, 0}; }
892 Iterator end() const { return {instr_, Trait::Length(instr_)}; }
893
894 private:
895 const Instruction* instr_;
896};
897
898class ValueListIterable {
899 public:
900 struct Iterator {
901 Value* value;
902
903 Value* operator*() const { return value; }
904
905 Iterator& operator++() {
906 value = value->next_use();
907 return *this;
908 }
909
910 bool operator==(const Iterator& other) { return value == other.value; }
911
912 bool operator!=(const Iterator& other) { return !(*this == other); }
913 };
914
915 explicit ValueListIterable(Value* value) : value_(value) {}
916
917 Iterator begin() const { return {.value: value_}; }
918 Iterator end() const { return {.value: nullptr}; }
919
920 private:
921 Value* value_;
922};
923
924class Instruction : public ZoneAllocated {
925 public:
926#define DECLARE_TAG(type, attrs) k##type,
927 enum Tag { FOR_EACH_INSTRUCTION(DECLARE_TAG) kNumInstructions };
928#undef DECLARE_TAG
929
930 static const intptr_t kInstructionAttrs[kNumInstructions];
931
932 enum SpeculativeMode {
933 // Types of inputs should be checked when unboxing for this instruction.
934 kGuardInputs,
935 // Each input is guaranteed to have a valid type for the input
936 // representation and its type should not be checked when unboxing.
937 kNotSpeculative
938 };
939
940 // If the source has the inlining ID of the root function, then don't set
941 // the inlining ID to that; instead, treat it as unset.
942 explicit Instruction(const InstructionSource& source,
943 intptr_t deopt_id = DeoptId::kNone)
944 : deopt_id_(deopt_id), inlining_id_(source.inlining_id) {}
945
946 explicit Instruction(intptr_t deopt_id = DeoptId::kNone)
947 : Instruction(InstructionSource(), deopt_id) {}
948
949 virtual ~Instruction() {}
950
951 virtual Tag tag() const = 0;
952
953 virtual intptr_t statistics_tag() const { return tag(); }
954
955 intptr_t deopt_id() const {
956 ASSERT(ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall() ||
957 CanBecomeDeoptimizationTarget() || MayThrow() ||
958 CompilerState::Current().is_aot());
959 return GetDeoptId();
960 }
961
962 static const ICData* GetICData(
963 const ZoneGrowableArray<const ICData*>& ic_data_array,
964 intptr_t deopt_id,
965 bool is_static_call);
966
967 virtual TokenPosition token_pos() const { return TokenPosition::kNoSource; }
968
969 // Returns the source information for this instruction.
970 InstructionSource source() const {
971 return InstructionSource(token_pos(), inlining_id());
972 }
973
974 virtual intptr_t InputCount() const = 0;
975 virtual Value* InputAt(intptr_t i) const = 0;
976 void SetInputAt(intptr_t i, Value* value) {
977 ASSERT(value != nullptr);
978 value->set_instruction(this);
979 value->set_use_index(i);
980 RawSetInputAt(i, value);
981 }
982
983 struct InputsTrait {
984 static Definition* At(const Instruction* instr, intptr_t index) {
985 return instr->InputAt(i: index)->definition();
986 }
987
988 static intptr_t Length(const Instruction* instr) {
989 return instr->InputCount();
990 }
991 };
992
993 using InputsIterable = InstructionIndexedPropertyIterable<InputsTrait>;
994
995 InputsIterable inputs() { return InputsIterable(this); }
996
997 // Remove all inputs (including in the environment) from their
998 // definition's use lists.
999 void UnuseAllInputs();
1000
1001 // Call instructions override this function and return the number of
1002 // pushed arguments.
1003 virtual intptr_t ArgumentCount() const { return 0; }
1004 inline Value* ArgumentValueAt(intptr_t index) const;
1005 inline Definition* ArgumentAt(intptr_t index) const;
1006
1007 // Sets array of MoveArgument instructions.
1008 virtual void SetMoveArguments(MoveArgumentsArray* move_arguments) {
1009 UNREACHABLE();
1010 }
1011 // Returns array of MoveArgument instructions
1012 virtual MoveArgumentsArray* GetMoveArguments() const {
1013 UNREACHABLE();
1014 return nullptr;
1015 }
1016 // Replace inputs with separate MoveArgument instructions detached from call.
1017 virtual void ReplaceInputsWithMoveArguments(
1018 MoveArgumentsArray* move_arguments) {
1019 UNREACHABLE();
1020 }
1021 bool HasMoveArguments() const { return GetMoveArguments() != nullptr; }
1022
1023 // Replaces direct uses of arguments with uses of corresponding MoveArgument
1024 // instructions.
1025 void RepairArgumentUsesInEnvironment() const;
1026
1027 // Returns true, if this instruction can deoptimize with its current inputs.
1028 // This property can change if we add or remove redefinitions that constrain
1029 // the type or the range of input operands during compilation.
1030 virtual bool ComputeCanDeoptimize() const = 0;
1031
1032 virtual bool ComputeCanDeoptimizeAfterCall() const {
1033 // TODO(dartbug.com/45213): Incrementally migrate IR instructions from using
1034 // [ComputeCanDeoptimize] to either [ComputeCanDeoptimizeAfterCall] if they
1035 // can only lazy deoptimize.
1036 return false;
1037 }
1038
1039 // Once we removed the deopt environment, we assume that this
1040 // instruction can't deoptimize.
1041 bool CanDeoptimize() const {
1042 return env() != nullptr &&
1043 (ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall());
1044 }
1045
1046 // Visiting support.
1047 virtual void Accept(InstructionVisitor* visitor) = 0;
1048
1049 Instruction* previous() const { return previous_; }
1050 void set_previous(Instruction* instr) {
1051 ASSERT(!IsBlockEntry());
1052 previous_ = instr;
1053 }
1054
1055 Instruction* next() const { return next_; }
  // Sets the textual successor of this instruction. Graph entries, returns
  // and phis never take a successor; branches may only have theirs cleared
  // (their control-flow targets are block entries, not linked instructions).
  void set_next(Instruction* instr) {
    ASSERT(!IsGraphEntry());
    ASSERT(!IsReturn());
    ASSERT(!IsBranch() || (instr == nullptr));
    ASSERT(!IsPhi());
    // A block entry is never reached by falling through from a predecessor
    // instruction; it can only follow via explicit control flow.
    ASSERT(instr == nullptr || !instr->IsBlockEntry());
    // TODO(fschneider): Also add Throw and ReThrow to the list of instructions
    // that do not have a successor. Currently, the graph builder will continue
    // to append instruction in case of a Throw inside an expression. This
    // condition should be handled in the graph builder
    next_ = instr;
  }
1068
1069 // Link together two instruction.
1070 void LinkTo(Instruction* next) {
1071 ASSERT(this != next);
1072 this->set_next(next);
1073 next->set_previous(this);
1074 }
1075
1076 // Removed this instruction from the graph, after use lists have been
1077 // computed. If the instruction is a definition with uses, those uses are
1078 // unaffected (so the instruction can be reinserted, e.g., hoisting).
1079 Instruction* RemoveFromGraph(bool return_previous = true);
1080
1081 // Normal instructions can have 0 (inside a block) or 1 (last instruction in
1082 // a block) successors. Branch instruction with >1 successors override this
1083 // function.
1084 virtual intptr_t SuccessorCount() const;
1085 virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
1086
  // Trait for InstructionIndexedPropertyIterable: exposes this instruction's
  // successor blocks as an indexable sequence.
  struct SuccessorsTrait {
    static BlockEntryInstr* At(const Instruction* instr, intptr_t index) {
      return instr->SuccessorAt(index);
    }

    static intptr_t Length(const Instruction* instr) {
      return instr->SuccessorCount();
    }
  };
1096
1097 using SuccessorsIterable =
1098 InstructionIndexedPropertyIterable<SuccessorsTrait>;
1099
1100 inline SuccessorsIterable successors() const {
1101 return SuccessorsIterable(this);
1102 }
1103
1104 void Goto(JoinEntryInstr* entry);
1105
1106 virtual const char* DebugName() const = 0;
1107
1108#if defined(DEBUG)
1109 // Checks that the field stored in an instruction has proper form:
1110 // - must be a zone-handle
1111 // - In background compilation, must be cloned.
1112 // Aborts if field is not OK.
1113 void CheckField(const Field& field) const;
1114#else
1115 void CheckField(const Field& field) const {}
1116#endif // DEBUG
1117
1118 // Printing support.
1119 const char* ToCString() const;
1120 PRINT_TO_SUPPORT
1121 PRINT_OPERANDS_TO_SUPPORT
1122
1123#define DECLARE_INSTRUCTION_TYPE_CHECK(Name, Type) \
1124 bool Is##Name() const { return (As##Name() != nullptr); } \
1125 Type* As##Name() { \
1126 auto const_this = static_cast<const Instruction*>(this); \
1127 return const_cast<Type*>(const_this->As##Name()); \
1128 } \
1129 virtual const Type* As##Name() const { return nullptr; }
1130#define INSTRUCTION_TYPE_CHECK(Name, Attrs) \
1131 DECLARE_INSTRUCTION_TYPE_CHECK(Name, Name##Instr)
1132
1133 DECLARE_INSTRUCTION_TYPE_CHECK(Definition, Definition)
1134 DECLARE_INSTRUCTION_TYPE_CHECK(BlockEntryWithInitialDefs,
1135 BlockEntryWithInitialDefs)
1136 DECLARE_INSTRUCTION_TYPE_CHECK(CheckBoundBase, CheckBoundBase)
1137 FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
1138 FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
1139
1140#undef INSTRUCTION_TYPE_CHECK
1141#undef DECLARE_INSTRUCTION_TYPE_CHECK
1142
1143 template <typename T>
1144 T* Cast() {
1145 return static_cast<T*>(this);
1146 }
1147
1148 template <typename T>
1149 const T* Cast() const {
1150 return static_cast<const T*>(this);
1151 }
1152
1153 // Returns structure describing location constraints required
1154 // to emit native code for this instruction.
1155 LocationSummary* locs() {
1156 ASSERT(locs_ != nullptr);
1157 return locs_;
1158 }
1159
1160 bool HasLocs() const { return locs_ != nullptr; }
1161
1162 virtual LocationSummary* MakeLocationSummary(Zone* zone,
1163 bool is_optimizing) const = 0;
1164
1165 void InitializeLocationSummary(Zone* zone, bool optimizing) {
1166 ASSERT(locs_ == nullptr);
1167 locs_ = MakeLocationSummary(zone, is_optimizing: optimizing);
1168 }
1169
1170 // Makes a new call location summary (or uses `locs`) and initializes the
1171 // output register constraints depending on the representation of [instr].
1172 static LocationSummary* MakeCallSummary(Zone* zone,
1173 const Instruction* instr,
1174 LocationSummary* locs = nullptr);
1175
1176 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { UNIMPLEMENTED(); }
1177
1178 Environment* env() const { return env_; }
1179 void SetEnvironment(Environment* deopt_env);
1180 void RemoveEnvironment();
1181 void ReplaceInEnvironment(Definition* current, Definition* replacement);
1182
1183 virtual intptr_t NumberOfInputsConsumedBeforeCall() const { return 0; }
1184
1185 // Different compiler passes can assign pass specific ids to the instruction.
1186 // Only one id can be stored at a time.
1187 intptr_t GetPassSpecificId(CompilerPass::Id pass) const {
1188 return (PassSpecificId::DecodePass(value: pass_specific_id_) == pass)
1189 ? PassSpecificId::DecodeId(value: pass_specific_id_)
1190 : PassSpecificId::kNoId;
1191 }
  // Tags this instruction with a [pass]-scoped [id], overwriting any id
  // stored by a previous pass (only one id is stored at a time).
  void SetPassSpecificId(CompilerPass::Id pass, intptr_t id) {
    pass_specific_id_ = PassSpecificId::Encode(pass, id);
  }
1195 bool HasPassSpecificId(CompilerPass::Id pass) const {
1196 return (PassSpecificId::DecodePass(value: pass_specific_id_) == pass) &&
1197 (PassSpecificId::DecodeId(value: pass_specific_id_) !=
1198 PassSpecificId::kNoId);
1199 }
1200
1201 bool HasUnmatchedInputRepresentations() const;
1202
1203 // Returns representation expected for the input operand at the given index.
1204 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
1205 return kTagged;
1206 }
1207
1208 SpeculativeMode SpeculativeModeOfInputs() const {
1209 for (intptr_t i = 0; i < InputCount(); i++) {
1210 if (SpeculativeModeOfInput(index: i) == kGuardInputs) {
1211 return kGuardInputs;
1212 }
1213 }
1214 return kNotSpeculative;
1215 }
1216
1217 // By default, instructions should check types of inputs when unboxing
1218 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
1219 return kGuardInputs;
1220 }
1221
1222 // Representation of the value produced by this computation.
1223 virtual Representation representation() const { return kTagged; }
1224
1225 bool WasEliminated() const { return next() == nullptr; }
1226
1227 // Returns deoptimization id that corresponds to the deoptimization target
1228 // that input operands conversions inserted for this instruction can jump
1229 // to.
1230 virtual intptr_t DeoptimizationTarget() const {
1231 UNREACHABLE();
1232 return DeoptId::kNone;
1233 }
1234
1235 // Returns a replacement for the instruction or nullptr if the instruction can
1236 // be eliminated. By default returns the this instruction which means no
1237 // change.
1238 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
1239
1240 // Insert this instruction before 'next' after use lists are computed.
1241 // Instructions cannot be inserted before a block entry or any other
1242 // instruction without a previous instruction.
1243 void InsertBefore(Instruction* next) { InsertAfter(prev: next->previous()); }
1244
1245 // Insert this instruction after 'prev' after use lists are computed.
1246 void InsertAfter(Instruction* prev);
1247
1248 // Append an instruction to the current one and return the tail.
1249 // This function updated def-use chains of the newly appended
1250 // instruction.
1251 Instruction* AppendInstruction(Instruction* tail);
1252
1253 // Returns true if CSE and LICM are allowed for this instruction.
1254 virtual bool AllowsCSE() const { return false; }
1255
1256 // Returns true if this instruction has any side-effects besides storing.
1257 // See StoreFieldInstr::HasUnknownSideEffects() for rationale.
1258 virtual bool HasUnknownSideEffects() const = 0;
1259
1260 // Whether this instruction can call Dart code without going through
1261 // the runtime.
1262 //
1263 // Must be true for any instruction which can call Dart code without
1264 // first creating an exit frame to transition into the runtime.
1265 //
1266 // See also WriteBarrierElimination and Thread::RememberLiveTemporaries().
1267 virtual bool CanCallDart() const { return false; }
1268
1269 virtual bool CanTriggerGC() const;
1270
1271 // Get the block entry for this instruction.
1272 virtual BlockEntryInstr* GetBlock();
1273
1274 virtual intptr_t inlining_id() const { return inlining_id_; }
1275 virtual void set_inlining_id(intptr_t value) {
1276 ASSERT(value >= 0);
1277 ASSERT(!has_inlining_id() || inlining_id_ == value);
1278 inlining_id_ = value;
1279 }
1280 virtual bool has_inlining_id() const { return inlining_id_ >= 0; }
1281
1282 // Returns a hash code for use with hash maps.
1283 virtual uword Hash() const;
1284
1285 // Compares two instructions. Returns true, iff:
1286 // 1. They have the same tag.
1287 // 2. All input operands are Equals.
1288 // 3. They satisfy AttributesEqual.
1289 bool Equals(const Instruction& other) const;
1290
1291 // Compare attributes of a instructions (except input operands and tag).
1292 // All instructions that participate in CSE have to override this function.
1293 // This function can assume that the argument has the same type as this.
1294 virtual bool AttributesEqual(const Instruction& other) const {
1295 UNREACHABLE();
1296 return false;
1297 }
1298
1299 virtual void InheritDeoptTarget(Zone* zone, Instruction* other);
1300
1301 bool NeedsEnvironment() const {
1302 return ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall() ||
1303 CanBecomeDeoptimizationTarget() || MayThrow();
1304 }
1305
1306 virtual bool CanBecomeDeoptimizationTarget() const { return false; }
1307
1308 void InheritDeoptTargetAfter(FlowGraph* flow_graph,
1309 Definition* call,
1310 Definition* result);
1311
1312 virtual bool MayThrow() const = 0;
1313
1314 bool IsDominatedBy(Instruction* dom);
1315
1316 void ClearEnv() { env_ = nullptr; }
1317
1318 void Unsupported(FlowGraphCompiler* compiler);
1319
1320 static bool SlowPathSharingSupported(bool is_optimizing) {
1321#if defined(TARGET_ARCH_IA32)
1322 return false;
1323#else
1324 return FLAG_enable_slow_path_sharing && FLAG_precompiled_mode &&
1325 is_optimizing;
1326#endif
1327 }
1328
1329 virtual bool UseSharedSlowPathStub(bool is_optimizing) const { return false; }
1330
1331 // 'RegisterKindForResult()' returns the register kind necessary to hold the
1332 // result.
1333 //
1334 // This is not virtual because instructions should override representation()
1335 // instead.
  Location::Kind RegisterKindForResult() const {
    const Representation rep = representation();
    // Unboxed floating-point and SIMD representations listed below live in
    // FPU registers; all other representations use general-purpose registers.
    if ((rep == kUnboxedFloat) || (rep == kUnboxedDouble) ||
        (rep == kUnboxedFloat32x4) || (rep == kUnboxedInt32x4) ||
        (rep == kUnboxedFloat64x2)) {
      return Location::kFpuRegister;
    }
    return Location::kRegister;
  }
1345
1346 DECLARE_CUSTOM_SERIALIZATION(Instruction)
1347 DECLARE_EXTRA_SERIALIZATION
1348
1349 protected:
1350 // GetDeoptId and/or CopyDeoptIdFrom.
1351 friend class CallSiteInliner;
1352 friend class LICM;
1353 friend class ComparisonInstr;
1354 friend class Scheduler;
1355 friend class BlockEntryInstr;
1356 friend class CatchBlockEntryInstr; // deopt_id_
1357 friend class DebugStepCheckInstr; // deopt_id_
1358 friend class StrictCompareInstr; // deopt_id_
1359
1360 // Fetch deopt id without checking if this computation can deoptimize.
1361 intptr_t GetDeoptId() const { return deopt_id_; }
1362
1363 void CopyDeoptIdFrom(const Instruction& instr) {
1364 deopt_id_ = instr.deopt_id_;
1365 }
1366
1367 // Write/read locs and environment, but not inputs.
1368 // Used when one instruction embeds another and reuses their inputs
1369 // (e.g. Branch/IfThenElse/CheckCondition wrap Comparison).
1370 void WriteExtraWithoutInputs(FlowGraphSerializer* s);
1371 void ReadExtraWithoutInputs(FlowGraphDeserializer* d);
1372
1373 private:
1374 friend class BranchInstr; // For RawSetInputAt.
1375 friend class IfThenElseInstr; // For RawSetInputAt.
1376 friend class CheckConditionInstr; // For RawSetInputAt.
1377
1378 virtual void RawSetInputAt(intptr_t i, Value* value) = 0;
1379
1380 class PassSpecificId {
1381 public:
1382 static intptr_t Encode(CompilerPass::Id pass, intptr_t id) {
1383 return (id << kPassBits) | pass;
1384 }
1385
1386 static CompilerPass::Id DecodePass(intptr_t value) {
1387 return static_cast<CompilerPass::Id>(value & Utils::NBitMask(n: kPassBits));
1388 }
1389
1390 static intptr_t DecodeId(intptr_t value) { return (value >> kPassBits); }
1391
1392 static constexpr intptr_t kNoId = -1;
1393
1394 private:
1395 static constexpr intptr_t kPassBits = 8;
1396 static_assert(CompilerPass::kNumPasses <= (1 << kPassBits),
1397 "Pass Id does not fit into the bit field");
1398 };
1399
1400 intptr_t deopt_id_ = DeoptId::kNone;
1401 intptr_t pass_specific_id_ = PassSpecificId::kNoId;
1402 Instruction* previous_ = nullptr;
1403 Instruction* next_ = nullptr;
1404 Environment* env_ = nullptr;
1405 LocationSummary* locs_ = nullptr;
1406 intptr_t inlining_id_;
1407
1408 DISALLOW_COPY_AND_ASSIGN(Instruction);
1409};
1410
// Labels used when emitting a comparison as a branch: the targets for the
// true and false outcomes, plus the label (if either) that immediately
// follows the emitted code — presumably reachable by fall-through, so no
// jump needs to be emitted for it (TODO: confirm against code generator).
struct BranchLabels {
  compiler::Label* true_label;
  compiler::Label* false_label;
  compiler::Label* fall_through;
};
1416
// Base class for instructions that report no unknown side effects and are
// therefore eligible for common subexpression elimination (AllowsCSE).
class PureInstruction : public Instruction {
 public:
  explicit PureInstruction(intptr_t deopt_id) : Instruction(deopt_id) {}
  explicit PureInstruction(const InstructionSource& source, intptr_t deopt_id)
      : Instruction(source, deopt_id) {}

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(PureInstruction, Instruction)
};
1428
1429// Types to be used as ThrowsTrait for TemplateInstruction/TemplateDefinition.
// ThrowsTrait: the templated instruction reports MayThrow() == true.
struct Throws {
  static constexpr bool kCanThrow = true;
};
1433
// ThrowsTrait: the templated instruction reports MayThrow() == false.
struct NoThrow {
  static constexpr bool kCanThrow = false;
};
1437
1438// Types to be used as CSETrait for TemplateInstruction/TemplateDefinition.
1439// Pure instructions are those that allow CSE and have no effects and
1440// no dependencies.
// CSETrait: selects the pure base class, enabling CSE for the instruction.
template <typename DefaultBase, typename PureBase>
struct Pure {
  typedef PureBase Base;
};
1445
// CSETrait: selects the default base class, leaving CSE disabled.
template <typename DefaultBase, typename PureBase>
struct NoCSE {
  typedef DefaultBase Base;
};
1450
// Instruction with a fixed number (N) of input operands stored inline.
// ThrowsTrait (Throws/NoThrow) determines MayThrow(); CSETrait (Pure/NoCSE)
// selects whether the base class participates in CSE.
template <intptr_t N,
          typename ThrowsTrait,
          template <typename Default, typename Pure> class CSETrait = NoCSE>
class TemplateInstruction
    : public CSETrait<Instruction, PureInstruction>::Base {
 public:
  using BaseClass = typename CSETrait<Instruction, PureInstruction>::Base;

  explicit TemplateInstruction(intptr_t deopt_id = DeoptId::kNone)
      : BaseClass(deopt_id), inputs_() {}

  TemplateInstruction(const InstructionSource& source,
                      intptr_t deopt_id = DeoptId::kNone)
      : BaseClass(source, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }

  DECLARE_EMPTY_SERIALIZATION(TemplateInstruction, BaseClass)

 protected:
  // Inline storage for the N input values (no heap allocation per input).
  EmbeddedArray<Value*, N> inputs_;

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
1479
1480class MoveOperands : public ZoneAllocated {
1481 public:
1482 MoveOperands(Location dest, Location src) : dest_(dest), src_(src) {}
1483 MoveOperands(const MoveOperands& other)
1484 : ZoneAllocated(), dest_(other.dest_), src_(other.src_) {}
1485
1486 MoveOperands& operator=(const MoveOperands& other) {
1487 dest_ = other.dest_;
1488 src_ = other.src_;
1489 return *this;
1490 }
1491
1492 Location src() const { return src_; }
1493 Location dest() const { return dest_; }
1494
1495 Location* src_slot() { return &src_; }
1496 Location* dest_slot() { return &dest_; }
1497
1498 void set_src(const Location& value) { src_ = value; }
1499 void set_dest(const Location& value) { dest_ = value; }
1500
1501 // The parallel move resolver marks moves as "in-progress" by clearing the
1502 // destination (but not the source).
1503 Location MarkPending() {
1504 ASSERT(!IsPending());
1505 Location dest = dest_;
1506 dest_ = Location::NoLocation();
1507 return dest;
1508 }
1509
1510 void ClearPending(Location dest) {
1511 ASSERT(IsPending());
1512 dest_ = dest;
1513 }
1514
1515 bool IsPending() const {
1516 ASSERT(!src_.IsInvalid() || dest_.IsInvalid());
1517 return dest_.IsInvalid() && !src_.IsInvalid();
1518 }
1519
1520 // True if this move a move from the given location.
1521 bool Blocks(Location loc) const {
1522 return !IsEliminated() && src_.Equals(other: loc);
1523 }
1524
1525 // A move is redundant if it's been eliminated, if its source and
1526 // destination are the same, or if its destination is unneeded.
1527 bool IsRedundant() const {
1528 return IsEliminated() || dest_.IsInvalid() || src_.Equals(other: dest_);
1529 }
1530
1531 // We clear both operands to indicate move that's been eliminated.
1532 void Eliminate() { src_ = dest_ = Location::NoLocation(); }
1533 bool IsEliminated() const {
1534 ASSERT(!src_.IsInvalid() || dest_.IsInvalid());
1535 return src_.IsInvalid();
1536 }
1537
1538 void Write(FlowGraphSerializer* s) const;
1539 explicit MoveOperands(FlowGraphDeserializer* d);
1540
1541 private:
1542 Location dest_;
1543 Location src_;
1544};
1545
// A set of moves between locations that are to be performed together
// ("in parallel"). Inserted by the register allocator to connect live
// ranges; never visited by optimization passes.
class ParallelMoveInstr : public TemplateInstruction<0, NoThrow> {
 public:
  ParallelMoveInstr() : moves_(4) {}

  DECLARE_INSTRUCTION(ParallelMove)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const {
    UNREACHABLE();  // This instruction never visited by optimization passes.
    return false;
  }

  const GrowableArray<MoveOperands*>& moves() const { return moves_; }

  // Appends a new dest <- src move and returns it for further adjustment.
  MoveOperands* AddMove(Location dest, Location src) {
    MoveOperands* move = new MoveOperands(dest, src);
    moves_.Add(move);
    return move;
  }

  MoveOperands* MoveOperandsAt(intptr_t index) const { return moves_[index]; }

  intptr_t NumMoves() const { return moves_.length(); }

  // True if every contained move is redundant (see MoveOperands::IsRedundant).
  bool IsRedundant() const;

  virtual TokenPosition token_pos() const {
    return TokenPosition::kParallelMove;
  }

  // Resolved ordering of the moves; must be set before it is queried.
  const MoveSchedule& move_schedule() const {
    ASSERT(move_schedule_ != nullptr);
    return *move_schedule_;
  }

  void set_move_schedule(const MoveSchedule& schedule) {
    move_schedule_ = &schedule;
  }

  PRINT_TO_SUPPORT
  DECLARE_EMPTY_SERIALIZATION(ParallelMoveInstr, TemplateInstruction)
  DECLARE_EXTRA_SERIALIZATION

 private:
  GrowableArray<MoveOperands*> moves_;  // Elements cannot be null.
  const MoveSchedule* move_schedule_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveInstr);
};
1596
1597// Basic block entries are administrative nodes. There is a distinguished
1598// graph entry with no predecessor. Joins are the only nodes with multiple
1599// predecessors. Targets are all other basic block entries. The types
1600// enforce edge-split form---joins are forbidden as the successors of
1601// branches.
class BlockEntryInstr : public TemplateInstruction<0, NoThrow> {
 public:
  virtual intptr_t PredecessorCount() const = 0;
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const = 0;

  // Preorder/postorder numbers assigned by DiscoverBlock's depth-first
  // traversal (see below); -1 until discovery has run.
  intptr_t preorder_number() const { return preorder_number_; }
  void set_preorder_number(intptr_t number) { preorder_number_ = number; }

  intptr_t postorder_number() const { return postorder_number_; }
  void set_postorder_number(intptr_t number) { postorder_number_ = number; }

  intptr_t block_id() const { return block_id_; }

  // NOTE: These are SSA positions and not token positions. These are used by
  // the register allocator.
  void set_start_pos(intptr_t pos) { start_pos_ = pos; }
  intptr_t start_pos() const { return start_pos_; }
  void set_end_pos(intptr_t pos) { end_pos_ = pos; }
  intptr_t end_pos() const { return end_pos_; }

  // Immediate dominator of this block (nullptr for the graph entry).
  BlockEntryInstr* dominator() const { return dominator_; }
  BlockEntryInstr* ImmediateDominator() const;

  const GrowableArray<BlockEntryInstr*>& dominated_blocks() {
    return dominated_blocks_;
  }

  // Records [block] as immediately dominated by this block and sets its
  // dominator pointer accordingly.
  void AddDominatedBlock(BlockEntryInstr* block) {
    ASSERT(!block->IsFunctionEntry() || this->IsGraphEntry());
    block->set_dominator(this);
    dominated_blocks_.Add(block);
  }
  void ClearDominatedBlocks() { dominated_blocks_.Clear(); }

  bool Dominates(BlockEntryInstr* other) const;

  Instruction* last_instruction() const { return last_instruction_; }
  void set_last_instruction(Instruction* instr) { last_instruction_ = instr; }

  // Parallel move used by the linear scan register allocator to connect
  // live ranges at the start of this block; nullptr if none was created.
  ParallelMoveInstr* parallel_move() const { return parallel_move_; }

  bool HasParallelMove() const { return parallel_move_ != nullptr; }

  bool HasNonRedundantParallelMove() const {
    return HasParallelMove() && !parallel_move()->IsRedundant();
  }

  // Returns this block's parallel move, creating an empty one on demand.
  ParallelMoveInstr* GetParallelMove() {
    if (parallel_move_ == nullptr) {
      parallel_move_ = new ParallelMoveInstr();
    }
    return parallel_move_;
  }

  // Discover basic-block structure of the current block. Must be called
  // on all graph blocks in preorder to yield valid results. As a side effect,
  // the block entry instructions in the graph are assigned preorder numbers.
  // The array 'preorder' maps preorder block numbers to the block entry
  // instruction with that number. The depth first spanning tree is recorded
  // in the array 'parent', which maps preorder block numbers to the preorder
  // number of the block's spanning-tree parent. As a side effect of this
  // function, the set of basic block predecessors (e.g., block entry
  // instructions of predecessor blocks) and also the last instruction in the
  // block is recorded in each entry instruction. Returns true when called the
  // first time on this particular block within one graph traversal, and false
  // on all successive calls.
  bool DiscoverBlock(BlockEntryInstr* predecessor,
                     GrowableArray<BlockEntryInstr*>* preorder,
                     GrowableArray<intptr_t>* parent);

  virtual bool CanBecomeDeoptimizationTarget() const {
    // BlockEntry environment is copied to Goto and Branch instructions
    // when we insert new blocks targeting this block.
    return true;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  intptr_t try_index() const { return try_index_; }
  void set_try_index(intptr_t index) { try_index_ = index; }

  // True for blocks inside a try { } region.
  bool InsideTryBlock() const { return try_index_ != kInvalidTryIndex; }

  // Loop related methods.
  LoopInfo* loop_info() const { return loop_info_; }
  void set_loop_info(LoopInfo* loop_info) { loop_info_ = loop_info; }
  bool IsLoopHeader() const;
  intptr_t NestingDepth() const;

  virtual BlockEntryInstr* GetBlock() { return this; }

  virtual TokenPosition token_pos() const {
    return TokenPosition::kControlFlow;
  }

  // Helper to mutate the graph during inlining. This block should be
  // replaced with new_block as a predecessor of all of this block's
  // successors.
  void ReplaceAsPredecessorWith(BlockEntryInstr* new_block);

  void set_block_id(intptr_t block_id) { block_id_ = block_id; }

  // Stack-based IR bookkeeping.
  intptr_t stack_depth() const { return stack_depth_; }
  void set_stack_depth(intptr_t s) { stack_depth_ = s; }

  // For all instruction in this block: Remove all inputs (including in the
  // environment) from their definition's use lists for all instructions.
  void ClearAllInstructions();

  // Range-based iteration over this block's instructions (the entry itself
  // is excluded; see ForwardInstructionIterator's constructor).
  class InstructionsIterable {
   public:
    explicit InstructionsIterable(BlockEntryInstr* block) : block_(block) {}

    inline ForwardInstructionIterator begin() const;
    inline ForwardInstructionIterator end() const;

   private:
    BlockEntryInstr* block_;
  };

  InstructionsIterable instructions() { return InstructionsIterable(this); }

  DECLARE_ABSTRACT_INSTRUCTION(BlockEntry)

  DECLARE_CUSTOM_SERIALIZATION(BlockEntryInstr)
  DECLARE_EXTRA_SERIALIZATION

 protected:
  BlockEntryInstr(intptr_t block_id,
                  intptr_t try_index,
                  intptr_t deopt_id,
                  intptr_t stack_depth)
      : TemplateInstruction(deopt_id),
        block_id_(block_id),
        try_index_(try_index),
        stack_depth_(stack_depth),
        dominated_blocks_(1) {}

  // Perform a depth first search to find OSR entry and
  // link it to the given graph entry.
  bool FindOsrEntryAndRelink(GraphEntryInstr* graph_entry,
                             Instruction* parent,
                             BitVector* block_marks);

 private:
  virtual void ClearPredecessors() = 0;
  virtual void AddPredecessor(BlockEntryInstr* predecessor) = 0;

  void set_dominator(BlockEntryInstr* instr) { dominator_ = instr; }

  intptr_t block_id_;
  intptr_t try_index_;
  intptr_t preorder_number_ = -1;
  intptr_t postorder_number_ = -1;
  // Expected stack depth on entry (for stack-based IR only).
  intptr_t stack_depth_;
  // Starting and ending lifetime positions for this block. Used by
  // the linear scan register allocator.
  intptr_t start_pos_ = -1;
  intptr_t end_pos_ = -1;
  // Immediate dominator, nullptr for graph entry.
  BlockEntryInstr* dominator_ = nullptr;
  // TODO(fschneider): Optimize the case of one child to save space.
  GrowableArray<BlockEntryInstr*> dominated_blocks_;
  Instruction* last_instruction_ = nullptr;

  // Parallel move that will be used by linear scan register allocator to
  // connect live ranges at the start of the block.
  ParallelMoveInstr* parallel_move_ = nullptr;

  // Closest enveloping loop in loop hierarchy (nullptr at nesting depth 0).
  LoopInfo* loop_info_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(BlockEntryInstr);
};
1781
// Iterates forward over the instructions of a block, starting at the first
// instruction AFTER the block entry (the constructor advances past it).
// A default-constructed iterator (current_ == nullptr) is the end sentinel.
class ForwardInstructionIterator {
 public:
  ForwardInstructionIterator(const ForwardInstructionIterator& other) = default;
  ForwardInstructionIterator& operator=(
      const ForwardInstructionIterator& other) = default;

  ForwardInstructionIterator() : current_(nullptr) {}

  explicit ForwardInstructionIterator(BlockEntryInstr* block_entry)
      : current_(block_entry) {
    // Skip the block entry itself so iteration yields only body instructions.
    Advance();
  }

  void Advance() {
    ASSERT(!Done());
    current_ = current_->next();
  }

  bool Done() const { return current_ == nullptr; }

  // Removes 'current_' from graph and sets 'current_' to previous instruction.
  void RemoveCurrentFromGraph();

  Instruction* Current() const { return current_; }

  Instruction* operator*() const { return Current(); }

  bool operator==(const ForwardInstructionIterator& other) const {
    return current_ == other.current_;
  }

  bool operator!=(const ForwardInstructionIterator& other) const {
    return !(*this == other);
  }

  ForwardInstructionIterator& operator++() {
    Advance();
    return *this;
  }

 private:
  Instruction* current_;
};
1825
// Returns an iterator positioned at the first instruction after the block
// entry (the iterator's constructor skips the entry itself).
ForwardInstructionIterator BlockEntryInstr::InstructionsIterable::begin()
    const {
  return ForwardInstructionIterator(block_);
}
1830
// Returns the end sentinel: a default-constructed iterator holding nullptr,
// the state reached after advancing past the block's last instruction.
ForwardInstructionIterator BlockEntryInstr::InstructionsIterable::end() const {
  return ForwardInstructionIterator();
}
1834
// Iterates backward over a block's instructions, from the last instruction
// up to (but not including) the block entry itself.
class BackwardInstructionIterator : public ValueObject {
 public:
  explicit BackwardInstructionIterator(BlockEntryInstr* block_entry)
      : block_entry_(block_entry), current_(block_entry->last_instruction()) {
    // A block entry has no predecessor instruction, so it terminates the walk.
    ASSERT(block_entry_->previous() == nullptr);
  }

  void Advance() {
    ASSERT(!Done());
    current_ = current_->previous();
  }

  bool Done() const { return current_ == block_entry_; }

  // Removes 'current_' from the graph; see the forward iterator's analogue.
  void RemoveCurrentFromGraph();

  Instruction* Current() const { return current_; }

 private:
  BlockEntryInstr* block_entry_;
  Instruction* current_;
};
1857
1858// Base class shared by all block entries which define initial definitions.
1859//
1860// The initial definitions define parameters, special parameters and constants.
class BlockEntryWithInitialDefs : public BlockEntryInstr {
 public:
  BlockEntryWithInitialDefs(intptr_t block_id,
                            intptr_t try_index,
                            intptr_t deopt_id,
                            intptr_t stack_depth)
      : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth) {}

  // The definitions (parameters, special parameters, constants) that are
  // live on entry to this block.
  GrowableArray<Definition*>* initial_definitions() {
    return &initial_definitions_;
  }
  const GrowableArray<Definition*>* initial_definitions() const {
    return &initial_definitions_;
  }

  // Non-macro type-check overrides (see DECLARE_INSTRUCTION_TYPE_CHECK).
  virtual BlockEntryWithInitialDefs* AsBlockEntryWithInitialDefs() {
    return this;
  }
  virtual const BlockEntryWithInitialDefs* AsBlockEntryWithInitialDefs() const {
    return this;
  }

  DECLARE_CUSTOM_SERIALIZATION(BlockEntryWithInitialDefs)
  DECLARE_EXTRA_SERIALIZATION

 protected:
  void PrintInitialDefinitionsTo(BaseTextBuffer* f) const;

 private:
  GrowableArray<Definition*> initial_definitions_;

  DISALLOW_COPY_AND_ASSIGN(BlockEntryWithInitialDefs);
};
1894
// The unique entry block of a flow graph. It has no predecessors; its
// successors are the function entries (normal/unchecked/OSR) and the catch
// block entries registered with it.
class GraphEntryInstr : public BlockEntryWithInitialDefs {
 public:
  GraphEntryInstr(const ParsedFunction& parsed_function, intptr_t osr_id);

  DECLARE_INSTRUCTION(GraphEntry)

  // The graph entry never has predecessors.
  virtual intptr_t PredecessorCount() const { return 0; }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    UNREACHABLE();
    return nullptr;
  }
  virtual intptr_t SuccessorCount() const;
  virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;

  void AddCatchEntry(CatchBlockEntryInstr* entry) { catch_entries_.Add(entry); }

  CatchBlockEntryInstr* GetCatchEntry(intptr_t index);

  void AddIndirectEntry(IndirectEntryInstr* entry) {
    indirect_entries_.Add(entry);
  }

  ConstantInstr* constant_null();

  void RelinkToOsrEntry(Zone* zone, intptr_t max_block_id);
  bool IsCompiledForOsr() const;
  intptr_t osr_id() const { return osr_id_; }

  intptr_t entry_count() const { return entry_count_; }
  void set_entry_count(intptr_t count) { entry_count_ = count; }

  intptr_t spill_slot_count() const { return spill_slot_count_; }
  void set_spill_slot_count(intptr_t count) {
    ASSERT(count >= 0);
    spill_slot_count_ = count;
  }

  // Returns true if this flow graph needs a stack frame.
  bool NeedsFrame() const { return needs_frame_; }
  void MarkFrameless() { needs_frame_ = false; }

  // Number of stack slots reserved for compiling try-catch. For functions
  // without try-catch, this is 0. Otherwise, it is the number of local
  // variables.
  intptr_t fixed_slot_count() const { return fixed_slot_count_; }
  void set_fixed_slot_count(intptr_t count) {
    ASSERT(count >= 0);
    fixed_slot_count_ = count;
  }
  FunctionEntryInstr* normal_entry() const { return normal_entry_; }
  FunctionEntryInstr* unchecked_entry() const { return unchecked_entry_; }
  void set_normal_entry(FunctionEntryInstr* entry) { normal_entry_ = entry; }
  void set_unchecked_entry(FunctionEntryInstr* target) {
    unchecked_entry_ = target;
  }
  OsrEntryInstr* osr_entry() const { return osr_entry_; }
  void set_osr_entry(OsrEntryInstr* entry) { osr_entry_ = entry; }

  const ParsedFunction& parsed_function() const { return parsed_function_; }

  const GrowableArray<CatchBlockEntryInstr*>& catch_entries() const {
    return catch_entries_;
  }

  const GrowableArray<IndirectEntryInstr*>& indirect_entries() const {
    return indirect_entries_;
  }

  // True when callers can only enter through the normal entry: there are no
  // catch entries and no unchecked entry.
  bool HasSingleEntryPoint() const {
    return catch_entries().is_empty() && unchecked_entry() == nullptr;
  }

  PRINT_TO_SUPPORT
  DECLARE_CUSTOM_SERIALIZATION(GraphEntryInstr)
  DECLARE_EXTRA_SERIALIZATION

 private:
  GraphEntryInstr(const ParsedFunction& parsed_function,
                  intptr_t osr_id,
                  intptr_t deopt_id);

  // The graph entry never gains predecessors.
  virtual void ClearPredecessors() {}
  virtual void AddPredecessor(BlockEntryInstr* predecessor) { UNREACHABLE(); }

  const ParsedFunction& parsed_function_;
  FunctionEntryInstr* normal_entry_ = nullptr;
  FunctionEntryInstr* unchecked_entry_ = nullptr;
  OsrEntryInstr* osr_entry_ = nullptr;
  GrowableArray<CatchBlockEntryInstr*> catch_entries_;
  // Indirect targets are blocks reachable only through indirect gotos.
  GrowableArray<IndirectEntryInstr*> indirect_entries_;
  const intptr_t osr_id_;
  intptr_t entry_count_;
  intptr_t spill_slot_count_;
  intptr_t fixed_slot_count_;  // For try-catch in optimized code.
  bool needs_frame_ = true;

  DISALLOW_COPY_AND_ASSIGN(GraphEntryInstr);
};
1994
// A block entry with an arbitrary number of predecessors; the only kind of
// block that may carry phis.
class JoinEntryInstr : public BlockEntryInstr {
 public:
  JoinEntryInstr(intptr_t block_id,
                 intptr_t try_index,
                 intptr_t deopt_id,
                 intptr_t stack_depth = 0)
      : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth),
        phis_(nullptr),
        predecessors_(2)  // Two predecessors is assumed to be the common case.
  {}

  DECLARE_INSTRUCTION(JoinEntry)

  virtual intptr_t PredecessorCount() const { return predecessors_.length(); }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    return predecessors_[index];
  }

  // Returns -1 if pred is not in the list.
  intptr_t IndexOfPredecessor(BlockEntryInstr* pred) const;

  // May be nullptr when the block has no phis.
  ZoneGrowableArray<PhiInstr*>* phis() const { return phis_; }

  PhiInstr* InsertPhi(intptr_t var_index, intptr_t var_count);
  void RemoveDeadPhis(Definition* replacement);

  void InsertPhi(PhiInstr* phi);
  void RemovePhi(PhiInstr* phi);

  virtual bool HasUnknownSideEffects() const { return false; }

  PRINT_TO_SUPPORT

#define FIELD_LIST(F) F(ZoneGrowableArray<PhiInstr*>*, phis_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(JoinEntryInstr,
                                          BlockEntryInstr,
                                          FIELD_LIST)
#undef FIELD_LIST
  DECLARE_EXTRA_SERIALIZATION

 private:
  // Classes that have access to predecessors_ when inlining.
  friend class BlockEntryInstr;
  friend class InlineExitCollector;
  friend class PolymorphicInliner;
  friend class IndirectEntryInstr;  // Access in il_printer.cc.

  // Direct access to phis_ in order to resize it due to phi elimination.
  friend class ConstantPropagator;
  friend class DeadCodeElimination;

  virtual void ClearPredecessors() { predecessors_.Clear(); }
  virtual void AddPredecessor(BlockEntryInstr* predecessor);

  GrowableArray<BlockEntryInstr*> predecessors_;

  DISALLOW_COPY_AND_ASSIGN(JoinEntryInstr);
};
2054
// Iterates over the phis of a join block. Handles join blocks without phis
// (phis() == nullptr) by reporting Done() immediately.
class PhiIterator : public ValueObject {
 public:
  explicit PhiIterator(JoinEntryInstr* join) : phis_(join->phis()), index_(0) {}

  // Moves to the next phi. Must not be called when Done().
  void Advance() {
    ASSERT(!Done());
    index_++;
  }

  bool Done() const {
    return (phis_ == nullptr) || (index_ >= phis_->length());
  }

  PhiInstr* Current() const { return (*phis_)[index_]; }

  // Removes current phi from graph and sets current to previous phi.
  void RemoveCurrentFromGraph();

 private:
  ZoneGrowableArray<PhiInstr*>* phis_;
  intptr_t index_;
};
2077
// A block entry with exactly one predecessor, used for the targets of
// conditional branches.
class TargetEntryInstr : public BlockEntryInstr {
 public:
  TargetEntryInstr(intptr_t block_id,
                   intptr_t try_index,
                   intptr_t deopt_id,
                   intptr_t stack_depth = 0)
      : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth),
        edge_weight_(0.0) {}

  DECLARE_INSTRUCTION(TargetEntry)

  // Profile-derived weight of the incoming edge.
  double edge_weight() const { return edge_weight_; }
  void set_edge_weight(double weight) { edge_weight_ = weight; }
  void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }

  virtual intptr_t PredecessorCount() const {
    return (predecessor_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT((index == 0) && (predecessor_ != nullptr));
    return predecessor_;
  }

  PRINT_TO_SUPPORT

#define FIELD_LIST(F) F(double, edge_weight_)
  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TargetEntryInstr,
                                          BlockEntryInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  friend class BlockEntryInstr;  // Access to predecessor_ when inlining.

  virtual void ClearPredecessors() { predecessor_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    // The single predecessor may only be set once.
    ASSERT(predecessor_ == nullptr);
    predecessor_ = predecessor;
  }

  // Not serialized, set in DiscoverBlocks.
  BlockEntryInstr* predecessor_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(TargetEntryInstr);
};
2123
// Represents an entrypoint to a function which callers can invoke (i.e. not
// used for OSR entries).
//
// The flow graph builder might decide to create multiple entrypoints
// (e.g. checked/unchecked entrypoints) and will attach those to the
// [GraphEntryInstr].
//
// Every entrypoint has its own initial definitions. The SSA renaming
// will insert phi's for parameter instructions if necessary.
class FunctionEntryInstr : public BlockEntryWithInitialDefs {
 public:
  FunctionEntryInstr(GraphEntryInstr* graph_entry,
                     intptr_t block_id,
                     intptr_t try_index,
                     intptr_t deopt_id)
      : BlockEntryWithInitialDefs(block_id,
                                  try_index,
                                  deopt_id,
                                  /*stack_depth=*/0),
        graph_entry_(graph_entry) {}

  DECLARE_INSTRUCTION(FunctionEntry)

  // The graph entry is the sole predecessor (when linked).
  virtual intptr_t PredecessorCount() const {
    return (graph_entry_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT(index == 0 && graph_entry_ != nullptr);
    return graph_entry_;
  }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  PRINT_TO_SUPPORT
  DECLARE_CUSTOM_SERIALIZATION(FunctionEntryInstr)

 private:
  virtual void ClearPredecessors() { graph_entry_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    // Only the graph entry may be added, and only once.
    ASSERT(graph_entry_ == nullptr && predecessor->IsGraphEntry());
    graph_entry_ = predecessor->AsGraphEntry();
  }

  GraphEntryInstr* graph_entry_;

  DISALLOW_COPY_AND_ASSIGN(FunctionEntryInstr);
};
2171
// Represents entry into a function from native code.
//
// Native entries are not allowed to have regular parameters. They should use
// NativeParameter instead (which doesn't count as an initial definition).
class NativeEntryInstr : public FunctionEntryInstr {
 public:
  NativeEntryInstr(const compiler::ffi::CallbackMarshaller& marshaller,
                   GraphEntryInstr* graph_entry,
                   intptr_t block_id,
                   intptr_t try_index,
                   intptr_t deopt_id)
      : FunctionEntryInstr(graph_entry, block_id, try_index, deopt_id),
        marshaller_(marshaller) {}

  DECLARE_INSTRUCTION(NativeEntry)

  PRINT_TO_SUPPORT

#define FIELD_LIST(F) \
  F(const compiler::ffi::CallbackMarshaller&, marshaller_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeEntryInstr,
                                          FunctionEntryInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  // Code-generation helpers (defined in the .cc) used to spill incoming
  // native arguments described by marshaller_.
  void SaveArguments(FlowGraphCompiler* compiler) const;
  void SaveArgument(FlowGraphCompiler* compiler,
                    const compiler::ffi::NativeLocation& loc) const;
};
2203
// Represents an OSR entrypoint to a function.
//
// The OSR entry has its own initial definitions.
class OsrEntryInstr : public BlockEntryWithInitialDefs {
 public:
  OsrEntryInstr(GraphEntryInstr* graph_entry,
                intptr_t block_id,
                intptr_t try_index,
                intptr_t deopt_id,
                intptr_t stack_depth)
      : BlockEntryWithInitialDefs(block_id, try_index, deopt_id, stack_depth),
        graph_entry_(graph_entry) {}

  DECLARE_INSTRUCTION(OsrEntry)

  // The graph entry is the sole predecessor (when linked).
  virtual intptr_t PredecessorCount() const {
    return (graph_entry_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT(index == 0 && graph_entry_ != nullptr);
    return graph_entry_;
  }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  PRINT_TO_SUPPORT
  DECLARE_CUSTOM_SERIALIZATION(OsrEntryInstr)

 private:
  virtual void ClearPredecessors() { graph_entry_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    // Only the graph entry may be added, and only once.
    ASSERT(graph_entry_ == nullptr && predecessor->IsGraphEntry());
    graph_entry_ = predecessor->AsGraphEntry();
  }

  GraphEntryInstr* graph_entry_;

  DISALLOW_COPY_AND_ASSIGN(OsrEntryInstr);
};
2243
// A join block reachable only through indirect gotos; identified by an
// additional indirect_id (see GraphEntryInstr::AddIndirectEntry).
class IndirectEntryInstr : public JoinEntryInstr {
 public:
  IndirectEntryInstr(intptr_t block_id,
                     intptr_t indirect_id,
                     intptr_t try_index,
                     intptr_t deopt_id)
      : JoinEntryInstr(block_id, try_index, deopt_id),
        indirect_id_(indirect_id) {}

  DECLARE_INSTRUCTION(IndirectEntry)

  intptr_t indirect_id() const { return indirect_id_; }

  PRINT_TO_SUPPORT

#define FIELD_LIST(F) F(const intptr_t, indirect_id_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(IndirectEntryInstr,
                                          JoinEntryInstr,
                                          FIELD_LIST)
#undef FIELD_LIST
};
2266
2267class CatchBlockEntryInstr : public BlockEntryWithInitialDefs {
2268 public:
2269 CatchBlockEntryInstr(bool is_generated,
2270 intptr_t block_id,
2271 intptr_t try_index,
2272 GraphEntryInstr* graph_entry,
2273 const Array& handler_types,
2274 intptr_t catch_try_index,
2275 bool needs_stacktrace,
2276 intptr_t deopt_id,
2277 const LocalVariable* exception_var,
2278 const LocalVariable* stacktrace_var,
2279 const LocalVariable* raw_exception_var,
2280 const LocalVariable* raw_stacktrace_var)
2281 : BlockEntryWithInitialDefs(block_id,
2282 try_index,
2283 deopt_id,
2284 /*stack_depth=*/0),
2285 graph_entry_(graph_entry),
2286 predecessor_(nullptr),
2287 catch_handler_types_(Array::ZoneHandle(ptr: handler_types.ptr())),
2288 catch_try_index_(catch_try_index),
2289 exception_var_(exception_var),
2290 stacktrace_var_(stacktrace_var),
2291 raw_exception_var_(raw_exception_var),
2292 raw_stacktrace_var_(raw_stacktrace_var),
2293 needs_stacktrace_(needs_stacktrace),
2294 is_generated_(is_generated) {}
2295
2296 DECLARE_INSTRUCTION(CatchBlockEntry)
2297
2298 virtual intptr_t PredecessorCount() const {
2299 return (predecessor_ == nullptr) ? 0 : 1;
2300 }
2301 virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
2302 ASSERT((index == 0) && (predecessor_ != nullptr));
2303 return predecessor_;
2304 }
2305
2306 GraphEntryInstr* graph_entry() const { return graph_entry_; }
2307
2308 const LocalVariable* exception_var() const { return exception_var_; }
2309 const LocalVariable* stacktrace_var() const { return stacktrace_var_; }
2310
2311 const LocalVariable* raw_exception_var() const { return raw_exception_var_; }
2312 const LocalVariable* raw_stacktrace_var() const {
2313 return raw_stacktrace_var_;
2314 }
2315
2316 bool needs_stacktrace() const { return needs_stacktrace_; }
2317
2318 bool is_generated() const { return is_generated_; }
2319
2320 // Returns try index for the try block to which this catch handler
2321 // corresponds.
2322 intptr_t catch_try_index() const { return catch_try_index_; }
2323
2324 const Array& catch_handler_types() const { return catch_handler_types_; }
2325
2326 PRINT_TO_SUPPORT
2327 DECLARE_CUSTOM_SERIALIZATION(CatchBlockEntryInstr)
2328
2329 private:
2330 friend class BlockEntryInstr; // Access to predecessor_ when inlining.
2331
2332 virtual void ClearPredecessors() { predecessor_ = nullptr; }
2333 virtual void AddPredecessor(BlockEntryInstr* predecessor) {
2334 ASSERT(predecessor_ == nullptr);
2335 predecessor_ = predecessor;
2336 }
2337
2338 GraphEntryInstr* graph_entry_;
2339 BlockEntryInstr* predecessor_;
2340 const Array& catch_handler_types_;
2341 const intptr_t catch_try_index_;
2342 const LocalVariable* exception_var_;
2343 const LocalVariable* stacktrace_var_;
2344 const LocalVariable* raw_exception_var_;
2345 const LocalVariable* raw_stacktrace_var_;
2346 const bool needs_stacktrace_;
2347 bool is_generated_;
2348
2349 DISALLOW_COPY_AND_ASSIGN(CatchBlockEntryInstr);
2350};
2351
// If the result of the allocation is not stored into any field, passed
// as an argument or used in a phi then it can't alias with any other
// SSA value.
class AliasIdentity : public ValueObject {
 public:
  // It is unknown if value has aliases.
  static AliasIdentity Unknown() { return AliasIdentity(kUnknown); }

  // It is known that value can have aliases.
  static AliasIdentity Aliased() { return AliasIdentity(kAliased); }

  // It is known that value has no aliases.
  static AliasIdentity NotAliased() { return AliasIdentity(kNotAliased); }

  // It is known that value has no aliases and it was selected by
  // allocation sinking pass as a candidate.
  static AliasIdentity AllocationSinkingCandidate() {
    return AliasIdentity(kAllocationSinkingCandidate);
  }

#define FOR_EACH_ALIAS_IDENTITY_VALUE(V)                                       \
  V(Unknown, 0)                                                                \
  V(NotAliased, 1)                                                             \
  V(Aliased, 2)                                                                \
  V(AllocationSinkingCandidate, 3)

  const char* ToCString() {
    switch (value_) {
#define VALUE_CASE(name, val)                                                  \
  case k##name:                                                                \
    return #name;
      FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_CASE)
#undef VALUE_CASE
      default:
        UNREACHABLE();
        return nullptr;
    }
  }

  bool IsUnknown() const { return value_ == kUnknown; }
  bool IsAliased() const { return value_ == kAliased; }
  // kNotAliased is a bit flag: both kNotAliased (1) and
  // kAllocationSinkingCandidate (3) carry it, as the COMPILE_ASSERTs below
  // verify, so both count as "not aliased".
  bool IsNotAliased() const { return (value_ & kNotAliased) != 0; }
  bool IsAllocationSinkingCandidate() const {
    return value_ == kAllocationSinkingCandidate;
  }

  AliasIdentity(const AliasIdentity& other)
      : ValueObject(), value_(other.value_) {}

  AliasIdentity& operator=(const AliasIdentity& other) {
    value_ = other.value_;
    return *this;
  }

  void Write(FlowGraphSerializer* s) const;
  explicit AliasIdentity(FlowGraphDeserializer* d);

 private:
  explicit AliasIdentity(intptr_t value) : value_(value) {}

#define VALUE_DEFN(name, val) k##name = val,
  enum { FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_DEFN) };
#undef VALUE_DEFN

// Undef the FOR_EACH helper macro, since the enum is private.
#undef FOR_EACH_ALIAS_IDENTITY_VALUE

  COMPILE_ASSERT((kUnknown & kNotAliased) == 0);
  COMPILE_ASSERT((kAliased & kNotAliased) == 0);
  COMPILE_ASSERT((kAllocationSinkingCandidate & kNotAliased) != 0);

  intptr_t value_;
};
2425
2426// Abstract super-class of all instructions that define a value (Bind, Phi).
2427class Definition : public Instruction {
2428 public:
2429 explicit Definition(intptr_t deopt_id = DeoptId::kNone)
2430 : Instruction(deopt_id) {}
2431
2432 explicit Definition(const InstructionSource& source,
2433 intptr_t deopt_id = DeoptId::kNone)
2434 : Instruction(source, deopt_id) {}
2435
2436 // Overridden by definitions that have call counts.
2437 virtual intptr_t CallCount() const { return -1; }
2438
2439 intptr_t temp_index() const { return temp_index_; }
2440 void set_temp_index(intptr_t index) { temp_index_ = index; }
2441 void ClearTempIndex() { temp_index_ = -1; }
2442 bool HasTemp() const { return temp_index_ >= 0; }
2443
2444 intptr_t ssa_temp_index() const { return ssa_temp_index_; }
2445 void set_ssa_temp_index(intptr_t index) {
2446 ASSERT(index >= 0);
2447 ssa_temp_index_ = index;
2448 }
2449 bool HasSSATemp() const { return ssa_temp_index_ >= 0; }
2450 void ClearSSATempIndex() { ssa_temp_index_ = -1; }
2451
2452 intptr_t vreg(intptr_t index) const {
2453 ASSERT((index >= 0) && (index < location_count()));
2454 if (ssa_temp_index_ == -1) return -1;
2455 return ssa_temp_index_ * kMaxLocationCount + index;
2456 }
2457 intptr_t location_count() const { return LocationCount(rep: representation()); }
2458 bool HasPairRepresentation() const { return location_count() == 2; }
2459
2460 // Compile time type of the definition, which may be requested before type
2461 // propagation during graph building.
2462 CompileType* Type() {
2463 if (type_ == nullptr) {
2464 auto type = new CompileType(ComputeType());
2465 type->set_owner(this);
2466 set_type(type);
2467 }
2468 return type_;
2469 }
2470
2471 bool HasType() const { return (type_ != nullptr); }
2472
2473 inline bool IsInt64Definition();
2474
2475 bool IsInt32Definition() {
2476 return IsBinaryInt32Op() || IsBoxInt32() || IsUnboxInt32() ||
2477 IsIntConverter();
2478 }
2479
2480 // Compute compile type for this definition. It is safe to use this
2481 // approximation even before type propagator was run (e.g. during graph
2482 // building).
2483 virtual CompileType ComputeType() const { return CompileType::Dynamic(); }
2484
2485 // Update CompileType of the definition. Returns true if the type has changed.
2486 virtual bool RecomputeType() { return false; }
2487
2488 PRINT_OPERANDS_TO_SUPPORT
2489 PRINT_TO_SUPPORT
2490
2491 bool UpdateType(CompileType new_type) {
2492 if (type_ == nullptr) {
2493 auto type = new CompileType(new_type);
2494 type->set_owner(this);
2495 set_type(type);
2496 return true;
2497 }
2498
2499 if (type_->IsNone() || !type_->IsEqualTo(other: &new_type)) {
2500 *type_ = new_type;
2501 return true;
2502 }
2503
2504 return false;
2505 }
2506
2507 bool HasUses() const {
2508 return (input_use_list_ != nullptr) || (env_use_list_ != nullptr);
2509 }
2510 bool HasOnlyUse(Value* use) const;
2511 bool HasOnlyInputUse(Value* use) const;
2512
2513 Value* input_use_list() const { return input_use_list_; }
2514 void set_input_use_list(Value* head) { input_use_list_ = head; }
2515
2516 Value* env_use_list() const { return env_use_list_; }
2517 void set_env_use_list(Value* head) { env_use_list_ = head; }
2518
2519 ValueListIterable input_uses() const {
2520 return ValueListIterable(input_use_list_);
2521 }
2522
2523 void AddInputUse(Value* value) { Value::AddToList(value, list: &input_use_list_); }
2524 void AddEnvUse(Value* value) { Value::AddToList(value, list: &env_use_list_); }
2525
2526 // Replace uses of this definition with uses of other definition or value.
2527 // Precondition: use lists must be properly calculated.
2528 // Postcondition: use lists and use values are still valid.
2529 void ReplaceUsesWith(Definition* other);
2530
2531 // Replace this definition with another instruction. Use the provided result
2532 // definition to replace uses of the original definition. If replacing during
2533 // iteration, pass the iterator so that the instruction can be replaced
2534 // without affecting iteration order, otherwise pass a nullptr iterator.
2535 void ReplaceWithResult(Instruction* replacement,
2536 Definition* replacement_for_uses,
2537 ForwardInstructionIterator* iterator);
2538
2539 // Replace this definition and all uses with another definition. If
2540 // replacing during iteration, pass the iterator so that the instruction
2541 // can be replaced without affecting iteration order, otherwise pass a
2542 // nullptr iterator.
2543 void ReplaceWith(Definition* other, ForwardInstructionIterator* iterator);
2544
2545 // A value in the constant propagation lattice.
2546 // - non-constant sentinel
2547 // - a constant (any non-sentinel value)
2548 // - unknown sentinel
2549 Object& constant_value();
2550
2551 virtual void InferRange(RangeAnalysis* analysis, Range* range);
2552
2553 Range* range() const { return range_; }
2554 void set_range(const Range&);
2555
2556 // Definitions can be canonicalized only into definitions to ensure
2557 // this check statically we override base Canonicalize with a Canonicalize
2558 // returning Definition (return type is covariant).
2559 virtual Definition* Canonicalize(FlowGraph* flow_graph);
2560
2561 static constexpr intptr_t kReplacementMarker = -2;
2562
2563 Definition* Replacement() {
2564 if (ssa_temp_index_ == kReplacementMarker) {
2565 return reinterpret_cast<Definition*>(temp_index_);
2566 }
2567 return this;
2568 }
2569
2570 void SetReplacement(Definition* other) {
2571 ASSERT(ssa_temp_index_ >= 0);
2572 ASSERT(WasEliminated());
2573 ssa_temp_index_ = kReplacementMarker;
2574 temp_index_ = reinterpret_cast<intptr_t>(other);
2575 }
2576
2577 virtual AliasIdentity Identity() const { return AliasIdentity::Unknown(); }
2578
2579 virtual void SetIdentity(AliasIdentity identity) { UNREACHABLE(); }
2580
2581 // Find the original definition of [this] by following through any
2582 // redefinition and check instructions.
2583 Definition* OriginalDefinition();
2584
2585 // If this definition is a redefinition (in a broad sense, this includes
2586 // CheckArrayBound and CheckNull instructions) return [Value] corresponding
2587 // to the input which is being redefined.
2588 // Otherwise return [nullptr].
2589 virtual Value* RedefinedValue() const;
2590
2591 // Find the original definition of [this].
2592 //
2593 // This is an extension of [OriginalDefinition] which also follows through any
2594 // boxing/unboxing and constraint instructions.
2595 Definition* OriginalDefinitionIgnoreBoxingAndConstraints();
2596
2597 // Helper method to determine if definition denotes an array length.
2598 static bool IsArrayLength(Definition* def);
2599
2600 virtual Definition* AsDefinition() { return this; }
2601 virtual const Definition* AsDefinition() const { return this; }
2602
2603 DECLARE_CUSTOM_SERIALIZATION(Definition)
2604
2605 protected:
2606 friend class RangeAnalysis;
2607 friend class Value;
2608
2609 Range* range_ = nullptr;
2610
2611 void set_type(CompileType* type) {
2612 ASSERT(type->owner() == this);
2613 type_ = type;
2614 }
2615
2616#if defined(INCLUDE_IL_PRINTER)
2617 const char* TypeAsCString() const {
2618 return HasType() ? type_->ToCString() : "";
2619 }
2620#endif
2621
2622 private:
2623 intptr_t temp_index_ = -1;
2624 intptr_t ssa_temp_index_ = -1;
2625 Value* input_use_list_ = nullptr;
2626 Value* env_use_list_ = nullptr;
2627
2628 Object* constant_value_ = nullptr;
2629 CompileType* type_ = nullptr;
2630
2631 DISALLOW_COPY_AND_ASSIGN(Definition);
2632};
2633
2634// Change a value's definition after use lists have been computed.
2635inline void Value::BindTo(Definition* def) {
2636 RemoveFromUseList();
2637 set_definition(def);
2638 def->AddInputUse(value: this);
2639}
2640
2641inline void Value::BindToEnvironment(Definition* def) {
2642 RemoveFromUseList();
2643 set_definition(def);
2644 def->AddEnvUse(value: this);
2645}
2646
// A definition with no unknown side effects, and therefore eligible for
// common subexpression elimination.
class PureDefinition : public Definition {
 public:
  explicit PureDefinition(intptr_t deopt_id) : Definition(deopt_id) {}
  explicit PureDefinition(const InstructionSource& source, intptr_t deopt_id)
      : Definition(source, deopt_id) {}

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(PureDefinition, Definition)
};
2658
// Definition with a fixed number N of inputs stored inline. ThrowsTrait
// supplies MayThrow() and CSETrait selects Definition or PureDefinition as
// the base class.
template <intptr_t N,
          typename ThrowsTrait,
          template <typename Impure, typename Pure> class CSETrait = NoCSE>
class TemplateDefinition : public CSETrait<Definition, PureDefinition>::Base {
 public:
  using BaseClass = typename CSETrait<Definition, PureDefinition>::Base;

  explicit TemplateDefinition(intptr_t deopt_id = DeoptId::kNone)
      : BaseClass(deopt_id), inputs_() {}
  TemplateDefinition(const InstructionSource& source,
                     intptr_t deopt_id = DeoptId::kNone)
      : BaseClass(source, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }

  DECLARE_EMPTY_SERIALIZATION(TemplateDefinition, BaseClass)
 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  friend class BranchInstr;
  friend class IfThenElseInstr;

  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
2687
// Definition whose number of inputs is determined at construction time.
// Inputs can either be moved in as a ready-made array or allocated as
// num_inputs null slots to be filled in later via SetInputAt.
class VariadicDefinition : public Definition {
 public:
  explicit VariadicDefinition(InputsArray&& inputs,
                              intptr_t deopt_id = DeoptId::kNone)
      : Definition(deopt_id), inputs_(std::move(inputs)) {
    // Register this definition as the user of each input value.
    for (intptr_t i = 0, n = inputs_.length(); i < n; ++i) {
      SetInputAt(i, inputs_[i]);
    }
  }
  VariadicDefinition(InputsArray&& inputs,
                     const InstructionSource& source,
                     intptr_t deopt_id = DeoptId::kNone)
      : Definition(source, deopt_id), inputs_(std::move(inputs)) {
    for (intptr_t i = 0, n = inputs_.length(); i < n; ++i) {
      SetInputAt(i, inputs_[i]);
    }
  }
  explicit VariadicDefinition(const intptr_t num_inputs,
                              intptr_t deopt_id = DeoptId::kNone)
      : Definition(deopt_id), inputs_(num_inputs) {
    inputs_.EnsureLength(num_inputs, nullptr);
  }
  VariadicDefinition(const intptr_t num_inputs,
                     const InstructionSource& source,
                     intptr_t deopt_id = DeoptId::kNone)
      : Definition(source, deopt_id), inputs_(num_inputs) {
    inputs_.EnsureLength(num_inputs, nullptr);
  }

  intptr_t InputCount() const { return inputs_.length(); }
  Value* InputAt(intptr_t i) const { return inputs_[i]; }

  DECLARE_CUSTOM_SERIALIZATION(VariadicDefinition)

 protected:
  InputsArray inputs_;

 private:
  void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
2728
// SSA phi: selects one of its inputs depending on which predecessor of the
// owning join block control arrived from. Input i corresponds to
// predecessor i of block().
class PhiInstr : public VariadicDefinition {
 public:
  PhiInstr(JoinEntryInstr* block, intptr_t num_inputs)
      : VariadicDefinition(num_inputs),
        block_(block),
        representation_(kTagged),
        is_alive_(false),
        is_receiver_(kUnknownReceiver) {}

  // Get the block entry for that instruction.
  virtual BlockEntryInstr* GetBlock() { return block(); }
  JoinEntryInstr* block() const { return block_; }

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // Phi is alive if it reaches a non-environment use.
  bool is_alive() const { return is_alive_; }
  void mark_alive() { is_alive_ = true; }
  void mark_dead() { is_alive_ = false; }

  // All inputs must share the phi's own representation.
  virtual Representation RequiredInputRepresentation(intptr_t i) const {
    return representation_;
  }

  virtual Representation representation() const { return representation_; }

  virtual void set_representation(Representation r) { representation_ = r; }

  // Only Int32 phis in JIT mode are unboxed optimistically.
  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return (CompilerState::Current().is_aot() ||
            (representation_ != kUnboxedInt32))
               ? kNotSpeculative
               : kGuardInputs;
  }

  // Phis are not hashed for CSE.
  virtual uword Hash() const {
    UNREACHABLE();
    return 0;
  }

  DECLARE_INSTRUCTION(Phi)

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  BitVector* reaching_defs() const { return reaching_defs_; }

  void set_reaching_defs(BitVector* reaching_defs) {
    reaching_defs_ = reaching_defs;
  }

  virtual bool MayThrow() const { return false; }

  // A phi is redundant if all input operands are the same.
  bool IsRedundant() const;

  // A phi is redundant if all input operands are redefinitions of the same
  // value. Returns the replacement for this phi if it is redundant.
  // The replacement is selected among values redefined by inputs.
  Definition* GetReplacementForRedundantPhi() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  PRINT_TO_SUPPORT
  DECLARE_CUSTOM_SERIALIZATION(PhiInstr)

  enum ReceiverType { kUnknownReceiver = -1, kNotReceiver = 0, kReceiver = 1 };

  ReceiverType is_receiver() const {
    return static_cast<ReceiverType>(is_receiver_);
  }

  void set_is_receiver(ReceiverType is_receiver) { is_receiver_ = is_receiver; }

 private:
  // Direct access to inputs_ in order to resize it due to unreachable
  // predecessors.
  friend class ConstantPropagator;

  JoinEntryInstr* block_;
  Representation representation_;
  BitVector* reaching_defs_ = nullptr;
  bool is_alive_;
  // Stores a ReceiverType value; int8_t to keep the instruction small.
  int8_t is_receiver_;

  DISALLOW_COPY_AND_ASSIGN(PhiInstr);
};
2821
2822// This instruction represents an incoming parameter for a function entry,
2823// or incoming value for OSR entry or incoming value for a catch entry.
2824// [env_index] is a position of the parameter in the flow graph environment.
2825// [param_index] is a position of the function parameter, or -1 if
2826// this instruction doesn't correspond to a real function parameter.
class ParameterInstr : public TemplateDefinition<0, NoThrow> {
 public:
  // [param_index] when ParameterInstr doesn't correspond to
  // a function parameter.
  static constexpr intptr_t kNotFunctionParameter = -1;

  // [env_index] is the position in the flow graph environment,
  // [param_index] the position among the real function parameters (or
  // kNotFunctionParameter), [param_offset] the offset in words of the
  // parameter's last slot relative to the first parameter (see FIELD_LIST
  // comments below), and [base_reg] the register the offset is relative to.
  ParameterInstr(intptr_t env_index,
                 intptr_t param_index,
                 intptr_t param_offset,
                 BlockEntryInstr* block,
                 Representation representation,
                 Register base_reg = FPREG)
      : env_index_(env_index),
        param_index_(param_index),
        param_offset_(param_offset),
        base_reg_(base_reg),
        representation_(representation),
        block_(block) {}

  DECLARE_INSTRUCTION(Parameter)
  DECLARE_ATTRIBUTES(index())

  // Index of the parameter in the flow graph environment.
  intptr_t env_index() const { return env_index_; }
  intptr_t index() const { return env_index(); }

  // Index of the real function parameter
  // (between 0 and function.NumParameters()), or -1.
  intptr_t param_index() const { return param_index_; }

  intptr_t param_offset() const { return param_offset_; }
  Register base_reg() const { return base_reg_; }

  // Get the block entry for that instruction.
  virtual BlockEntryInstr* GetBlock() { return block_; }
  void set_block(BlockEntryInstr* block) { block_ = block; }

  virtual Representation representation() const { return representation_; }

  // Parameters have no inputs, so this should never be queried.
  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    UNREACHABLE();
    return kTagged;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // Parameters are not expected to participate in hashing (e.g. CSE).
  virtual uword Hash() const {
    UNREACHABLE();
    return 0;
  }

  virtual CompileType ComputeType() const;

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const intptr_t, env_index_)                                                \
  F(const intptr_t, param_index_)                                              \
  /* The offset (in words) of the last slot of the parameter, relative */      \
  /* to the first parameter. */                                                \
  /* It is used in the FlowGraphAllocator when it sets the assigned */         \
  /* location and spill slot for the parameter definition. */                  \
  F(const intptr_t, param_offset_)                                             \
  F(const Register, base_reg_)                                                 \
  F(const Representation, representation_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ParameterInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  // Not serialized: re-established when the flow graph is reconstructed.
  BlockEntryInstr* block_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(ParameterInstr);
};
2905
2906// Native parameters are not treated as initial definitions because they cannot
2907// be inlined and are only usable in optimized code. The location must be a
2908// stack location relative to the position of the stack (SPREG) after
2909// register-based arguments have been saved on entry to a native call. See
2910// NativeEntryInstr::EmitNativeCode for more details.
2911//
// TODO(33549): Unify with ParameterInstr.
2913class NativeParameterInstr : public TemplateDefinition<0, NoThrow> {
2914 public:
2915 NativeParameterInstr(const compiler::ffi::CallbackMarshaller& marshaller,
2916 intptr_t def_index)
2917 : marshaller_(marshaller), def_index_(def_index) {}
2918
2919 DECLARE_INSTRUCTION(NativeParameter)
2920
2921 virtual Representation representation() const {
2922 return marshaller_.RepInFfiCall(def_index_global: def_index_);
2923 }
2924
2925 virtual bool ComputeCanDeoptimize() const { return false; }
2926
2927 virtual bool HasUnknownSideEffects() const { return false; }
2928
2929 // TODO(sjindel): We can make this more precise.
2930 virtual CompileType ComputeType() const { return CompileType::Dynamic(); }
2931
2932 PRINT_OPERANDS_TO_SUPPORT
2933
2934#define FIELD_LIST(F) \
2935 F(const compiler::ffi::CallbackMarshaller&, marshaller_) \
2936 F(const intptr_t, def_index_)
2937
2938 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeParameterInstr,
2939 TemplateDefinition,
2940 FIELD_LIST)
2941#undef FIELD_LIST
2942
2943 private:
2944 DISALLOW_COPY_AND_ASSIGN(NativeParameterInstr);
2945};
2946
2947// Stores a tagged pointer to a slot accessible from a fixed register. It has
2948// the form:
2949//
2950// base_reg[index + #constant] = value
2951//
2952// Input 0: A tagged Smi [index]
2953// Input 1: A tagged pointer [value]
2954// offset: A signed constant offset which fits into 8 bits
2955//
// Currently this instruction pins the register to be FP.
2957//
2958// This low-level instruction is non-inlinable since it makes assumptions about
2959// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
2960class StoreIndexedUnsafeInstr : public TemplateInstruction<2, NoThrow> {
2961 public:
2962 StoreIndexedUnsafeInstr(Value* index, Value* value, intptr_t offset)
2963 : offset_(offset) {
2964 SetInputAt(i: kIndexPos, value: index);
2965 SetInputAt(i: kValuePos, value);
2966 }
2967
2968 enum { kIndexPos = 0, kValuePos = 1 };
2969
2970 DECLARE_INSTRUCTION(StoreIndexedUnsafe)
2971
2972 virtual Representation RequiredInputRepresentation(intptr_t index) const {
2973 ASSERT(index == kIndexPos || index == kValuePos);
2974 return kTagged;
2975 }
2976 virtual bool ComputeCanDeoptimize() const { return false; }
2977 virtual bool HasUnknownSideEffects() const { return false; }
2978
2979 virtual bool AttributesEqual(const Instruction& other) const {
2980 return other.AsStoreIndexedUnsafe()->offset() == offset();
2981 }
2982
2983 Value* index() const { return inputs_[kIndexPos]; }
2984 Value* value() const { return inputs_[kValuePos]; }
2985 Register base_reg() const { return FPREG; }
2986 intptr_t offset() const { return offset_; }
2987
2988 PRINT_OPERANDS_TO_SUPPORT
2989
2990#define FIELD_LIST(F) F(const intptr_t, offset_)
2991
2992 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreIndexedUnsafeInstr,
2993 TemplateInstruction,
2994 FIELD_LIST)
2995#undef FIELD_LIST
2996
2997 private:
2998 DISALLOW_COPY_AND_ASSIGN(StoreIndexedUnsafeInstr);
2999};
3000
// Loads a value from a slot accessible from a fixed register. It has
3002// the form:
3003//
3004// base_reg[index + #constant]
3005//
3006// Input 0: A tagged Smi [index]
3007// offset: A signed constant offset which fits into 8 bits
3008//
// Currently this instruction pins the register to be FP.
//
// This low-level instruction is non-inlinable since it makes assumptions about
3012// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
3013class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> {
3014 public:
3015 LoadIndexedUnsafeInstr(Value* index,
3016 intptr_t offset,
3017 CompileType result_type,
3018 Representation representation = kTagged)
3019 : offset_(offset), representation_(representation) {
3020 UpdateType(new_type: result_type);
3021 SetInputAt(i: 0, value: index);
3022 }
3023
3024 DECLARE_INSTRUCTION(LoadIndexedUnsafe)
3025
3026 virtual Representation RequiredInputRepresentation(intptr_t index) const {
3027 ASSERT(index == 0);
3028 return kTagged;
3029 }
3030 virtual bool ComputeCanDeoptimize() const { return false; }
3031 virtual bool HasUnknownSideEffects() const { return false; }
3032
3033 virtual bool AttributesEqual(const Instruction& other) const {
3034 return other.AsLoadIndexedUnsafe()->offset() == offset();
3035 }
3036
3037 virtual Representation representation() const { return representation_; }
3038
3039 Value* index() const { return InputAt(i: 0); }
3040 Register base_reg() const { return FPREG; }
3041 intptr_t offset() const { return offset_; }
3042
3043 PRINT_OPERANDS_TO_SUPPORT
3044
3045#define FIELD_LIST(F) \
3046 F(const intptr_t, offset_) \
3047 F(const Representation, representation_)
3048
3049 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadIndexedUnsafeInstr,
3050 TemplateDefinition,
3051 FIELD_LIST)
3052#undef FIELD_LIST
3053
3054 private:
3055 DISALLOW_COPY_AND_ASSIGN(LoadIndexedUnsafeInstr);
3056};
3057
3058class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
3059 public:
3060 MemoryCopyInstr(Value* src,
3061 Value* dest,
3062 Value* src_start,
3063 Value* dest_start,
3064 Value* length,
3065 classid_t src_cid,
3066 classid_t dest_cid,
3067 bool unboxed_length)
3068 : src_cid_(src_cid),
3069 dest_cid_(dest_cid),
3070 element_size_(Instance::ElementSizeFor(cid: src_cid)),
3071 unboxed_length_(unboxed_length) {
3072 ASSERT(IsArrayTypeSupported(src_cid));
3073 ASSERT(IsArrayTypeSupported(dest_cid));
3074 ASSERT(Instance::ElementSizeFor(src_cid) ==
3075 Instance::ElementSizeFor(dest_cid));
3076 SetInputAt(i: kSrcPos, value: src);
3077 SetInputAt(i: kDestPos, value: dest);
3078 SetInputAt(i: kSrcStartPos, value: src_start);
3079 SetInputAt(i: kDestStartPos, value: dest_start);
3080 SetInputAt(i: kLengthPos, value: length);
3081 }
3082
3083 enum {
3084 kSrcPos = 0,
3085 kDestPos = 1,
3086 kSrcStartPos = 2,
3087 kDestStartPos = 3,
3088 kLengthPos = 4
3089 };
3090
3091 DECLARE_INSTRUCTION(MemoryCopy)
3092
3093 virtual Representation RequiredInputRepresentation(intptr_t index) const {
3094 if (index == kLengthPos && unboxed_length_) {
3095 return kUnboxedIntPtr;
3096 }
3097 // All inputs are tagged (for now).
3098 return kTagged;
3099 }
3100
3101 virtual bool ComputeCanDeoptimize() const { return false; }
3102 virtual bool HasUnknownSideEffects() const { return true; }
3103
3104 virtual bool AttributesEqual(const Instruction& other) const { return true; }
3105
3106 Value* src() const { return inputs_[kSrcPos]; }
3107 Value* dest() const { return inputs_[kDestPos]; }
3108 Value* src_start() const { return inputs_[kSrcStartPos]; }
3109 Value* dest_start() const { return inputs_[kDestStartPos]; }
3110 Value* length() const { return inputs_[kLengthPos]; }
3111
3112 intptr_t element_size() const { return element_size_; }
3113 bool unboxed_length() const { return unboxed_length_; }
3114
3115 // Optimizes MemoryCopyInstr with constant parameters to use larger moves.
3116 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
3117
3118#define FIELD_LIST(F) \
3119 F(classid_t, src_cid_) \
3120 F(classid_t, dest_cid_) \
3121 F(intptr_t, element_size_) \
3122 F(bool, unboxed_length_)
3123
3124 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MemoryCopyInstr,
3125 TemplateInstruction,
3126 FIELD_LIST)
3127#undef FIELD_LIST
3128
3129 private:
3130 // Set array_reg to point to the index indicated by start (contained in
3131 // start_loc) of the typed data or string in array (contained in array_reg).
3132 void EmitComputeStartPointer(FlowGraphCompiler* compiler,
3133 classid_t array_cid,
3134 Register array_reg,
3135 Location start_loc);
3136
3137 static bool IsArrayTypeSupported(classid_t array_cid) {
3138 if (IsTypedDataBaseClassId(index: array_cid)) {
3139 return true;
3140 }
3141 switch (array_cid) {
3142 case kOneByteStringCid:
3143 case kTwoByteStringCid:
3144 case kExternalOneByteStringCid:
3145 case kExternalTwoByteStringCid:
3146 return true;
3147 default:
3148 return false;
3149 }
3150 }
3151
3152 DISALLOW_COPY_AND_ASSIGN(MemoryCopyInstr);
3153};
3154
3155// Unwinds the current frame and tail calls a target.
3156//
// The return address saved by the original caller of this frame will be in its
3158// usual location (stack or LR). The arguments descriptor supplied by the
3159// original caller will be put into ARGS_DESC_REG.
3160//
// This low-level instruction is non-inlinable since it makes assumptions about
3162// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
3163class TailCallInstr : public TemplateInstruction<1, Throws, Pure> {
3164 public:
3165 TailCallInstr(const Code& code, Value* arg_desc) : code_(code) {
3166 SetInputAt(i: 0, value: arg_desc);
3167 }
3168
3169 DECLARE_INSTRUCTION(TailCall)
3170
3171 const Code& code() const { return code_; }
3172
3173 // Two tailcalls can be canonicalized into one instruction if both have the
3174 // same destination.
3175 virtual bool AttributesEqual(const Instruction& other) const {
3176 return &other.AsTailCall()->code() == &code();
3177 }
3178
3179 // Since no code after this instruction will be executed, there will be no
3180 // side-effects for the following code.
3181 virtual bool HasUnknownSideEffects() const { return false; }
3182 virtual bool ComputeCanDeoptimize() const { return false; }
3183
3184 PRINT_OPERANDS_TO_SUPPORT
3185
3186#define FIELD_LIST(F) F(const Code&, code_)
3187
3188 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TailCallInstr,
3189 TemplateInstruction,
3190 FIELD_LIST)
3191#undef FIELD_LIST
3192
3193 private:
3194 DISALLOW_COPY_AND_ASSIGN(TailCallInstr);
3195};
3196
3197// Move the given argument value into the place where callee expects it.
3198// Currently all outgoing arguments are located in [SP+idx]
3199class MoveArgumentInstr : public TemplateDefinition<1, NoThrow> {
3200 public:
3201 explicit MoveArgumentInstr(Value* value,
3202 Representation representation,
3203 intptr_t sp_relative_index)
3204 : representation_(representation), sp_relative_index_(sp_relative_index) {
3205 SetInputAt(i: 0, value);
3206 }
3207
3208 DECLARE_INSTRUCTION(MoveArgument)
3209
3210 intptr_t sp_relative_index() const { return sp_relative_index_; }
3211
3212 virtual CompileType ComputeType() const;
3213
3214 Value* value() const { return InputAt(i: 0); }
3215
3216 virtual bool ComputeCanDeoptimize() const { return false; }
3217
3218 virtual bool HasUnknownSideEffects() const { return false; }
3219
3220 virtual TokenPosition token_pos() const {
3221 return TokenPosition::kMoveArgument;
3222 }
3223
3224 virtual Representation representation() const { return representation_; }
3225
3226 virtual Representation RequiredInputRepresentation(intptr_t index) const {
3227 ASSERT(index == 0);
3228 return representation();
3229 }
3230
3231 PRINT_OPERANDS_TO_SUPPORT
3232
3233#define FIELD_LIST(F) \
3234 F(const Representation, representation_) \
3235 F(const intptr_t, sp_relative_index_)
3236
3237 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MoveArgumentInstr,
3238 TemplateDefinition,
3239 FIELD_LIST)
3240#undef FIELD_LIST
3241
3242 private:
3243 DISALLOW_COPY_AND_ASSIGN(MoveArgumentInstr);
3244};
3245
3246inline Value* Instruction::ArgumentValueAt(intptr_t index) const {
3247 MoveArgumentsArray* move_arguments = GetMoveArguments();
3248 return move_arguments != nullptr ? (*move_arguments)[index]->value()
3249 : InputAt(i: index);
3250}
3251
3252inline Definition* Instruction::ArgumentAt(intptr_t index) const {
3253 return ArgumentValueAt(index)->definition();
3254}
3255
3256class ReturnInstr : public TemplateInstruction<1, NoThrow> {
3257 public:
3258 ReturnInstr(const InstructionSource& source,
3259 Value* value,
3260 intptr_t deopt_id,
3261 Representation representation = kTagged)
3262 : TemplateInstruction(source, deopt_id),
3263 token_pos_(source.token_pos),
3264 representation_(representation) {
3265 SetInputAt(i: 0, value);
3266 }
3267
3268 DECLARE_INSTRUCTION(Return)
3269
3270 virtual TokenPosition token_pos() const { return token_pos_; }
3271 Value* value() const { return inputs_[0]; }
3272
3273 virtual bool CanBecomeDeoptimizationTarget() const {
3274 // Return instruction might turn into a Goto instruction after inlining.
3275 // Every Goto must have an environment.
3276 return true;
3277 }
3278
3279 virtual bool ComputeCanDeoptimize() const { return false; }
3280
3281 virtual bool HasUnknownSideEffects() const { return false; }
3282
3283 virtual bool AttributesEqual(const Instruction& other) const {
3284 auto const other_return = other.AsReturn();
3285 return token_pos() == other_return->token_pos();
3286 }
3287
3288 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
3289 ASSERT(index == 0);
3290 return kNotSpeculative;
3291 }
3292
3293 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
3294
3295 virtual Representation representation() const { return representation_; }
3296
3297 virtual Representation RequiredInputRepresentation(intptr_t index) const {
3298 ASSERT(index == 0);
3299 return representation_;
3300 }
3301
3302#define FIELD_LIST(F) \
3303 F(const TokenPosition, token_pos_) \
3304 F(const Representation, representation_)
3305
3306 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ReturnInstr,
3307 TemplateInstruction,
3308 FIELD_LIST)
3309#undef FIELD_LIST
3310
3311 private:
3312 const Code& GetReturnStub(FlowGraphCompiler* compiler) const;
3313
3314 DISALLOW_COPY_AND_ASSIGN(ReturnInstr);
3315};
3316
3317// Represents a return from a Dart function into native code.
3318class NativeReturnInstr : public ReturnInstr {
3319 public:
3320 NativeReturnInstr(const InstructionSource& source,
3321 Value* value,
3322 const compiler::ffi::CallbackMarshaller& marshaller,
3323 intptr_t deopt_id)
3324 : ReturnInstr(source, value, deopt_id), marshaller_(marshaller) {}
3325
3326 DECLARE_INSTRUCTION(NativeReturn)
3327
3328 PRINT_OPERANDS_TO_SUPPORT
3329
3330 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
3331 ASSERT(idx == 0);
3332 return marshaller_.RepInFfiCall(def_index_global: compiler::ffi::kResultIndex);
3333 }
3334
3335 virtual bool CanBecomeDeoptimizationTarget() const {
3336 // Unlike ReturnInstr, NativeReturnInstr cannot be inlined (because it's
3337 // returning into native code).
3338 return false;
3339 }
3340
3341#define FIELD_LIST(F) F(const compiler::ffi::CallbackMarshaller&, marshaller_)
3342
3343 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeReturnInstr,
3344 ReturnInstr,
3345 FIELD_LIST)
3346#undef FIELD_LIST
3347
3348 private:
3349 void EmitReturnMoves(FlowGraphCompiler* compiler);
3350
3351 DISALLOW_COPY_AND_ASSIGN(NativeReturnInstr);
3352};
3353
3354class ThrowInstr : public TemplateInstruction<1, Throws> {
3355 public:
3356 explicit ThrowInstr(const InstructionSource& source,
3357 intptr_t deopt_id,
3358 Value* exception)
3359 : TemplateInstruction(source, deopt_id), token_pos_(source.token_pos) {
3360 SetInputAt(i: 0, value: exception);
3361 }
3362
3363 DECLARE_INSTRUCTION(Throw)
3364
3365 virtual TokenPosition token_pos() const { return token_pos_; }
3366 Value* exception() const { return inputs_[0]; }
3367
3368 virtual bool ComputeCanDeoptimize() const {
3369 return !CompilerState::Current().is_aot();
3370 }
3371
3372 virtual bool HasUnknownSideEffects() const { return false; }
3373
3374#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
3375
3376 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ThrowInstr,
3377 TemplateInstruction,
3378 FIELD_LIST)
3379#undef FIELD_LIST
3380
3381 private:
3382 DISALLOW_COPY_AND_ASSIGN(ThrowInstr);
3383};
3384
3385class ReThrowInstr : public TemplateInstruction<2, Throws> {
3386 public:
3387 // 'catch_try_index' can be kInvalidTryIndex if the
3388 // rethrow has been artificially generated by the parser.
3389 ReThrowInstr(const InstructionSource& source,
3390 intptr_t catch_try_index,
3391 intptr_t deopt_id,
3392 Value* exception,
3393 Value* stacktrace)
3394 : TemplateInstruction(source, deopt_id),
3395 token_pos_(source.token_pos),
3396 catch_try_index_(catch_try_index) {
3397 SetInputAt(i: 0, value: exception);
3398 SetInputAt(i: 1, value: stacktrace);
3399 }
3400
3401 DECLARE_INSTRUCTION(ReThrow)
3402
3403 virtual TokenPosition token_pos() const { return token_pos_; }
3404 intptr_t catch_try_index() const { return catch_try_index_; }
3405 Value* exception() const { return inputs_[0]; }
3406 Value* stacktrace() const { return inputs_[1]; }
3407
3408 virtual bool ComputeCanDeoptimize() const {
3409 return !CompilerState::Current().is_aot();
3410 }
3411
3412 virtual bool HasUnknownSideEffects() const { return false; }
3413
3414#define FIELD_LIST(F) \
3415 F(const TokenPosition, token_pos_) \
3416 F(const intptr_t, catch_try_index_)
3417
3418 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ReThrowInstr,
3419 TemplateInstruction,
3420 FIELD_LIST)
3421#undef FIELD_LIST
3422
3423 private:
3424 DISALLOW_COPY_AND_ASSIGN(ReThrowInstr);
3425};
3426
// Instruction carrying a diagnostic [message]; presumably emitted as a
// breakpoint/stop in generated code — see the per-architecture
// EmitNativeCode for the exact behavior (TODO: confirm).
class StopInstr : public TemplateInstruction<0, NoThrow> {
 public:
  // [message] must be non-null and must outlive this instruction.
  explicit StopInstr(const char* message) : message_(message) {
    ASSERT(message != nullptr);
  }

  const char* message() const { return message_; }

  DECLARE_INSTRUCTION(Stop);

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

#define FIELD_LIST(F) F(const char*, message_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StopInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(StopInstr);
};
3451
// Unconditional jump to a join block.
class GotoInstr : public TemplateInstruction<0, NoThrow> {
 public:
  explicit GotoInstr(JoinEntryInstr* entry, intptr_t deopt_id)
      : TemplateInstruction(deopt_id),
        edge_weight_(0.0),
        parallel_move_(nullptr),
        successor_(entry) {}

  DECLARE_INSTRUCTION(Goto)

  // The block this goto terminates.
  BlockEntryInstr* block() const { return block_; }
  void set_block(BlockEntryInstr* block) { block_ = block; }

  // The join block this goto jumps to (the single successor).
  JoinEntryInstr* successor() const { return successor_; }
  void set_successor(JoinEntryInstr* successor) { successor_ = successor; }
  virtual intptr_t SuccessorCount() const;
  virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;

  // Relative weight of this control-flow edge; scaled when the surrounding
  // graph is adjusted (e.g. during inlining).
  double edge_weight() const { return edge_weight_; }
  void set_edge_weight(double weight) { edge_weight_ = weight; }
  void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }

  virtual bool CanBecomeDeoptimizationTarget() const {
    // Goto instruction can be used as a deoptimization target when LICM
    // hoists instructions out of the loop.
    return true;
  }

  // May require a deoptimization target for int32 Phi input conversions.
  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  ParallelMoveInstr* parallel_move() const { return parallel_move_; }

  bool HasParallelMove() const { return parallel_move_ != nullptr; }

  bool HasNonRedundantParallelMove() const {
    return HasParallelMove() && !parallel_move()->IsRedundant();
  }

  // Lazily creates the parallel move on first access.
  ParallelMoveInstr* GetParallelMove() {
    if (parallel_move_ == nullptr) {
      parallel_move_ = new ParallelMoveInstr();
    }
    return parallel_move_;
  }

  virtual TokenPosition token_pos() const {
    return TokenPosition::kControlFlow;
  }

  PRINT_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(double, edge_weight_)                                                      \
  /* Parallel move that will be used by linear scan register allocator to */   \
  /* connect live ranges at the end of the block and resolve phis. */          \
  F(ParallelMoveInstr*, parallel_move_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(GotoInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST
  DECLARE_EXTRA_SERIALIZATION

 private:
  // Not serialized via FIELD_LIST; handled by the extra serialization hooks.
  BlockEntryInstr* block_ = nullptr;
  JoinEntryInstr* successor_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(GotoInstr);
};
3526
3527// IndirectGotoInstr represents a dynamically computed jump. Only
3528// IndirectEntryInstr targets are valid targets of an indirect goto. The
3529// concrete target index to jump to is given as a parameter to the indirect
3530// goto.
3531//
3532// In order to preserve split-edge form, an indirect goto does not itself point
3533// to its targets. Instead, for each possible target, the successors_ field
3534// will contain an ordinary goto instruction that jumps to the target.
3535// TODO(zerny): Implement direct support instead of embedding gotos.
3536//
3537// The input to the [IndirectGotoInstr] is the target index to jump to.
3538// All targets of the [IndirectGotoInstr] are added via [AddSuccessor] and get
3539// increasing indices.
3540//
3541// The FlowGraphCompiler will - as a post-processing step - invoke
3542// [ComputeOffsetTable] of all [IndirectGotoInstr]s. In there we initialize a
3543// TypedDataInt32Array containing offsets of all [IndirectEntryInstr]s (the
3544// offsets are relative to start of the instruction payload).
3545//
3546// => See `FlowGraphCompiler::CompileGraph()`
3547// => See `IndirectGotoInstr::ComputeOffsetTable`
3548class IndirectGotoInstr : public TemplateInstruction<1, NoThrow> {
3549 public:
3550 IndirectGotoInstr(intptr_t target_count, Value* target_index)
3551 : offsets_(TypedData::ZoneHandle(ptr: TypedData::New(class_id: kTypedDataInt32ArrayCid,
3552 len: target_count,
3553 space: Heap::kOld))) {
3554 SetInputAt(0, target_index);
3555 }
3556
3557 DECLARE_INSTRUCTION(IndirectGoto)
3558
3559 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
3560 ASSERT(idx == 0);
3561 return kTagged;
3562 }
3563
3564 void AddSuccessor(TargetEntryInstr* successor) {
3565 ASSERT(successor->next()->IsGoto());
3566 ASSERT(successor->next()->AsGoto()->successor()->IsIndirectEntry());
3567 successors_.Add(successor);
3568 }
3569
3570 virtual intptr_t SuccessorCount() const { return successors_.length(); }
3571 virtual TargetEntryInstr* SuccessorAt(intptr_t index) const {
3572 ASSERT(index < SuccessorCount());
3573 return successors_[index];
3574 }
3575
3576 virtual bool ComputeCanDeoptimize() const { return false; }
3577 virtual bool CanBecomeDeoptimizationTarget() const { return false; }
3578
3579 virtual bool HasUnknownSideEffects() const { return false; }
3580
3581 Value* offset() const { return inputs_[0]; }
3582 void ComputeOffsetTable(FlowGraphCompiler* compiler);
3583
3584 PRINT_TO_SUPPORT
3585
3586 DECLARE_CUSTOM_SERIALIZATION(IndirectGotoInstr)
3587 DECLARE_EXTRA_SERIALIZATION
3588
3589 private:
3590 GrowableArray<TargetEntryInstr*> successors_;
3591 const TypedData& offsets_;
3592
3593 DISALLOW_COPY_AND_ASSIGN(IndirectGotoInstr);
3594};
3595
3596class ComparisonInstr : public Definition {
3597 public:
3598 Value* left() const { return InputAt(i: 0); }
3599 Value* right() const { return InputAt(i: 1); }
3600
3601 virtual TokenPosition token_pos() const { return token_pos_; }
3602 Token::Kind kind() const { return kind_; }
3603 DECLARE_ATTRIBUTES(kind())
3604
3605 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right) = 0;
3606
3607 // Emits instructions to do the comparison and branch to the true or false
3608 // label depending on the result. This implementation will call
3609 // EmitComparisonCode and then generate the branch instructions afterwards.
3610 virtual void EmitBranchCode(FlowGraphCompiler* compiler, BranchInstr* branch);
3611
3612 // Used by EmitBranchCode and EmitNativeCode depending on whether the boolean
3613 // is to be turned into branches or instantiated. May return a valid
3614 // condition in which case the caller is expected to emit a branch to the
3615 // true label based on that condition (or a branch to the false label on the
3616 // opposite condition). May also branch directly to the labels.
3617 virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler,
3618 BranchLabels labels) = 0;
3619
3620 // Emits code that generates 'true' or 'false', depending on the comparison.
3621 // This implementation will call EmitComparisonCode. If EmitComparisonCode
3622 // does not use the labels (merely returning a condition) then EmitNativeCode
3623 // may be able to use the condition to avoid a branch.
3624 virtual void EmitNativeCode(FlowGraphCompiler* compiler);
3625
3626 void SetDeoptId(const Instruction& instr) { CopyDeoptIdFrom(instr); }
3627
3628 // Operation class id is computed from collected ICData.
3629 void set_operation_cid(intptr_t value) { operation_cid_ = value; }
3630 intptr_t operation_cid() const { return operation_cid_; }
3631
3632 virtual void NegateComparison() { kind_ = Token::NegateComparison(op: kind_); }
3633
3634 virtual bool CanBecomeDeoptimizationTarget() const { return true; }
3635 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
3636
3637 virtual bool AttributesEqual(const Instruction& other) const {
3638 auto const other_comparison = other.AsComparison();
3639 return kind() == other_comparison->kind() &&
3640 (operation_cid() == other_comparison->operation_cid());
3641 }
3642
3643 DECLARE_ABSTRACT_INSTRUCTION(Comparison)
3644
3645#define FIELD_LIST(F) \
3646 F(const TokenPosition, token_pos_) \
3647 F(Token::Kind, kind_) \
3648 /* Set by optimizer. */ \
3649 F(intptr_t, operation_cid_)
3650
3651 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ComparisonInstr,
3652 Definition,
3653 FIELD_LIST)
3654#undef FIELD_LIST
3655
3656 protected:
3657 ComparisonInstr(const InstructionSource& source,
3658 Token::Kind kind,
3659 intptr_t deopt_id = DeoptId::kNone)
3660 : Definition(source, deopt_id),
3661 token_pos_(source.token_pos),
3662 kind_(kind),
3663 operation_cid_(kIllegalCid) {}
3664
3665 private:
3666 DISALLOW_COPY_AND_ASSIGN(ComparisonInstr);
3667};
3668
// A comparison with no side effects: eligible for common subexpression
// elimination.
class PureComparison : public ComparisonInstr {
 public:
  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(PureComparison, ComparisonInstr)
 protected:
  PureComparison(const InstructionSource& source,
                 Token::Kind kind,
                 intptr_t deopt_id)
      : ComparisonInstr(source, kind, deopt_id) {}
};
3681
// Boilerplate base for comparisons with a fixed number [N] of inputs.
// [ThrowsTrait] determines MayThrow(); [CSETrait] picks the base class
// (ComparisonInstr for impure, PureComparison for pure comparisons).
template <intptr_t N,
          typename ThrowsTrait,
          template <typename Impure, typename Pure> class CSETrait = NoCSE>
class TemplateComparison
    : public CSETrait<ComparisonInstr, PureComparison>::Base {
 public:
  using BaseClass = typename CSETrait<ComparisonInstr, PureComparison>::Base;

  TemplateComparison(const InstructionSource& source,
                     Token::Kind kind,
                     intptr_t deopt_id = DeoptId::kNone)
      : BaseClass(source, kind, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }

  DECLARE_EMPTY_SERIALIZATION(TemplateComparison, BaseClass)

 protected:
  // Inputs are stored inline to avoid a separate allocation.
  EmbeddedArray<Value*, N> inputs_;

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
3708
3709class BranchInstr : public Instruction {
3710 public:
3711 explicit BranchInstr(ComparisonInstr* comparison, intptr_t deopt_id)
3712 : Instruction(deopt_id), comparison_(comparison) {
3713 ASSERT(comparison->env() == nullptr);
3714 for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
3715 comparison->InputAt(i)->set_instruction(this);
3716 }
3717 }
3718
3719 DECLARE_INSTRUCTION(Branch)
3720
3721 virtual intptr_t ArgumentCount() const {
3722 return comparison()->ArgumentCount();
3723 }
3724 virtual void SetMoveArguments(MoveArgumentsArray* move_arguments) {
3725 comparison()->SetMoveArguments(move_arguments);
3726 }
3727 virtual MoveArgumentsArray* GetMoveArguments() const {
3728 return comparison()->GetMoveArguments();
3729 }
3730
3731 intptr_t InputCount() const { return comparison()->InputCount(); }
3732
3733 Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }
3734
3735 virtual TokenPosition token_pos() const { return comparison_->token_pos(); }
3736 virtual intptr_t inlining_id() const { return comparison_->inlining_id(); }
3737 virtual void set_inlining_id(intptr_t value) {
3738 return comparison_->set_inlining_id(value);
3739 }
3740 virtual bool has_inlining_id() const {
3741 return comparison_->has_inlining_id();
3742 }
3743
3744 virtual bool ComputeCanDeoptimize() const {
3745 return comparison()->ComputeCanDeoptimize();
3746 }
3747
3748 virtual bool CanBecomeDeoptimizationTarget() const {
3749 return comparison()->CanBecomeDeoptimizationTarget();
3750 }
3751
3752 virtual bool HasUnknownSideEffects() const {
3753 return comparison()->HasUnknownSideEffects();
3754 }
3755
3756 virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
3757
3758 ComparisonInstr* comparison() const { return comparison_; }
3759 void SetComparison(ComparisonInstr* comp);
3760
3761 virtual intptr_t DeoptimizationTarget() const {
3762 return comparison()->DeoptimizationTarget();
3763 }
3764
3765 virtual Representation RequiredInputRepresentation(intptr_t i) const {
3766 return comparison()->RequiredInputRepresentation(idx: i);
3767 }
3768
3769 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
3770
3771 void set_constant_target(TargetEntryInstr* target) {
3772 ASSERT(target == true_successor() || target == false_successor());
3773 constant_target_ = target;
3774 }
3775 TargetEntryInstr* constant_target() const { return constant_target_; }
3776
3777 virtual void InheritDeoptTarget(Zone* zone, Instruction* other);
3778
3779 virtual bool MayThrow() const { return comparison()->MayThrow(); }
3780
3781 TargetEntryInstr* true_successor() const { return true_successor_; }
3782 TargetEntryInstr* false_successor() const { return false_successor_; }
3783
3784 TargetEntryInstr** true_successor_address() { return &true_successor_; }
3785 TargetEntryInstr** false_successor_address() { return &false_successor_; }
3786
3787 virtual intptr_t SuccessorCount() const;
3788 virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
3789
3790 PRINT_TO_SUPPORT
3791
3792#define FIELD_LIST(F) F(ComparisonInstr*, comparison_)
3793
3794 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BranchInstr, Instruction, FIELD_LIST)
3795#undef FIELD_LIST
3796 DECLARE_EXTRA_SERIALIZATION
3797
3798 private:
3799 virtual void RawSetInputAt(intptr_t i, Value* value) {
3800 comparison()->RawSetInputAt(i, value);
3801 }
3802
3803 TargetEntryInstr* true_successor_ = nullptr;
3804 TargetEntryInstr* false_successor_ = nullptr;
3805 TargetEntryInstr* constant_target_ = nullptr;
3806
3807 DISALLOW_COPY_AND_ASSIGN(BranchInstr);
3808};
3809
// Unconditionally triggers deoptimization when reached; the recorded
// deopt_reason_ identifies why the optimizer inserted it.
class DeoptimizeInstr : public TemplateInstruction<0, NoThrow, Pure> {
 public:
  DeoptimizeInstr(ICData::DeoptReasonId deopt_reason, intptr_t deopt_id)
      : TemplateInstruction(deopt_id), deopt_reason_(deopt_reason) {}

  // Deoptimizing is this instruction's whole purpose.
  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_INSTRUCTION(Deoptimize)

#define FIELD_LIST(F) F(const ICData::DeoptReasonId, deopt_reason_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DeoptimizeInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(DeoptimizeInstr);
};
3831
3832class RedefinitionInstr : public TemplateDefinition<1, NoThrow> {
3833 public:
3834 explicit RedefinitionInstr(Value* value) : constrained_type_(nullptr) {
3835 SetInputAt(i: 0, value);
3836 }
3837
3838 DECLARE_INSTRUCTION(Redefinition)
3839
3840 Value* value() const { return inputs_[0]; }
3841
3842 virtual CompileType ComputeType() const;
3843 virtual bool RecomputeType();
3844
3845 virtual Definition* Canonicalize(FlowGraph* flow_graph);
3846
3847 void set_constrained_type(CompileType* type) { constrained_type_ = type; }
3848 CompileType* constrained_type() const { return constrained_type_; }
3849
3850 virtual bool ComputeCanDeoptimize() const { return false; }
3851 virtual bool HasUnknownSideEffects() const { return false; }
3852
3853 virtual Value* RedefinedValue() const;
3854
3855 PRINT_OPERANDS_TO_SUPPORT
3856
3857#define FIELD_LIST(F) F(CompileType*, constrained_type_)
3858
3859 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RedefinitionInstr,
3860 TemplateDefinition,
3861 FIELD_LIST)
3862#undef FIELD_LIST
3863
3864 private:
3865 DISALLOW_COPY_AND_ASSIGN(RedefinitionInstr);
3866};
3867
3868// Keeps the value alive til after this point.
3869//
3870// The fence cannot be moved.
3871class ReachabilityFenceInstr : public TemplateInstruction<1, NoThrow> {
3872 public:
3873 explicit ReachabilityFenceInstr(Value* value) { SetInputAt(i: 0, value); }
3874
3875 DECLARE_INSTRUCTION(ReachabilityFence)
3876
3877 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
3878 return kNoRepresentation;
3879 }
3880
3881 Value* value() const { return inputs_[0]; }
3882
3883 virtual bool ComputeCanDeoptimize() const { return false; }
3884 virtual bool HasUnknownSideEffects() const { return false; }
3885
3886 PRINT_OPERANDS_TO_SUPPORT
3887
3888 DECLARE_EMPTY_SERIALIZATION(ReachabilityFenceInstr, TemplateInstruction)
3889
3890 private:
3891 DISALLOW_COPY_AND_ASSIGN(ReachabilityFenceInstr);
3892};
3893
3894class ConstraintInstr : public TemplateDefinition<1, NoThrow> {
3895 public:
3896 ConstraintInstr(Value* value, Range* constraint) : constraint_(constraint) {
3897 SetInputAt(i: 0, value);
3898 }
3899
3900 DECLARE_INSTRUCTION(Constraint)
3901
3902 virtual CompileType ComputeType() const;
3903
3904 virtual bool ComputeCanDeoptimize() const { return false; }
3905
3906 virtual bool HasUnknownSideEffects() const { return false; }
3907
3908 virtual bool AttributesEqual(const Instruction& other) const {
3909 UNREACHABLE();
3910 return false;
3911 }
3912
3913 Value* value() const { return inputs_[0]; }
3914 Range* constraint() const { return constraint_; }
3915
3916 virtual void InferRange(RangeAnalysis* analysis, Range* range);
3917
3918 // Constraints for branches have their target block stored in order
3919 // to find the comparison that generated the constraint:
3920 // target->predecessor->last_instruction->comparison.
3921 void set_target(TargetEntryInstr* target) { target_ = target; }
3922 TargetEntryInstr* target() const { return target_; }
3923
3924 PRINT_OPERANDS_TO_SUPPORT
3925
3926#define FIELD_LIST(F) F(Range*, constraint_)
3927
3928 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ConstraintInstr,
3929 TemplateDefinition,
3930 FIELD_LIST)
3931#undef FIELD_LIST
3932 DECLARE_EXTRA_SERIALIZATION
3933
3934 private:
3935 TargetEntryInstr* target_ = nullptr;
3936
3937 DISALLOW_COPY_AND_ASSIGN(ConstraintInstr);
3938};
3939
3940class ConstantInstr : public TemplateDefinition<0, NoThrow, Pure> {
3941 public:
3942 explicit ConstantInstr(const Object& value)
3943 : ConstantInstr(value, InstructionSource(TokenPosition::kConstant)) {}
3944 ConstantInstr(const Object& value, const InstructionSource& source);
3945
3946 DECLARE_INSTRUCTION(Constant)
3947 virtual CompileType ComputeType() const;
3948
3949 virtual Definition* Canonicalize(FlowGraph* flow_graph);
3950
3951 const Object& value() const { return value_; }
3952
3953 bool IsSmi() const { return compiler::target::IsSmi(a: value()); }
3954
3955 bool HasZeroRepresentation() const {
3956 switch (representation()) {
3957 case kTagged:
3958 case kUnboxedUint8:
3959 case kUnboxedUint16:
3960 case kUnboxedUint32:
3961 case kUnboxedInt32:
3962 case kUnboxedInt64:
3963 return IsSmi() && compiler::target::SmiValue(a: value()) == 0;
3964 case kUnboxedDouble:
3965 return compiler::target::IsDouble(a: value()) &&
3966 bit_cast<uint64_t>(source: compiler::target::DoubleValue(a: value())) == 0;
3967 default:
3968 return false;
3969 }
3970 }
3971
3972 virtual bool ComputeCanDeoptimize() const { return false; }
3973
3974 virtual void InferRange(RangeAnalysis* analysis, Range* range);
3975
3976 virtual bool AttributesEqual(const Instruction& other) const;
3977
3978 virtual TokenPosition token_pos() const { return token_pos_; }
3979
3980 void EmitMoveToLocation(FlowGraphCompiler* compiler,
3981 const Location& destination,
3982 Register tmp = kNoRegister,
3983 intptr_t pair_index = 0);
3984
3985 PRINT_OPERANDS_TO_SUPPORT
3986
3987#define FIELD_LIST(F) \
3988 F(const Object&, value_) \
3989 F(const TokenPosition, token_pos_)
3990
3991 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ConstantInstr,
3992 TemplateDefinition,
3993 FIELD_LIST)
3994#undef FIELD_LIST
3995
3996 private:
3997 DISALLOW_COPY_AND_ASSIGN(ConstantInstr);
3998};
3999
// Merged ConstantInstr -> UnboxedXXX into UnboxedConstantInstr.
// TODO(srdjan): Implemented currently for doubles only, should implement
// for other unboxing instructions.
class UnboxedConstantInstr : public ConstantInstr {
 public:
  explicit UnboxedConstantInstr(const Object& value,
                                Representation representation);

  // Representation in which this constant is materialized.
  virtual Representation representation() const { return representation_; }

  // Either 0 or the address of the unboxed constant.
  uword constant_address() const { return constant_address_; }

  DECLARE_INSTRUCTION(UnboxedConstant)
  DECLARE_CUSTOM_SERIALIZATION(UnboxedConstantInstr)

 private:
  const Representation representation_;
  uword
      constant_address_;  // Either 0 or points to the untagged constant.

  DISALLOW_COPY_AND_ASSIGN(UnboxedConstantInstr);
};
4023
4024// Checks that one type is a subtype of another (e.g. for type parameter bounds
4025// checking). Throws a TypeError otherwise. Both types are instantiated at
4026// runtime as necessary.
4027class AssertSubtypeInstr : public TemplateInstruction<5, Throws, Pure> {
4028 public:
4029 enum {
4030 kInstantiatorTAVPos = 0,
4031 kFunctionTAVPos = 1,
4032 kSubTypePos = 2,
4033 kSuperTypePos = 3,
4034 kDstNamePos = 4,
4035 };
4036
4037 AssertSubtypeInstr(const InstructionSource& source,
4038 Value* instantiator_type_arguments,
4039 Value* function_type_arguments,
4040 Value* sub_type,
4041 Value* super_type,
4042 Value* dst_name,
4043 intptr_t deopt_id)
4044 : TemplateInstruction(source, deopt_id), token_pos_(source.token_pos) {
4045 SetInputAt(i: kInstantiatorTAVPos, value: instantiator_type_arguments);
4046 SetInputAt(i: kFunctionTAVPos, value: function_type_arguments);
4047 SetInputAt(i: kSubTypePos, value: sub_type);
4048 SetInputAt(i: kSuperTypePos, value: super_type);
4049 SetInputAt(i: kDstNamePos, value: dst_name);
4050 }
4051
4052 DECLARE_INSTRUCTION(AssertSubtype);
4053
4054 Value* instantiator_type_arguments() const {
4055 return inputs_[kInstantiatorTAVPos];
4056 }
4057 Value* function_type_arguments() const { return inputs_[kFunctionTAVPos]; }
4058 Value* sub_type() const { return inputs_[kSubTypePos]; }
4059 Value* super_type() const { return inputs_[kSuperTypePos]; }
4060 Value* dst_name() const { return inputs_[kDstNamePos]; }
4061
4062 virtual TokenPosition token_pos() const { return token_pos_; }
4063
4064 virtual bool ComputeCanDeoptimize() const { return false; }
4065 virtual bool ComputeCanDeoptimizeAfterCall() const {
4066 return !CompilerState::Current().is_aot();
4067 }
4068 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
4069 return InputCount();
4070 }
4071
4072 virtual bool CanBecomeDeoptimizationTarget() const { return true; }
4073
4074 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
4075
4076 virtual bool AttributesEqual(const Instruction& other) const { return true; }
4077
4078 PRINT_OPERANDS_TO_SUPPORT
4079
4080#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
4081
4082 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertSubtypeInstr,
4083 TemplateInstruction,
4084 FIELD_LIST)
4085#undef FIELD_LIST
4086
4087 private:
4088 DISALLOW_COPY_AND_ASSIGN(AssertSubtypeInstr);
4089};
4090
4091class AssertAssignableInstr : public TemplateDefinition<4, Throws, Pure> {
4092 public:
4093#define FOR_EACH_ASSERT_ASSIGNABLE_KIND(V) \
4094 V(ParameterCheck) \
4095 V(InsertedByFrontend) \
4096 V(FromSource) \
4097 V(Unknown)
4098
4099#define KIND_DEFN(name) k##name,
4100 enum Kind { FOR_EACH_ASSERT_ASSIGNABLE_KIND(KIND_DEFN) };
4101#undef KIND_DEFN
4102
4103 static const char* KindToCString(Kind kind);
4104 static bool ParseKind(const char* str, Kind* out);
4105
4106 enum {
4107 kInstancePos = 0,
4108 kDstTypePos = 1,
4109 kInstantiatorTAVPos = 2,
4110 kFunctionTAVPos = 3,
4111 kNumInputs = 4,
4112 };
4113
4114 AssertAssignableInstr(const InstructionSource& source,
4115 Value* value,
4116 Value* dst_type,
4117 Value* instantiator_type_arguments,
4118 Value* function_type_arguments,
4119 const String& dst_name,
4120 intptr_t deopt_id,
4121 Kind kind = kUnknown)
4122 : TemplateDefinition(source, deopt_id),
4123 token_pos_(source.token_pos),
4124 dst_name_(dst_name),
4125 kind_(kind) {
4126 ASSERT(!dst_name.IsNull());
4127 SetInputAt(i: kInstancePos, value);
4128 SetInputAt(i: kDstTypePos, value: dst_type);
4129 SetInputAt(i: kInstantiatorTAVPos, value: instantiator_type_arguments);
4130 SetInputAt(i: kFunctionTAVPos, value: function_type_arguments);
4131 }
4132
4133 virtual intptr_t statistics_tag() const;
4134
4135 DECLARE_INSTRUCTION(AssertAssignable)
4136 virtual CompileType ComputeType() const;
4137 virtual bool RecomputeType();
4138
4139 Value* value() const { return inputs_[kInstancePos]; }
4140 Value* dst_type() const { return inputs_[kDstTypePos]; }
4141 Value* instantiator_type_arguments() const {
4142 return inputs_[kInstantiatorTAVPos];
4143 }
4144 Value* function_type_arguments() const { return inputs_[kFunctionTAVPos]; }
4145
4146 virtual TokenPosition token_pos() const { return token_pos_; }
4147 const String& dst_name() const { return dst_name_; }
4148
4149 virtual bool ComputeCanDeoptimize() const { return false; }
4150 virtual bool ComputeCanDeoptimizeAfterCall() const {
4151 return !CompilerState::Current().is_aot();
4152 }
4153 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
4154#if !defined(TARGET_ARCH_IA32)
4155 return InputCount();
4156#else
4157 // The ia32 implementation calls the stub by pushing the input registers
4158 // in the same order onto the stack thereby making the deopt-env correct.
4159 // (Due to lack of registers we cannot use all-argument calling convention
4160 // as in other architectures.)
4161 return 0;
4162#endif
4163 }
4164
4165 virtual bool CanBecomeDeoptimizationTarget() const {
4166 // AssertAssignable instructions that are specialized by the optimizer
4167 // (e.g. replaced with CheckClass) need a deoptimization descriptor before.
4168 return true;
4169 }
4170
4171 virtual Definition* Canonicalize(FlowGraph* flow_graph);
4172
4173 virtual bool AttributesEqual(const Instruction& other) const { return true; }
4174
4175 virtual Value* RedefinedValue() const;
4176
4177 virtual void InferRange(RangeAnalysis* analysis, Range* range);
4178
4179 PRINT_OPERANDS_TO_SUPPORT
4180
4181#define FIELD_LIST(F) \
4182 F(const TokenPosition, token_pos_) \
4183 F(const String&, dst_name_) \
4184 F(const Kind, kind_)
4185
4186 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertAssignableInstr,
4187 TemplateDefinition,
4188 FIELD_LIST)
4189#undef FIELD_LIST
4190
4191 private:
4192 DISALLOW_COPY_AND_ASSIGN(AssertAssignableInstr);
4193};
4194
4195class AssertBooleanInstr : public TemplateDefinition<1, Throws, Pure> {
4196 public:
4197 AssertBooleanInstr(const InstructionSource& source,
4198 Value* value,
4199 intptr_t deopt_id)
4200 : TemplateDefinition(source, deopt_id), token_pos_(source.token_pos) {
4201 SetInputAt(i: 0, value);
4202 }
4203
4204 DECLARE_INSTRUCTION(AssertBoolean)
4205 virtual CompileType ComputeType() const;
4206
4207 virtual TokenPosition token_pos() const { return token_pos_; }
4208 Value* value() const { return inputs_[0]; }
4209
4210 virtual bool ComputeCanDeoptimize() const { return false; }
4211 virtual bool ComputeCanDeoptimizeAfterCall() const {
4212 return !CompilerState::Current().is_aot();
4213 }
4214 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
4215 return InputCount();
4216 }
4217
4218 virtual Definition* Canonicalize(FlowGraph* flow_graph);
4219
4220 virtual bool AttributesEqual(const Instruction& other) const { return true; }
4221
4222 virtual Value* RedefinedValue() const;
4223
4224 PRINT_OPERANDS_TO_SUPPORT
4225
4226#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
4227
4228 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertBooleanInstr,
4229 TemplateDefinition,
4230 FIELD_LIST)
4231#undef FIELD_LIST
4232
4233 private:
4234 DISALLOW_COPY_AND_ASSIGN(AssertBooleanInstr);
4235};
4236
// Denotes a special parameter, currently either the context of a closure,
// the type arguments of a generic function or an arguments descriptor.
class SpecialParameterInstr : public TemplateDefinition<0, NoThrow> {
 public:
#define FOR_EACH_SPECIAL_PARAMETER_KIND(M)                                     \
  M(Context)                                                                   \
  M(TypeArgs)                                                                  \
  M(ArgDescriptor)                                                             \
  M(Exception)                                                                 \
  M(StackTrace)

#define KIND_DECL(name) k##name,
  enum SpecialParameterKind { FOR_EACH_SPECIAL_PARAMETER_KIND(KIND_DECL) };
#undef KIND_DECL

  // Defined as a static intptr_t instead of inside the enum since some
  // switch statements depend on the exhaustibility checking.
#define KIND_INC(name) +1
  static constexpr intptr_t kNumKinds =
      0 FOR_EACH_SPECIAL_PARAMETER_KIND(KIND_INC);
#undef KIND_INC

  static const char* KindToCString(SpecialParameterKind k);
  static bool ParseKind(const char* str, SpecialParameterKind* out);

  SpecialParameterInstr(SpecialParameterKind kind,
                        intptr_t deopt_id,
                        BlockEntryInstr* block)
      : TemplateDefinition(deopt_id), kind_(kind), block_(block) {}

  DECLARE_INSTRUCTION(SpecialParameter)

  // The block this definition is anchored to (it takes no value inputs).
  virtual BlockEntryInstr* GetBlock() { return block_; }

  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // Two special parameters are equal iff they denote the same kind.
  virtual bool AttributesEqual(const Instruction& other) const {
    return kind() == other.AsSpecialParameter()->kind();
  }
  SpecialParameterKind kind() const { return kind_; }

  const char* ToCString() const;

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F) F(const SpecialParameterKind, kind_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SpecialParameterInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST
  DECLARE_EXTRA_SERIALIZATION

 private:
  BlockEntryInstr* block_ = nullptr;
  DISALLOW_COPY_AND_ASSIGN(SpecialParameterInstr);
};
4298
4299struct ArgumentsInfo {
4300 ArgumentsInfo(intptr_t type_args_len,
4301 intptr_t count_with_type_args,
4302 intptr_t size_with_type_args,
4303 const Array& argument_names)
4304 : type_args_len(type_args_len),
4305 count_with_type_args(count_with_type_args),
4306 size_with_type_args(size_with_type_args),
4307 count_without_type_args(count_with_type_args -
4308 (type_args_len > 0 ? 1 : 0)),
4309 size_without_type_args(size_with_type_args -
4310 (type_args_len > 0 ? 1 : 0)),
4311 argument_names(argument_names) {}
4312
4313 ArrayPtr ToArgumentsDescriptor() const {
4314 return ArgumentsDescriptor::New(type_args_len, num_arguments: count_without_type_args,
4315 size_arguments: size_without_type_args, optional_arguments_names: argument_names);
4316 }
4317
4318 const intptr_t type_args_len;
4319 const intptr_t count_with_type_args;
4320 const intptr_t size_with_type_args;
4321 const intptr_t count_without_type_args;
4322 const intptr_t size_without_type_args;
4323 const Array& argument_names;
4324};
4325
4326template <intptr_t kExtraInputs>
4327class TemplateDartCall : public VariadicDefinition {
4328 public:
4329 TemplateDartCall(intptr_t deopt_id,
4330 intptr_t type_args_len,
4331 const Array& argument_names,
4332 InputsArray&& inputs,
4333 const InstructionSource& source)
4334 : VariadicDefinition(std::move(inputs), source, deopt_id),
4335 type_args_len_(type_args_len),
4336 argument_names_(argument_names),
4337 token_pos_(source.token_pos) {
4338 DEBUG_ASSERT(argument_names.IsNotTemporaryScopedHandle());
4339 ASSERT(InputCount() >= kExtraInputs);
4340 }
4341
4342 inline StringPtr Selector();
4343
4344 virtual bool MayThrow() const { return true; }
4345 virtual bool CanCallDart() const { return true; }
4346
4347 virtual bool ComputeCanDeoptimize() const { return false; }
4348 virtual bool ComputeCanDeoptimizeAfterCall() const {
4349 return !CompilerState::Current().is_aot();
4350 }
4351 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
4352 return kExtraInputs;
4353 }
4354
4355 intptr_t FirstArgIndex() const { return type_args_len_ > 0 ? 1 : 0; }
4356 Value* Receiver() const { return this->ArgumentValueAt(index: FirstArgIndex()); }
4357 intptr_t ArgumentCountWithoutTypeArgs() const {
4358 return ArgumentCount() - FirstArgIndex();
4359 }
4360 intptr_t ArgumentsSizeWithoutTypeArgs() const {
4361 return ArgumentsSize() - FirstArgIndex();
4362 }
4363 // ArgumentCount() includes the type argument vector if any.
4364 // Caution: Must override Instruction::ArgumentCount().
4365 intptr_t ArgumentCount() const {
4366 return move_arguments_ != nullptr ? move_arguments_->length()
4367 : InputCount() - kExtraInputs;
4368 }
4369 virtual intptr_t ArgumentsSize() const { return ArgumentCount(); }
4370
4371 virtual void SetMoveArguments(MoveArgumentsArray* move_arguments) {
4372 ASSERT(move_arguments_ == nullptr);
4373 move_arguments_ = move_arguments;
4374 }
4375 virtual MoveArgumentsArray* GetMoveArguments() const {
4376 return move_arguments_;
4377 }
4378 virtual void ReplaceInputsWithMoveArguments(
4379 MoveArgumentsArray* move_arguments) {
4380 ASSERT(move_arguments_ == nullptr);
4381 ASSERT(move_arguments->length() == ArgumentCount());
4382 SetMoveArguments(move_arguments);
4383 ASSERT(InputCount() == ArgumentCount() + kExtraInputs);
4384 const intptr_t extra_inputs_base = InputCount() - kExtraInputs;
4385 for (intptr_t i = 0, n = ArgumentCount(); i < n; ++i) {
4386 InputAt(i)->RemoveFromUseList();
4387 }
4388 for (intptr_t i = 0; i < kExtraInputs; ++i) {
4389 SetInputAt(i, value: InputAt(i: extra_inputs_base + i));
4390 }
4391 inputs_.TruncateTo(kExtraInputs);
4392 }
4393 intptr_t type_args_len() const { return type_args_len_; }
4394 const Array& argument_names() const { return argument_names_; }
4395 virtual TokenPosition token_pos() const { return token_pos_; }
4396 ArrayPtr GetArgumentsDescriptor() const {
4397 return ArgumentsDescriptor::New(
4398 type_args_len(), ArgumentCountWithoutTypeArgs(),
4399 ArgumentsSizeWithoutTypeArgs(), argument_names());
4400 }
4401
4402#define FIELD_LIST(F) \
4403 F(const intptr_t, type_args_len_) \
4404 F(const Array&, argument_names_) \
4405 F(const TokenPosition, token_pos_)
4406
4407 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TemplateDartCall,
4408 VariadicDefinition,
4409 FIELD_LIST)
4410#undef FIELD_LIST
4411 DECLARE_EXTRA_SERIALIZATION
4412
4413 private:
4414 MoveArgumentsArray* move_arguments_ = nullptr;
4415
4416 DISALLOW_COPY_AND_ASSIGN(TemplateDartCall);
4417};
4418
// Call through a closure value. The one extra input (TemplateDartCall<1>)
// is the closure being invoked; target_function_ records the closure's
// function when known.
class ClosureCallInstr : public TemplateDartCall<1> {
 public:
  ClosureCallInstr(const Function& target_function,
                   InputsArray&& inputs,
                   intptr_t type_args_len,
                   const Array& argument_names,
                   const InstructionSource& source,
                   intptr_t deopt_id)
      : TemplateDartCall(deopt_id,
                         type_args_len,
                         argument_names,
                         std::move(inputs),
                         source),
        target_function_(target_function) {
    DEBUG_ASSERT(target_function.IsNotTemporaryScopedHandle());
  }

  DECLARE_INSTRUCTION(ClosureCall)

  const Function& target_function() const { return target_function_; }

  // TODO(kmillikin): implement exact call counts for closure calls.
  virtual intptr_t CallCount() const { return 1; }

  virtual bool HasUnknownSideEffects() const { return true; }

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F) F(const Function&, target_function_)
  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ClosureCallInstr,
                                          TemplateDartCall,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(ClosureCallInstr);
};
4456
// Common base class for various kinds of instance call instructions
// (InstanceCallInstr, PolymorphicInstanceCallInstr).
class InstanceCallBaseInstr : public TemplateDartCall<0> {
 public:
  InstanceCallBaseInstr(const InstructionSource& source,
                        const String& function_name,
                        Token::Kind token_kind,
                        InputsArray&& arguments,
                        intptr_t type_args_len,
                        const Array& argument_names,
                        const ICData* ic_data,
                        intptr_t deopt_id,
                        const Function& interface_target,
                        const Function& tearoff_interface_target)
      : TemplateDartCall(deopt_id,
                         type_args_len,
                         argument_names,
                         std::move(arguments),
                         source),
        ic_data_(ic_data),
        function_name_(function_name),
        token_kind_(token_kind),
        interface_target_(interface_target),
        tearoff_interface_target_(tearoff_interface_target),
        result_type_(nullptr),
        has_unique_selector_(false),
        entry_kind_(Code::EntryKind::kNormal),
        receiver_is_not_smi_(false),
        is_call_on_this_(false) {
    DEBUG_ASSERT(function_name.IsNotTemporaryScopedHandle());
    DEBUG_ASSERT(interface_target.IsNotTemporaryScopedHandle());
    DEBUG_ASSERT(tearoff_interface_target.IsNotTemporaryScopedHandle());
    // An instance call always has at least a receiver.
    ASSERT(InputCount() > 0);
    // Only selector kinds that can be dispatched dynamically are allowed.
    ASSERT(Token::IsBinaryOperator(token_kind) ||
           Token::IsEqualityOperator(token_kind) ||
           Token::IsRelationalOperator(token_kind) ||
           Token::IsUnaryOperator(token_kind) ||
           Token::IsIndexOperator(token_kind) ||
           Token::IsTypeTestOperator(token_kind) ||
           Token::IsTypeCastOperator(token_kind) || token_kind == Token::kGET ||
           token_kind == Token::kSET || token_kind == Token::kILLEGAL);
  }

  const ICData* ic_data() const { return ic_data_; }
  // True when call-site feedback is present and non-null.
  bool HasICData() const {
    return (ic_data() != nullptr) && !ic_data()->IsNull();
  }

  // ICData can be replaced by optimizer.
  void set_ic_data(const ICData* value) { ic_data_ = value; }

  const String& function_name() const { return function_name_; }
  Token::Kind token_kind() const { return token_kind_; }
  const Function& interface_target() const { return interface_target_; }
  const Function& tearoff_interface_target() const {
    return tearoff_interface_target_;
  }

  bool has_unique_selector() const { return has_unique_selector_; }
  void set_has_unique_selector(bool b) { has_unique_selector_ = b; }

  virtual CompileType ComputeType() const;

  virtual bool CanBecomeDeoptimizationTarget() const {
    // Instance calls that are specialized by the optimizer need a
    // deoptimization descriptor before the call.
    return true;
  }

  virtual bool HasUnknownSideEffects() const { return true; }

  // Records an inferred result type (zone-allocated copy of new_type).
  void SetResultType(Zone* zone, CompileType new_type) {
    result_type_ = new (zone) CompileType(new_type);
  }

  CompileType* result_type() const { return result_type_; }

  // Class id of the inferred result type, or kDynamicCid when unknown.
  intptr_t result_cid() const {
    if (result_type_ == nullptr) {
      return kDynamicCid;
    }
    return result_type_->ToCid();
  }

  FunctionPtr ResolveForReceiverClass(const Class& cls, bool allow_add = true);

  Code::EntryKind entry_kind() const { return entry_kind_; }
  void set_entry_kind(Code::EntryKind value) { entry_kind_ = value; }

  void mark_as_call_on_this() { is_call_on_this_ = true; }
  bool is_call_on_this() const { return is_call_on_this_; }

  DECLARE_ABSTRACT_INSTRUCTION(InstanceCallBase);

  bool receiver_is_not_smi() const { return receiver_is_not_smi_; }
  void set_receiver_is_not_smi(bool value) { receiver_is_not_smi_ = value; }

  // Tries to prove that the receiver will not be a Smi based on the
  // interface target, CompileType and hints from TFA.
  void UpdateReceiverSminess(Zone* zone);

  bool CanReceiverBeSmiBasedOnInterfaceTarget(Zone* zone) const;

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
    // Skip over the type argument vector slot, if present, so that `idx`
    // indexes the interface target's parameters.
    if (type_args_len() > 0) {
      if (idx == 0) {
        return kGuardInputs;
      }
      idx--;
    }
    if (interface_target_.IsNull()) return kGuardInputs;
    return interface_target_.is_unboxed_parameter_at(idx) ? kNotSpeculative
                                                          : kGuardInputs;
  }

  virtual intptr_t ArgumentsSize() const;

  virtual Representation RequiredInputRepresentation(intptr_t idx) const;

  virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }

  virtual Representation representation() const;

#define FIELD_LIST(F)                                                          \
  F(const ICData*, ic_data_)                                                   \
  F(const String&, function_name_)                                             \
  /* Binary op, unary op, kGET or kILLEGAL. */                                 \
  F(const Token::Kind, token_kind_)                                            \
  F(const Function&, interface_target_)                                        \
  F(const Function&, tearoff_interface_target_)                                \
  /* Inferred result type. */                                                  \
  F(CompileType*, result_type_)                                                \
  F(bool, has_unique_selector_)                                                \
  F(Code::EntryKind, entry_kind_)                                              \
  F(bool, receiver_is_not_smi_)                                                \
  F(bool, is_call_on_this_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstanceCallBaseInstr,
                                          TemplateDartCall,
                                          FIELD_LIST)
#undef FIELD_LIST

 protected:
  friend class CallSpecializer;
  void set_ic_data(ICData* value) { ic_data_ = value; }
  void set_result_type(CompileType* result_type) { result_type_ = result_type; }

 private:
  DISALLOW_COPY_AND_ASSIGN(InstanceCallBaseInstr);
};
4607
// A dynamically dispatched instance call. Carries call-site feedback
// (ICData / CallTargets / BinaryFeedback) used by the optimizer to
// specialize or devirtualize the call.
class InstanceCallInstr : public InstanceCallBaseInstr {
 public:
  // Constructor that looks up feedback for this deopt id in ic_data_array.
  InstanceCallInstr(
      const InstructionSource& source,
      const String& function_name,
      Token::Kind token_kind,
      InputsArray&& arguments,
      intptr_t type_args_len,
      const Array& argument_names,
      intptr_t checked_argument_count,
      const ZoneGrowableArray<const ICData*>& ic_data_array,
      intptr_t deopt_id,
      const Function& interface_target = Function::null_function(),
      const Function& tearoff_interface_target = Function::null_function())
      : InstanceCallBaseInstr(
            source,
            function_name,
            token_kind,
            std::move(arguments),
            type_args_len,
            argument_names,
            GetICData(ic_data_array, deopt_id, /*is_static_call=*/false),
            deopt_id,
            interface_target,
            tearoff_interface_target),
        checked_argument_count_(checked_argument_count),
        receivers_static_type_(nullptr) {}

  // Constructor without call-site feedback (ic_data is nullptr).
  InstanceCallInstr(
      const InstructionSource& source,
      const String& function_name,
      Token::Kind token_kind,
      InputsArray&& arguments,
      intptr_t type_args_len,
      const Array& argument_names,
      intptr_t checked_argument_count,
      intptr_t deopt_id,
      const Function& interface_target = Function::null_function(),
      const Function& tearoff_interface_target = Function::null_function())
      : InstanceCallBaseInstr(source,
                              function_name,
                              token_kind,
                              std::move(arguments),
                              type_args_len,
                              argument_names,
                              /*ic_data=*/nullptr,
                              deopt_id,
                              interface_target,
                              tearoff_interface_target),
        checked_argument_count_(checked_argument_count),
        receivers_static_type_(nullptr) {}

  DECLARE_INSTRUCTION(InstanceCall)

  intptr_t checked_argument_count() const { return checked_argument_count_; }

  virtual intptr_t CallCount() const {
    return ic_data() == nullptr ? 0 : ic_data()->AggregateCount();
  }

  void set_receivers_static_type(const AbstractType* receiver_type) {
    ASSERT(receiver_type != nullptr);
    receivers_static_type_ = receiver_type;
  }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  PRINT_OPERANDS_TO_SUPPORT

  bool MatchesCoreName(const String& name);

  const class BinaryFeedback& BinaryFeedback();
  void SetBinaryFeedback(const class BinaryFeedback* binary) {
    binary_ = binary;
  }

  const CallTargets& Targets();
  void SetTargets(const CallTargets* targets) { targets_ = targets; }

  void EnsureICData(FlowGraph* graph);

#define FIELD_LIST(F)                                                          \
  F(const intptr_t, checked_argument_count_)                                   \
  F(const AbstractType*, receivers_static_type_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstanceCallInstr,
                                          InstanceCallBaseInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  // Lazily computed call-site feedback; not serialized with FIELD_LIST.
  const CallTargets* targets_ = nullptr;
  const class BinaryFeedback* binary_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(InstanceCallInstr);
};
4704
4705class PolymorphicInstanceCallInstr : public InstanceCallBaseInstr {
4706 public:
4707 // Generate a replacement polymorphic call instruction.
4708 static PolymorphicInstanceCallInstr* FromCall(Zone* zone,
4709 InstanceCallBaseInstr* call,
4710 const CallTargets& targets,
4711 bool complete) {
4712 ASSERT(!call->HasMoveArguments());
4713 InputsArray args(zone, call->ArgumentCount());
4714 for (intptr_t i = 0, n = call->ArgumentCount(); i < n; ++i) {
4715 args.Add(value: call->ArgumentValueAt(i)->CopyWithType(zone));
4716 }
4717 auto new_call = new (zone) PolymorphicInstanceCallInstr(
4718 call->source(), call->function_name(), call->token_kind(),
4719 std::move(args), call->type_args_len(), call->argument_names(),
4720 call->ic_data(), call->deopt_id(), call->interface_target(),
4721 call->tearoff_interface_target(), targets, complete);
4722 new_call->set_result_type(call->result_type());
4723 new_call->set_entry_kind(call->entry_kind());
4724 new_call->set_has_unique_selector(call->has_unique_selector());
4725 if (call->is_call_on_this()) {
4726 new_call->mark_as_call_on_this();
4727 }
4728 return new_call;
4729 }
4730
4731 bool complete() const { return complete_; }
4732
4733 virtual CompileType ComputeType() const;
4734
4735 bool HasOnlyDispatcherOrImplicitAccessorTargets() const;
4736
4737 const CallTargets& targets() const { return targets_; }
4738 intptr_t NumberOfChecks() const { return targets_.length(); }
4739
4740 bool IsSureToCallSingleRecognizedTarget() const;
4741
4742 virtual intptr_t CallCount() const;
4743
4744 // If this polymorphic call site was created to cover the remaining cids after
4745 // inlining then we need to keep track of the total number of calls including
4746 // the ones that we inlined. This is different from the CallCount above: Eg
4747 // if there were 100 calls originally, distributed across three class-ids in
4748 // the ratio 50, 40, 7, 3. The first two were inlined, so now we have only
4749 // 10 calls in the CallCount above, but the heuristics need to know that the
4750 // last two cids cover 7% and 3% of the calls, not 70% and 30%.
4751 intptr_t total_call_count() { return total_call_count_; }
4752
4753 void set_total_call_count(intptr_t count) { total_call_count_ = count; }
4754
4755 DECLARE_INSTRUCTION(PolymorphicInstanceCall)
4756
4757 virtual Definition* Canonicalize(FlowGraph* graph);
4758
4759 static TypePtr ComputeRuntimeType(const CallTargets& targets);
4760
4761 PRINT_OPERANDS_TO_SUPPORT
4762
4763#define FIELD_LIST(F) \
4764 F(const CallTargets&, targets_) \
4765 F(const bool, complete_) \
4766 F(intptr_t, total_call_count_)
4767
4768 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(PolymorphicInstanceCallInstr,
4769 InstanceCallBaseInstr,
4770 FIELD_LIST)
4771#undef FIELD_LIST
4772
4773 private:
4774 PolymorphicInstanceCallInstr(const InstructionSource& source,
4775 const String& function_name,
4776 Token::Kind token_kind,
4777 InputsArray&& arguments,
4778 intptr_t type_args_len,
4779 const Array& argument_names,
4780 const ICData* ic_data,
4781 intptr_t deopt_id,
4782 const Function& interface_target,
4783 const Function& tearoff_interface_target,
4784 const CallTargets& targets,
4785 bool complete)
4786 : InstanceCallBaseInstr(source,
4787 function_name,
4788 token_kind,
4789 std::move(arguments),
4790 type_args_len,
4791 argument_names,
4792 ic_data,
4793 deopt_id,
4794 interface_target,
4795 tearoff_interface_target),
4796 targets_(targets),
4797 complete_(complete) {
4798 ASSERT(targets.length() != 0);
4799 total_call_count_ = CallCount();
4800 }
4801
4802 friend class PolymorphicInliner;
4803
4804 DISALLOW_COPY_AND_ASSIGN(PolymorphicInstanceCallInstr);
4805};
4806
4807// Instance call using the global dispatch table.
4808//
4809// Takes untagged ClassId of the receiver as extra input.
4810class DispatchTableCallInstr : public TemplateDartCall<1> {
4811 public:
4812 DispatchTableCallInstr(const InstructionSource& source,
4813 const Function& interface_target,
4814 const compiler::TableSelector* selector,
4815 InputsArray&& arguments,
4816 intptr_t type_args_len,
4817 const Array& argument_names)
4818 : TemplateDartCall(DeoptId::kNone,
4819 type_args_len,
4820 argument_names,
4821 std::move(arguments),
4822 source),
4823 interface_target_(interface_target),
4824 selector_(selector) {
4825 ASSERT(selector != nullptr);
4826 DEBUG_ASSERT(interface_target_.IsNotTemporaryScopedHandle());
4827 ASSERT(InputCount() > 0);
4828 }
4829
4830 static DispatchTableCallInstr* FromCall(
4831 Zone* zone,
4832 const InstanceCallBaseInstr* call,
4833 Value* cid,
4834 const Function& interface_target,
4835 const compiler::TableSelector* selector);
4836
4837 DECLARE_INSTRUCTION(DispatchTableCall)
4838 DECLARE_ATTRIBUTES(selector_name())
4839
4840 const Function& interface_target() const { return interface_target_; }
4841 const compiler::TableSelector* selector() const { return selector_; }
4842 const char* selector_name() const {
4843 return String::Handle(ptr: interface_target().name()).ToCString();
4844 }
4845
4846 Value* class_id() const { return InputAt(i: InputCount() - 1); }
4847
4848 virtual CompileType ComputeType() const;
4849
4850 virtual Definition* Canonicalize(FlowGraph* flow_graph);
4851
4852 virtual bool CanBecomeDeoptimizationTarget() const { return false; }
4853
4854 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
4855
4856 virtual bool HasUnknownSideEffects() const { return true; }
4857
4858 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
4859 if (type_args_len() > 0) {
4860 if (idx == 0) {
4861 return kGuardInputs;
4862 }
4863 idx--;
4864 }
4865 return interface_target_.is_unboxed_parameter_at(index: idx) ? kNotSpeculative
4866 : kGuardInputs;
4867 }
4868
4869 virtual intptr_t ArgumentsSize() const;
4870
4871 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
4872
4873 virtual Representation representation() const;
4874
4875 PRINT_OPERANDS_TO_SUPPORT
4876
4877#define FIELD_LIST(F) \
4878 F(const Function&, interface_target_) \
4879 F(const compiler::TableSelector*, selector_)
4880
4881 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DispatchTableCallInstr,
4882 TemplateDartCall,
4883 FIELD_LIST)
4884#undef FIELD_LIST
4885
4886 private:
4887 DISALLOW_COPY_AND_ASSIGN(DispatchTableCallInstr);
4888};
4889
// Strict (identity-based) comparison of two values. When
// needs_number_check_ is set, Double and Mint operands must be compared by
// value instead (see the FIELD_LIST comment below).
class StrictCompareInstr : public TemplateComparison<2, NoThrow, Pure> {
 public:
  StrictCompareInstr(const InstructionSource& source,
                     Token::Kind kind,
                     Value* left,
                     Value* right,
                     bool needs_number_check,
                     intptr_t deopt_id);

  DECLARE_COMPARISON_INSTRUCTION(StrictCompare)

  virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);

  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  bool needs_number_check() const { return needs_number_check_; }
  void set_needs_number_check(bool value) { needs_number_check_ = value; }

  bool AttributesEqual(const Instruction& other) const;

  PRINT_OPERANDS_TO_SUPPORT;

#define FIELD_LIST(F)                                                          \
  /* True if the comparison must check for double or Mint and */               \
  /* use value comparison instead. */                                          \
  F(bool, needs_number_check_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StrictCompareInstr,
                                          TemplateComparison,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  // Emits a comparison of a register against a constant object
  // (arch-specific; defined in the per-architecture IL files).
  Condition EmitComparisonCodeRegConstant(FlowGraphCompiler* compiler,
                                          BranchLabels labels,
                                          Register reg,
                                          const Object& obj);
  // Attempts to emit an optimized test for the given constant operand;
  // returns false when the optimization does not apply.
  bool TryEmitBoolTest(FlowGraphCompiler* compiler,
                       BranchLabels labels,
                       intptr_t input_index,
                       const Object& obj,
                       Condition* condition_out);

  DISALLOW_COPY_AND_ASSIGN(StrictCompareInstr);
};
4939
4940// Comparison instruction that is equivalent to the (left & right) == 0
4941// comparison pattern.
4942class TestSmiInstr : public TemplateComparison<2, NoThrow, Pure> {
4943 public:
4944 TestSmiInstr(const InstructionSource& source,
4945 Token::Kind kind,
4946 Value* left,
4947 Value* right)
4948 : TemplateComparison(source, kind) {
4949 ASSERT(kind == Token::kEQ || kind == Token::kNE);
4950 SetInputAt(i: 0, value: left);
4951 SetInputAt(i: 1, value: right);
4952 }
4953
4954 DECLARE_COMPARISON_INSTRUCTION(TestSmi);
4955
4956 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
4957
4958 virtual CompileType ComputeType() const;
4959
4960 virtual bool ComputeCanDeoptimize() const { return false; }
4961
4962 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
4963 return kTagged;
4964 }
4965
4966 DECLARE_EMPTY_SERIALIZATION(TestSmiInstr, TemplateComparison)
4967
4968 private:
4969 DISALLOW_COPY_AND_ASSIGN(TestSmiInstr);
4970};
4971
// Checks the input value cid against cids stored in a table and returns either
// a result or deoptimizes. If the cid is not in the list and there is a deopt
// id, then the instruction deoptimizes. If there is no deopt id, all the
// results must be the same (all true or all false) and the instruction returns
// the opposite for cids not on the list. The first element in the table must
// always be the result for the Smi class-id and is allowed to differ from the
// other results even in the no-deopt case.
class TestCidsInstr : public TemplateComparison<1, NoThrow, Pure> {
 public:
  TestCidsInstr(const InstructionSource& source,
                Token::Kind kind,
                Value* value,
                const ZoneGrowableArray<intptr_t>& cid_results,
                intptr_t deopt_id);

  // The cid/result table described in the class comment above.
  const ZoneGrowableArray<intptr_t>& cid_results() const {
    return cid_results_;
  }

  DECLARE_COMPARISON_INSTRUCTION(TestCids);

  virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);

  virtual CompileType ComputeType() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  // Can only deoptimize when a deopt id was supplied (see class comment).
  virtual bool ComputeCanDeoptimize() const {
    return GetDeoptId() != DeoptId::kNone;
  }

  // The tested value is taken in tagged representation.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    return kTagged;
  }

  virtual bool AttributesEqual(const Instruction& other) const;

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F) F(const ZoneGrowableArray<intptr_t>&, cid_results_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TestCidsInstr,
                                          TemplateComparison,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(TestCidsInstr);
};
5021
5022class EqualityCompareInstr : public TemplateComparison<2, NoThrow, Pure> {
5023 public:
5024 EqualityCompareInstr(const InstructionSource& source,
5025 Token::Kind kind,
5026 Value* left,
5027 Value* right,
5028 intptr_t cid,
5029 intptr_t deopt_id,
5030 bool null_aware = false,
5031 SpeculativeMode speculative_mode = kGuardInputs)
5032 : TemplateComparison(source, kind, deopt_id),
5033 null_aware_(null_aware),
5034 speculative_mode_(speculative_mode) {
5035 ASSERT(Token::IsEqualityOperator(kind));
5036 SetInputAt(i: 0, value: left);
5037 SetInputAt(i: 1, value: right);
5038 set_operation_cid(cid);
5039 }
5040
5041 DECLARE_COMPARISON_INSTRUCTION(EqualityCompare)
5042
5043 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
5044
5045 virtual CompileType ComputeType() const;
5046
5047 virtual bool ComputeCanDeoptimize() const { return false; }
5048
5049 bool is_null_aware() const { return null_aware_; }
5050 void set_null_aware(bool value) { null_aware_ = value; }
5051
5052 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5053 ASSERT((idx == 0) || (idx == 1));
5054 if (is_null_aware()) return kTagged;
5055 if (operation_cid() == kDoubleCid) return kUnboxedDouble;
5056 if (operation_cid() == kMintCid) return kUnboxedInt64;
5057 return kTagged;
5058 }
5059
5060 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
5061 return speculative_mode_;
5062 }
5063
5064 virtual bool AttributesEqual(const Instruction& other) const {
5065 return ComparisonInstr::AttributesEqual(other) &&
5066 (null_aware_ == other.AsEqualityCompare()->null_aware_) &&
5067 (speculative_mode_ == other.AsEqualityCompare()->speculative_mode_);
5068 }
5069
5070 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5071
5072 PRINT_OPERANDS_TO_SUPPORT
5073
5074#define FIELD_LIST(F) \
5075 F(bool, null_aware_) \
5076 F(const SpeculativeMode, speculative_mode_)
5077
5078 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(EqualityCompareInstr,
5079 TemplateComparison,
5080 FIELD_LIST)
5081#undef FIELD_LIST
5082
5083 private:
5084 DISALLOW_COPY_AND_ASSIGN(EqualityCompareInstr);
5085};
5086
5087class RelationalOpInstr : public TemplateComparison<2, NoThrow, Pure> {
5088 public:
5089 RelationalOpInstr(const InstructionSource& source,
5090 Token::Kind kind,
5091 Value* left,
5092 Value* right,
5093 intptr_t cid,
5094 intptr_t deopt_id,
5095 SpeculativeMode speculative_mode = kGuardInputs)
5096 : TemplateComparison(source, kind, deopt_id),
5097 speculative_mode_(speculative_mode) {
5098 ASSERT(Token::IsRelationalOperator(kind));
5099 SetInputAt(i: 0, value: left);
5100 SetInputAt(i: 1, value: right);
5101 set_operation_cid(cid);
5102 }
5103
5104 DECLARE_COMPARISON_INSTRUCTION(RelationalOp)
5105
5106 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
5107
5108 virtual CompileType ComputeType() const;
5109
5110 virtual bool ComputeCanDeoptimize() const { return false; }
5111
5112 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5113 ASSERT((idx == 0) || (idx == 1));
5114 if (operation_cid() == kDoubleCid) return kUnboxedDouble;
5115 if (operation_cid() == kMintCid) return kUnboxedInt64;
5116 return kTagged;
5117 }
5118
5119 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
5120 return speculative_mode_;
5121 }
5122
5123 virtual bool AttributesEqual(const Instruction& other) const {
5124 return ComparisonInstr::AttributesEqual(other) &&
5125 (speculative_mode_ == other.AsRelationalOp()->speculative_mode_);
5126 }
5127
5128 PRINT_OPERANDS_TO_SUPPORT
5129
5130#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
5131
5132 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RelationalOpInstr,
5133 TemplateComparison,
5134 FIELD_LIST)
5135#undef FIELD_LIST
5136
5137 private:
5138 DISALLOW_COPY_AND_ASSIGN(RelationalOpInstr);
5139};
5140
5141// TODO(vegorov): ComparisonInstr should be switched to use IfTheElseInstr for
5142// materialization of true and false constants.
5143class IfThenElseInstr : public Definition {
5144 public:
5145 IfThenElseInstr(ComparisonInstr* comparison,
5146 Value* if_true,
5147 Value* if_false,
5148 intptr_t deopt_id)
5149 : Definition(deopt_id),
5150 comparison_(comparison),
5151 if_true_(Smi::Cast(obj: if_true->BoundConstant()).Value()),
5152 if_false_(Smi::Cast(obj: if_false->BoundConstant()).Value()) {
5153 // Adjust uses at the comparison.
5154 ASSERT(comparison->env() == nullptr);
5155 for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
5156 comparison->InputAt(i)->set_instruction(this);
5157 }
5158 }
5159
5160 // Returns true if this combination of comparison and values flowing on
5161 // the true and false paths is supported on the current platform.
5162 static bool Supports(ComparisonInstr* comparison, Value* v1, Value* v2);
5163
5164 DECLARE_INSTRUCTION(IfThenElse)
5165
5166 intptr_t InputCount() const { return comparison()->InputCount(); }
5167
5168 Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }
5169
5170 virtual bool ComputeCanDeoptimize() const {
5171 return comparison()->ComputeCanDeoptimize();
5172 }
5173
5174 virtual bool CanBecomeDeoptimizationTarget() const {
5175 return comparison()->CanBecomeDeoptimizationTarget();
5176 }
5177
5178 virtual intptr_t DeoptimizationTarget() const {
5179 return comparison()->DeoptimizationTarget();
5180 }
5181
5182 virtual Representation RequiredInputRepresentation(intptr_t i) const {
5183 return comparison()->RequiredInputRepresentation(idx: i);
5184 }
5185
5186 virtual CompileType ComputeType() const;
5187
5188 virtual void InferRange(RangeAnalysis* analysis, Range* range);
5189
5190 ComparisonInstr* comparison() const { return comparison_; }
5191 intptr_t if_true() const { return if_true_; }
5192 intptr_t if_false() const { return if_false_; }
5193
5194 virtual bool AllowsCSE() const { return comparison()->AllowsCSE(); }
5195 virtual bool HasUnknownSideEffects() const {
5196 return comparison()->HasUnknownSideEffects();
5197 }
5198 virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
5199
5200 virtual bool AttributesEqual(const Instruction& other) const {
5201 auto const other_if_then_else = other.AsIfThenElse();
5202 return (comparison()->tag() == other_if_then_else->comparison()->tag()) &&
5203 comparison()->AttributesEqual(other: *other_if_then_else->comparison()) &&
5204 (if_true_ == other_if_then_else->if_true_) &&
5205 (if_false_ == other_if_then_else->if_false_);
5206 }
5207
5208 virtual bool MayThrow() const { return comparison()->MayThrow(); }
5209
5210 PRINT_OPERANDS_TO_SUPPORT
5211
5212#define FIELD_LIST(F) \
5213 F(ComparisonInstr*, comparison_) \
5214 F(const intptr_t, if_true_) \
5215 F(const intptr_t, if_false_)
5216
5217 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(IfThenElseInstr,
5218 Definition,
5219 FIELD_LIST)
5220#undef FIELD_LIST
5221 DECLARE_EXTRA_SERIALIZATION
5222
5223 private:
5224 virtual void RawSetInputAt(intptr_t i, Value* value) {
5225 comparison()->RawSetInputAt(i, value);
5226 }
5227
5228 DISALLOW_COPY_AND_ASSIGN(IfThenElseInstr);
5229};
5230
5231class StaticCallInstr : public TemplateDartCall<0> {
5232 public:
5233 StaticCallInstr(const InstructionSource& source,
5234 const Function& function,
5235 intptr_t type_args_len,
5236 const Array& argument_names,
5237 InputsArray&& arguments,
5238 const ZoneGrowableArray<const ICData*>& ic_data_array,
5239 intptr_t deopt_id,
5240 ICData::RebindRule rebind_rule)
5241 : TemplateDartCall(deopt_id,
5242 type_args_len,
5243 argument_names,
5244 std::move(arguments),
5245 source),
5246 ic_data_(GetICData(ic_data_array, deopt_id, /*is_static_call=*/is_static_call: true)),
5247 call_count_(0),
5248 function_(function),
5249 rebind_rule_(rebind_rule),
5250 result_type_(nullptr),
5251 is_known_list_constructor_(false),
5252 entry_kind_(Code::EntryKind::kNormal),
5253 identity_(AliasIdentity::Unknown()) {
5254 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
5255 ASSERT(!function.IsNull());
5256 }
5257
5258 StaticCallInstr(const InstructionSource& source,
5259 const Function& function,
5260 intptr_t type_args_len,
5261 const Array& argument_names,
5262 InputsArray&& arguments,
5263 intptr_t deopt_id,
5264 intptr_t call_count,
5265 ICData::RebindRule rebind_rule)
5266 : TemplateDartCall(deopt_id,
5267 type_args_len,
5268 argument_names,
5269 std::move(arguments),
5270 source),
5271 ic_data_(nullptr),
5272 call_count_(call_count),
5273 function_(function),
5274 rebind_rule_(rebind_rule),
5275 result_type_(nullptr),
5276 is_known_list_constructor_(false),
5277 entry_kind_(Code::EntryKind::kNormal),
5278 identity_(AliasIdentity::Unknown()) {
5279 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
5280 ASSERT(!function.IsNull());
5281 }
5282
5283 // Generate a replacement call instruction for an instance call which
5284 // has been found to have only one target.
5285 template <class C>
5286 static StaticCallInstr* FromCall(Zone* zone,
5287 const C* call,
5288 const Function& target,
5289 intptr_t call_count) {
5290 ASSERT(!call->HasMoveArguments());
5291 InputsArray args(zone, call->ArgumentCount());
5292 for (intptr_t i = 0; i < call->ArgumentCount(); i++) {
5293 args.Add(value: call->ArgumentValueAt(i)->CopyWithType());
5294 }
5295 StaticCallInstr* new_call = new (zone) StaticCallInstr(
5296 call->source(), target, call->type_args_len(), call->argument_names(),
5297 std::move(args), call->deopt_id(), call_count, ICData::kNoRebind);
5298 if (call->result_type() != nullptr) {
5299 new_call->result_type_ = call->result_type();
5300 }
5301 new_call->set_entry_kind(call->entry_kind());
5302 return new_call;
5303 }
5304
5305 // ICData for static calls carries call count.
5306 const ICData* ic_data() const { return ic_data_; }
5307 bool HasICData() const {
5308 return (ic_data() != nullptr) && !ic_data()->IsNull();
5309 }
5310
5311 void set_ic_data(const ICData* value) { ic_data_ = value; }
5312
5313 DECLARE_INSTRUCTION(StaticCall)
5314 virtual CompileType ComputeType() const;
5315 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5316 bool Evaluate(FlowGraph* flow_graph, const Object& argument, Object* result);
5317 bool Evaluate(FlowGraph* flow_graph,
5318 const Object& argument1,
5319 const Object& argument2,
5320 Object* result);
5321
5322 // Accessors forwarded to the AST node.
5323 const Function& function() const { return function_; }
5324
5325 virtual intptr_t CallCount() const {
5326 return ic_data() == nullptr ? call_count_ : ic_data()->AggregateCount();
5327 }
5328
5329 virtual bool ComputeCanDeoptimize() const {
5330 return !CompilerState::Current().is_aot();
5331 }
5332
5333 virtual bool CanBecomeDeoptimizationTarget() const {
5334 // Static calls that are specialized by the optimizer (e.g. sqrt) need a
5335 // deoptimization descriptor before the call.
5336 return true;
5337 }
5338
5339 virtual bool HasUnknownSideEffects() const { return true; }
5340 virtual bool CanCallDart() const { return true; }
5341
5342 // Initialize result type of this call instruction if target is a recognized
5343 // method or has pragma annotation.
5344 // Returns true on success, false if result type is still unknown.
5345 bool InitResultType(Zone* zone);
5346
5347 void SetResultType(Zone* zone, CompileType new_type) {
5348 result_type_ = new (zone) CompileType(new_type);
5349 }
5350
5351 CompileType* result_type() const { return result_type_; }
5352
5353 intptr_t result_cid() const {
5354 if (result_type_ == nullptr) {
5355 return kDynamicCid;
5356 }
5357 return result_type_->ToCid();
5358 }
5359
5360 bool is_known_list_constructor() const { return is_known_list_constructor_; }
5361 void set_is_known_list_constructor(bool value) {
5362 is_known_list_constructor_ = value;
5363 }
5364
5365 Code::EntryKind entry_kind() const { return entry_kind_; }
5366
5367 void set_entry_kind(Code::EntryKind value) { entry_kind_ = value; }
5368
5369 bool IsRecognizedFactory() const { return is_known_list_constructor(); }
5370
5371 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
5372 if (type_args_len() > 0 || function().IsFactory()) {
5373 if (idx == 0) {
5374 return kGuardInputs;
5375 }
5376 idx--;
5377 }
5378 return function_.is_unboxed_parameter_at(index: idx) ? kNotSpeculative
5379 : kGuardInputs;
5380 }
5381
5382 virtual intptr_t ArgumentsSize() const;
5383
5384 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5385
5386 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
5387
5388 virtual Representation representation() const;
5389
5390 virtual AliasIdentity Identity() const { return identity_; }
5391 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
5392
5393 const CallTargets& Targets();
5394 const class BinaryFeedback& BinaryFeedback();
5395
5396 PRINT_OPERANDS_TO_SUPPORT
5397
5398#define FIELD_LIST(F) \
5399 F(const ICData*, ic_data_) \
5400 F(const intptr_t, call_count_) \
5401 F(const Function&, function_) \
5402 F(const ICData::RebindRule, rebind_rule_) \
5403 /* Known or inferred result type. */ \
5404 F(CompileType*, result_type_) \
5405 /* 'True' for recognized list constructors. */ \
5406 F(bool, is_known_list_constructor_) \
5407 F(Code::EntryKind, entry_kind_) \
5408 F(AliasIdentity, identity_)
5409
5410 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StaticCallInstr,
5411 TemplateDartCall,
5412 FIELD_LIST)
5413#undef FIELD_LIST
5414
5415 private:
5416 const CallTargets* targets_ = nullptr;
5417 const class BinaryFeedback* binary_ = nullptr;
5418
5419 DISALLOW_COPY_AND_ASSIGN(StaticCallInstr);
5420};
5421
// Reads the current value of a local variable.
//
// Only appears in the initial (pre-SSA) graph; SSA construction replaces
// local-variable accesses with data-flow edges, so the side-effect query
// below is unreachable afterwards.
class LoadLocalInstr : public TemplateDefinition<0, NoThrow> {
 public:
  LoadLocalInstr(const LocalVariable& local, const InstructionSource& source)
      : TemplateDefinition(source),
        local_(local),
        is_last_(false),
        token_pos_(source.token_pos) {}

  DECLARE_INSTRUCTION(LoadLocal)
  virtual CompileType ComputeType() const;

  // The local variable being read.
  const LocalVariable& local() const { return local_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const {
    UNREACHABLE();  // Eliminated by SSA construction.
    return false;
  }

  // Marks/queries whether this is the last access to the local.
  void mark_last() { is_last_ = true; }
  bool is_last() const { return is_last_; }

  virtual TokenPosition token_pos() const { return token_pos_; }

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const LocalVariable&, local_)                                              \
  F(bool, is_last_)                                                            \
  F(const TokenPosition, token_pos_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadLocalInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(LoadLocalInstr);
};
5462
5463class DropTempsInstr : public Definition {
5464 public:
5465 DropTempsInstr(intptr_t num_temps, Value* value)
5466 : num_temps_(num_temps), has_input_(value != nullptr) {
5467 if (has_input_) {
5468 SetInputAt(i: 0, value);
5469 }
5470 }
5471
5472 DECLARE_INSTRUCTION(DropTemps)
5473
5474 virtual intptr_t InputCount() const { return has_input_ ? 1 : 0; }
5475 virtual Value* InputAt(intptr_t i) const {
5476 ASSERT(has_input_ && (i == 0));
5477 return value_;
5478 }
5479
5480 Value* value() const { return value_; }
5481
5482 intptr_t num_temps() const { return num_temps_; }
5483
5484 virtual CompileType ComputeType() const;
5485
5486 virtual bool ComputeCanDeoptimize() const { return false; }
5487
5488 virtual bool HasUnknownSideEffects() const {
5489 UNREACHABLE(); // Eliminated by SSA construction.
5490 return false;
5491 }
5492
5493 virtual bool MayThrow() const { return false; }
5494
5495 virtual TokenPosition token_pos() const { return TokenPosition::kTempMove; }
5496
5497 PRINT_OPERANDS_TO_SUPPORT
5498
5499#define FIELD_LIST(F) \
5500 F(const intptr_t, num_temps_) \
5501 F(const bool, has_input_)
5502
5503 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DropTempsInstr,
5504 Definition,
5505 FIELD_LIST)
5506#undef FIELD_LIST
5507
5508 private:
5509 virtual void RawSetInputAt(intptr_t i, Value* value) {
5510 ASSERT(has_input_);
5511 value_ = value;
5512 }
5513
5514 Value* value_ = nullptr;
5515
5516 DISALLOW_COPY_AND_ASSIGN(DropTempsInstr);
5517};
5518
// This instruction is used to reserve a space on the expression stack
// that later would be filled with StoreLocal. Reserved space would be
// filled with a null value initially.
//
// Note: One must not use Constant(#null) to reserve expression stack space
// because it would lead to an incorrectly compiled unoptimized code. Graph
// builder would set Constant(#null) as an input definition to the instruction
// that consumes this value from the expression stack - not knowing that
// this value represents a placeholder - which might lead issues if instruction
// has specialization for constant inputs (see https://dartbug.com/33195).
class MakeTempInstr : public TemplateDefinition<0, NoThrow, Pure> {
 public:
  explicit MakeTempInstr(Zone* zone)
      : null_(new (zone) ConstantInstr(Object::ZoneHandle())) {
    // Note: We put ConstantInstr inside MakeTemp to simplify code generation:
    // having ConstantInstr allows us to use Location::Constant(null_) as an
    // output location for this instruction.
  }

  DECLARE_INSTRUCTION(MakeTemp)

  // The reserved slot may later hold any value, so the type is dynamic.
  virtual CompileType ComputeType() const { return CompileType::Dynamic(); }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const {
    UNREACHABLE();  // Eliminated by SSA construction.
    return false;
  }

  virtual bool MayThrow() const { return false; }

  virtual TokenPosition token_pos() const { return TokenPosition::kTempMove; }

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F) F(ConstantInstr*, null_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MakeTempInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST
  DECLARE_EXTRA_SERIALIZATION

 private:
  DISALLOW_COPY_AND_ASSIGN(MakeTempInstr);
};
5566
5567class StoreLocalInstr : public TemplateDefinition<1, NoThrow> {
5568 public:
5569 StoreLocalInstr(const LocalVariable& local,
5570 Value* value,
5571 const InstructionSource& source)
5572 : TemplateDefinition(source),
5573 local_(local),
5574 is_dead_(false),
5575 is_last_(false),
5576 token_pos_(source.token_pos) {
5577 SetInputAt(i: 0, value);
5578 }
5579
5580 DECLARE_INSTRUCTION(StoreLocal)
5581 virtual CompileType ComputeType() const;
5582
5583 const LocalVariable& local() const { return local_; }
5584 Value* value() const { return inputs_[0]; }
5585
5586 virtual bool ComputeCanDeoptimize() const { return false; }
5587
5588 void mark_dead() { is_dead_ = true; }
5589 bool is_dead() const { return is_dead_; }
5590
5591 void mark_last() { is_last_ = true; }
5592 bool is_last() const { return is_last_; }
5593
5594 virtual bool HasUnknownSideEffects() const {
5595 UNREACHABLE(); // Eliminated by SSA construction.
5596 return false;
5597 }
5598
5599 virtual TokenPosition token_pos() const { return token_pos_; }
5600
5601 PRINT_OPERANDS_TO_SUPPORT
5602
5603#define FIELD_LIST(F) \
5604 F(const LocalVariable&, local_) \
5605 F(bool, is_dead_) \
5606 F(bool, is_last_) \
5607 F(const TokenPosition, token_pos_)
5608
5609 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreLocalInstr,
5610 TemplateDefinition,
5611 FIELD_LIST)
5612#undef FIELD_LIST
5613
5614 private:
5615 DISALLOW_COPY_AND_ASSIGN(StoreLocalInstr);
5616};
5617
// Call to a native (C/C++) function through the Dart native-entry
// mechanism. Resolution of the native function happens in SetupNative();
// when link_lazily() is true the function is linked lazily at run time.
class NativeCallInstr : public TemplateDartCall<0> {
 public:
  NativeCallInstr(const String& name,
                  const Function& function,
                  bool link_lazily,
                  const InstructionSource& source,
                  InputsArray&& args)
      : TemplateDartCall(DeoptId::kNone,
                         0,
                         Array::null_array(),
                         std::move(args),
                         source),
        native_name_(name),
        function_(function),
        token_pos_(source.token_pos),
        link_lazily_(link_lazily) {
    DEBUG_ASSERT(name.IsNotTemporaryScopedHandle());
    DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
    // +1 for return value placeholder.
    ASSERT(ArgumentCount() ==
           function.NumParameters() + (function.IsGeneric() ? 1 : 0) + 1);
  }

  DECLARE_INSTRUCTION(NativeCall)

  // Name of the native function.
  const String& native_name() const { return native_name_; }
  // The Dart function this native call implements.
  const Function& function() const { return function_; }
  // Resolved C function pointer; nullptr until set (see SetupNative).
  NativeFunction native_c_function() const { return native_c_function_; }
  bool is_bootstrap_native() const { return is_bootstrap_native_; }
  bool is_auto_scope() const { return is_auto_scope_; }
  bool link_lazily() const { return link_lazily_; }
  virtual TokenPosition token_pos() const { return token_pos_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return true; }

  // Always creates an exit frame before more Dart code can be called.
  virtual bool CanCallDart() const { return false; }

  // Resolves the native function and fills in the private fields below.
  void SetupNative();

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const String&, native_name_)                                               \
  F(const Function&, function_)                                                \
  F(const TokenPosition, token_pos_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeCallInstr,
                                          TemplateDartCall,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  void set_native_c_function(NativeFunction value) {
    native_c_function_ = value;
  }

  void set_is_bootstrap_native(bool value) { is_bootstrap_native_ = value; }
  void set_is_auto_scope(bool value) { is_auto_scope_ = value; }

  // These fields are not serialized.
  // IL serialization only supports lazy linking of native functions.
  NativeFunction native_c_function_ = nullptr;
  bool is_bootstrap_native_ = false;
  bool is_auto_scope_ = true;
  bool link_lazily_ = true;

  DISALLOW_COPY_AND_ASSIGN(NativeCallInstr);
};
5689
// Performs a call to native C code. In contrast to NativeCall, the arguments
// are unboxed and passed through the native calling convention. However, not
// all dart objects can be passed as arguments. Please see the FFI documentation
// for more details.
//
// Arguments to FfiCallInstr:
// - The arguments to the native call, marshalled in IL as far as possible.
// - The argument address.
// - A TypedData for the return value to populate in machine code (optional).
class FfiCallInstr : public VariadicDefinition {
 public:
  // Input count is derived from the marshaller: one input per marshalled
  // definition, +1 for the target address, +1 if a TypedData is passed for
  // a struct return value.
  FfiCallInstr(intptr_t deopt_id,
               const compiler::ffi::CallMarshaller& marshaller,
               bool is_leaf)
      : VariadicDefinition(marshaller.NumDefinitions() + 1 +
                               (marshaller.PassTypedData() ? 1 : 0),
                           deopt_id),
        marshaller_(marshaller),
        is_leaf_(is_leaf) {}

  DECLARE_INSTRUCTION(FfiCall)

  // Input index of the function pointer to invoke.
  intptr_t TargetAddressIndex() const { return marshaller_.NumDefinitions(); }

  // Input index of the typed data to populate if return value is struct.
  intptr_t TypedDataIndex() const {
    ASSERT(marshaller_.PassTypedData());
    return marshaller_.NumDefinitions() + 1;
  }

  virtual bool MayThrow() const {
    // By Dart_PropagateError.
    return true;
  }

  // FfiCallInstr calls C code, which can call back into Dart.
  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }

  virtual bool HasUnknownSideEffects() const { return true; }

  // Always creates an exit frame before more Dart code can be called.
  virtual bool CanCallDart() const { return false; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const;
  virtual Representation representation() const;

  // Returns true if we can assume generated code will be executable during a
  // safepoint.
  //
  // TODO(#37739): This should be true when dual-mapping is enabled as well, but
  // there are some bugs where it still switches code protections currently.
  static bool CanExecuteGeneratedCodeInSafepoint() {
    return FLAG_precompiled_mode;
  }

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const compiler::ffi::CallMarshaller&, marshaller_)                         \
  F(bool, is_leaf_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(FfiCallInstr,
                                          VariadicDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  LocationSummary* MakeLocationSummaryInternal(Zone* zone,
                                               bool is_optimizing,
                                               const RegList temps) const;

  // Clobbers the first two given registers.
  // `saved_fp` is used as the frame base to rebase off of.
  // `temp1` is only used in case of PointerToMemoryLocation.
  void EmitParamMoves(FlowGraphCompiler* compiler,
                      const Register saved_fp,
                      const Register temp0,
                      const Register temp1);
  // Clobbers both given temp registers.
  void EmitReturnMoves(FlowGraphCompiler* compiler,
                       const Register temp0,
                       const Register temp1);

  DISALLOW_COPY_AND_ASSIGN(FfiCallInstr);
};
5778
// Performs a call to C code through the native calling convention.
// Has the target address in a register passed as the last input in IL.
class CCallInstr : public VariadicDefinition {
 public:
  CCallInstr(
      const compiler::ffi::NativeCallingConvention& native_calling_convention,
      InputsArray&& inputs);

  DECLARE_INSTRUCTION(CCall)

  LocationSummary* MakeLocationSummaryInternal(Zone* zone,
                                               const RegList temps) const;

  // Input index of the function pointer to invoke (the argument inputs come
  // first, so the target is the last input).
  intptr_t TargetAddressIndex() const {
    return native_calling_convention_.argument_locations().length();
  }

  virtual bool MayThrow() const { return false; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return true; }

  virtual bool CanCallDart() const { return false; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const;
  virtual Representation representation() const;

  void EmitParamMoves(FlowGraphCompiler* compiler,
                      Register saved_fp,
                      Register temp0);

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const compiler::ffi::NativeCallingConvention&, native_calling_convention_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CCallInstr,
                                          VariadicDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(CCallInstr);
};
5824
5825// Populates the untagged base + offset outside the heap with a tagged value.
5826//
5827// The store must be outside of the heap, does not emit a store barrier.
5828// For stores in the heap, use StoreIndexedInstr, which emits store barriers.
5829//
5830// Does not have a dual RawLoadFieldInstr, because for loads we do not have to
5831// distinguish between loading from within the heap or outside the heap.
5832// Use FlowGraphBuilder::RawLoadField.
5833class RawStoreFieldInstr : public TemplateInstruction<2, NoThrow> {
5834 public:
5835 RawStoreFieldInstr(Value* base, Value* value, int32_t offset)
5836 : offset_(offset) {
5837 SetInputAt(i: kBase, value: base);
5838 SetInputAt(i: kValue, value);
5839 }
5840
5841 enum { kBase = 0, kValue = 1 };
5842
5843 DECLARE_INSTRUCTION(RawStoreField)
5844
5845 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5846 virtual bool ComputeCanDeoptimize() const { return false; }
5847 virtual bool HasUnknownSideEffects() const { return false; }
5848
5849#define FIELD_LIST(F) F(const int32_t, offset_)
5850
5851 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RawStoreFieldInstr,
5852 TemplateInstruction,
5853 FIELD_LIST)
5854#undef FIELD_LIST
5855
5856 private:
5857 DISALLOW_COPY_AND_ASSIGN(RawStoreFieldInstr);
5858};
5859
// Emits a stub call described by [stub_kind_] at [token_pos_] in unoptimized
// code. NOTE(review): presumably used by the debugger to support
// single-stepping — confirm against the stub implementations.
class DebugStepCheckInstr : public TemplateInstruction<0, NoThrow> {
 public:
  DebugStepCheckInstr(const InstructionSource& source,
                      UntaggedPcDescriptors::Kind stub_kind,
                      intptr_t deopt_id)
      : TemplateInstruction(source, deopt_id),
        token_pos_(source.token_pos),
        stub_kind_(stub_kind) {}

  DECLARE_INSTRUCTION(DebugStepCheck)

  virtual TokenPosition token_pos() const { return token_pos_; }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return true; }
  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

#define FIELD_LIST(F)                                                          \
  F(const TokenPosition, token_pos_)                                           \
  F(const UntaggedPcDescriptors::Kind, stub_kind_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DebugStepCheckInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
};
5888
// Whether a store instruction should (conservatively) emit a write barrier;
// see StoreFieldInstr/StoreIndexedInstr::ShouldEmitStoreBarrier for cases
// where the barrier is elided even when kEmitStoreBarrier is requested.
enum StoreBarrierType { kNoStoreBarrier, kEmitStoreBarrier };
5890
5891// StoreField instruction represents a store of the given [value] into
5892// the specified [slot] on the [instance] object. [emit_store_barrier] allows to
5893// specify whether the store should omit the write barrier. [kind] specifies
5894// whether this store is an initializing store, i.e. the first store into a
5895// field after the allocation.
5896//
5897// In JIT mode a slot might be a subject to the field unboxing optimization:
5898// if field type profiling shows that this slot always contains a double or SIMD
5899// value then this field becomes "unboxed" - in this case when storing into
5900// such field we update the payload of the box referenced by the field, rather
5901// than updating the field itself.
5902//
5903// Note: even if [emit_store_barrier] is set to [kEmitStoreBarrier] the store
5904// can still omit the barrier if it establishes that it is not needed.
5905//
5906// Note: stores generated from the constructor initializer list and from
5907// field initializers *must* be marked as initializing. Initializing stores
5908// into unboxed fields are responsible for allocating the mutable box which
5909// would be mutated by subsequent stores.
5910//
5911// Note: If the value to store is an unboxed derived pointer (e.g. pointer to
5912// start of internal typed data array backing) then this instruction cannot be
5913// moved across instructions which can trigger GC, to ensure that
5914//
5915// LoadUntagged + Arithmetic + StoreField
5916//
5917// are performed as an effectively atomic set of instructions.
5918//
5919// See kernel_to_il.cc:BuildTypedDataViewFactoryConstructor.
5920class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
5921 public:
5922 enum class Kind {
5923 // Store is known to be the first store into a slot of an object after
5924 // object was allocated and before it escapes (e.g. stores in constructor
5925 // initializer list).
5926 kInitializing,
5927
5928 // All other stores.
5929 kOther,
5930 };
5931
5932 StoreFieldInstr(const Slot& slot,
5933 Value* instance,
5934 Value* value,
5935 StoreBarrierType emit_store_barrier,
5936 const InstructionSource& source,
5937 Kind kind = Kind::kOther,
5938 compiler::Assembler::MemoryOrder memory_order =
5939 compiler::Assembler::kRelaxedNonAtomic)
5940 : TemplateInstruction(source),
5941 slot_(slot),
5942 emit_store_barrier_(emit_store_barrier),
5943 memory_order_(memory_order),
5944 token_pos_(source.token_pos),
5945 is_initialization_(kind == Kind::kInitializing) {
5946 SetInputAt(i: kInstancePos, value: instance);
5947 SetInputAt(i: kValuePos, value);
5948 }
5949
5950 // Convenience constructor that looks up an IL Slot for the given [field].
5951 StoreFieldInstr(const Field& field,
5952 Value* instance,
5953 Value* value,
5954 StoreBarrierType emit_store_barrier,
5955 const InstructionSource& source,
5956 const ParsedFunction* parsed_function,
5957 Kind kind = Kind::kOther)
5958 : StoreFieldInstr(Slot::Get(field, parsed_function),
5959 instance,
5960 value,
5961 emit_store_barrier,
5962 source,
5963 kind) {}
5964
5965 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
5966 // Slots are unboxed based on statically inferrable type information.
5967 // Either sound non-nullable static types (JIT) or global type flow analysis
5968 // results (AOT).
5969 return slot().representation() != kTagged ? kNotSpeculative : kGuardInputs;
5970 }
5971
5972 DECLARE_INSTRUCTION(StoreField)
5973
5974 enum { kInstancePos = 0, kValuePos = 1 };
5975
5976 Value* instance() const { return inputs_[kInstancePos]; }
5977 const Slot& slot() const { return slot_; }
5978 Value* value() const { return inputs_[kValuePos]; }
5979
5980 virtual TokenPosition token_pos() const { return token_pos_; }
5981 bool is_initialization() const { return is_initialization_; }
5982
5983 bool ShouldEmitStoreBarrier() const {
5984 if (RepresentationUtils::IsUnboxed(rep: slot().representation())) {
5985 // The target field is native and unboxed, so not traversed by the GC.
5986 return false;
5987 }
5988 if (instance()->definition() == value()->definition()) {
5989 // `x.slot = x` cannot create an old->new or old&marked->old&unmarked
5990 // reference.
5991 return false;
5992 }
5993
5994 if (value()->definition()->Type()->IsBool()) {
5995 return false;
5996 }
5997 return value()->NeedsWriteBarrier() &&
5998 (emit_store_barrier_ == kEmitStoreBarrier);
5999 }
6000
6001 void set_emit_store_barrier(StoreBarrierType value) {
6002 emit_store_barrier_ = value;
6003 }
6004
6005 virtual bool CanTriggerGC() const { return false; }
6006
6007 virtual bool ComputeCanDeoptimize() const { return false; }
6008
6009 // May require a deoptimization target for input conversions.
6010 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
6011
6012 // Currently CSE/LICM don't operate on any instructions that can be affected
6013 // by stores/loads. LoadOptimizer handles loads separately. Hence stores
6014 // are marked as having no side-effects.
6015 virtual bool HasUnknownSideEffects() const { return false; }
6016
6017 virtual Representation RequiredInputRepresentation(intptr_t index) const;
6018
6019 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
6020
6021 PRINT_OPERANDS_TO_SUPPORT
6022
6023#define FIELD_LIST(F) \
6024 F(const Slot&, slot_) \
6025 F(StoreBarrierType, emit_store_barrier_) \
6026 F(compiler::Assembler::MemoryOrder, memory_order_) \
6027 F(const TokenPosition, token_pos_) \
6028 /* Marks initializing stores. E.g. in the constructor. */ \
6029 F(const bool, is_initialization_)
6030
6031 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreFieldInstr,
6032 TemplateInstruction,
6033 FIELD_LIST)
6034#undef FIELD_LIST
6035
6036 private:
6037 friend class JitCallSpecializer; // For ASSERT(initialization_).
6038
6039 intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
6040
6041 compiler::Assembler::CanBeSmi CanValueBeSmi() const {
6042 // Write barrier is skipped for nullable and non-nullable smis.
6043 ASSERT(value()->Type()->ToNullableCid() != kSmiCid);
6044 return value()->Type()->CanBeSmi() ? compiler::Assembler::kValueCanBeSmi
6045 : compiler::Assembler::kValueIsNotSmi;
6046 }
6047
6048 DISALLOW_COPY_AND_ASSIGN(StoreFieldInstr);
6049};
6050
6051class GuardFieldInstr : public TemplateInstruction<1, NoThrow, Pure> {
6052 public:
6053 GuardFieldInstr(Value* value, const Field& field, intptr_t deopt_id)
6054 : TemplateInstruction(deopt_id), field_(field) {
6055 SetInputAt(i: 0, value);
6056 CheckField(field);
6057 }
6058
6059 Value* value() const { return inputs_[0]; }
6060
6061 const Field& field() const { return field_; }
6062
6063 virtual bool ComputeCanDeoptimize() const { return true; }
6064 virtual bool CanBecomeDeoptimizationTarget() const {
6065 // Ensure that we record kDeopt PC descriptor in unoptimized code.
6066 return true;
6067 }
6068
6069 PRINT_OPERANDS_TO_SUPPORT
6070
6071#define FIELD_LIST(F) F(const Field&, field_)
6072
6073 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(GuardFieldInstr,
6074 TemplateInstruction,
6075 FIELD_LIST)
6076#undef FIELD_LIST
6077
6078 private:
6079 DISALLOW_COPY_AND_ASSIGN(GuardFieldInstr);
6080};
6081
// Guards the class id aspect of [field]'s guarded state for the stored
// [value]; deoptimizes on mismatch (see GuardFieldInstr).
class GuardFieldClassInstr : public GuardFieldInstr {
 public:
  GuardFieldClassInstr(Value* value, const Field& field, intptr_t deopt_id)
      : GuardFieldInstr(value, field, deopt_id) {
    CheckField(field);
  }

  DECLARE_INSTRUCTION(GuardFieldClass)

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(const Instruction& other) const;

  DECLARE_EMPTY_SERIALIZATION(GuardFieldClassInstr, GuardFieldInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(GuardFieldClassInstr);
};
6100
// Guards the length aspect of [field]'s guarded state for the stored
// [value]; deoptimizes on mismatch (see GuardFieldInstr).
class GuardFieldLengthInstr : public GuardFieldInstr {
 public:
  GuardFieldLengthInstr(Value* value, const Field& field, intptr_t deopt_id)
      : GuardFieldInstr(value, field, deopt_id) {
    CheckField(field);
  }

  DECLARE_INSTRUCTION(GuardFieldLength)

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(const Instruction& other) const;

  DECLARE_EMPTY_SERIALIZATION(GuardFieldLengthInstr, GuardFieldInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(GuardFieldLengthInstr);
};
6119
// For a field of static type G<T0, ..., Tn> and a stored value of runtime
// type T checks that type arguments of T at G exactly match <T0, ..., Tn>
// and updates guarded state (UntaggedField::static_type_exactness_state_)
// accordingly.
//
// See StaticTypeExactnessState for more information.
class GuardFieldTypeInstr : public GuardFieldInstr {
 public:
  GuardFieldTypeInstr(Value* value, const Field& field, intptr_t deopt_id)
      : GuardFieldInstr(value, field, deopt_id) {
    CheckField(field);
  }

  DECLARE_INSTRUCTION(GuardFieldType)

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(const Instruction& other) const;

  DECLARE_EMPTY_SERIALIZATION(GuardFieldTypeInstr, GuardFieldInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(GuardFieldTypeInstr);
};
6144
// Shared base class for field loads that may need to run a field initializer
// (or throw, for a late field without initializer) when the field is unset.
// Tracks the source position and whether this load calls the initializer.
template <intptr_t N>
class TemplateLoadField : public TemplateDefinition<N, Throws> {
  using Base = TemplateDefinition<N, Throws>;

 public:
  TemplateLoadField(const InstructionSource& source,
                    bool calls_initializer = false,
                    intptr_t deopt_id = DeoptId::kNone,
                    const Field* field = nullptr)
      : Base(source, deopt_id),
        token_pos_(source.token_pos),
        // A late field without an initializer throws instead of initializing.
        throw_exception_on_initialization_(
            field != nullptr && !field->has_initializer() && field->is_late()),
        calls_initializer_(calls_initializer) {
    ASSERT(!calls_initializer || field != nullptr);
    ASSERT(!calls_initializer || (deopt_id != DeoptId::kNone));
  }

  virtual TokenPosition token_pos() const { return token_pos_; }
  bool calls_initializer() const { return calls_initializer_; }
  void set_calls_initializer(bool value) { calls_initializer_ = value; }

  bool throw_exception_on_initialization() const {
    return throw_exception_on_initialization_;
  }

  // Slow path is used if load throws exception on initialization.
  virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
    return Base::SlowPathSharingSupported(is_optimizing);
  }

  virtual intptr_t DeoptimizationTarget() const { return Base::GetDeoptId(); }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool ComputeCanDeoptimizeAfterCall() const {
    return calls_initializer() && !CompilerState::Current().is_aot();
  }
  virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
    return Base::InputCount();
  }

  virtual bool HasUnknownSideEffects() const {
    return calls_initializer() && !throw_exception_on_initialization();
  }

  virtual bool CanCallDart() const {
    // The slow path (running the field initializer) always calls one of a
    // specific set of stubs. For those stubs that do not simply call the
    // runtime, the GC recognizes their frames and restores write barriers
    // automatically (see Thread::RestoreWriteBarrierInvariant).
    return false;
  }
  virtual bool CanTriggerGC() const { return calls_initializer(); }
  virtual bool MayThrow() const { return calls_initializer(); }

#define FIELD_LIST(F)                                                          \
  F(const TokenPosition, token_pos_)                                           \
  F(const bool, throw_exception_on_initialization_)                           \
  F(bool, calls_initializer_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TemplateLoadField, Base, FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(TemplateLoadField);
};
6210
// Loads the current value of the static [field], optionally running the
// field's initializer first (see TemplateLoadField).
class LoadStaticFieldInstr : public TemplateLoadField<0> {
 public:
  LoadStaticFieldInstr(const Field& field,
                       const InstructionSource& source,
                       bool calls_initializer = false,
                       intptr_t deopt_id = DeoptId::kNone)
      : TemplateLoadField<0>(source, calls_initializer, deopt_id, &field),
        field_(field) {}

  DECLARE_INSTRUCTION(LoadStaticField)

  virtual CompileType ComputeType() const;

  const Field& field() const { return field_; }

  virtual bool AllowsCSE() const {
    // If two loads of a static-final-late field call the initializer and one
    // dominates another, we can remove the dominated load with the result of
    // the dominating load.
    //
    // Though if the field is final-late there can be stores into it via
    // load/compare-with-sentinel/store. Those loads have
    // `!field().has_initializer()` and we won't allow CSE for them.
    return field().is_final() &&
           (!field().is_late() || field().has_initializer());
  }

  virtual bool AttributesEqual(const Instruction& other) const;

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F) F(const Field&, field_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadStaticFieldInstr,
                                          TemplateLoadField,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(LoadStaticFieldInstr);
};
6252
6253class StoreStaticFieldInstr : public TemplateDefinition<1, NoThrow> {
6254 public:
6255 StoreStaticFieldInstr(const Field& field,
6256 Value* value,
6257 const InstructionSource& source)
6258 : TemplateDefinition(source),
6259 field_(field),
6260 token_pos_(source.token_pos) {
6261 DEBUG_ASSERT(field.IsNotTemporaryScopedHandle());
6262 SetInputAt(i: kValuePos, value);
6263 CheckField(field);
6264 }
6265
6266 enum { kValuePos = 0 };
6267
6268 DECLARE_INSTRUCTION(StoreStaticField)
6269
6270 const Field& field() const { return field_; }
6271 Value* value() const { return inputs_[kValuePos]; }
6272
6273 virtual bool ComputeCanDeoptimize() const { return false; }
6274
6275 // Currently CSE/LICM don't operate on any instructions that can be affected
6276 // by stores/loads. LoadOptimizer handles loads separately. Hence stores
6277 // are marked as having no side-effects.
6278 virtual bool HasUnknownSideEffects() const { return false; }
6279
6280 virtual TokenPosition token_pos() const { return token_pos_; }
6281
6282 PRINT_OPERANDS_TO_SUPPORT
6283
6284#define FIELD_LIST(F) \
6285 F(const Field&, field_) \
6286 F(const TokenPosition, token_pos_)
6287
6288 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreStaticFieldInstr,
6289 TemplateDefinition,
6290 FIELD_LIST)
6291#undef FIELD_LIST
6292
6293 private:
6294 compiler::Assembler::CanBeSmi CanValueBeSmi() const {
6295 ASSERT(value()->Type()->ToNullableCid() != kSmiCid);
6296 return value()->Type()->CanBeSmi() ? compiler::Assembler::kValueCanBeSmi
6297 : compiler::Assembler::kValueIsNotSmi;
6298 }
6299
6300 DISALLOW_COPY_AND_ASSIGN(StoreStaticFieldInstr);
6301};
6302
// Whether an indexed memory access is known to be properly aligned for its
// element size (kAlignedAccess) or must be performed as an unaligned access.
enum AlignmentType {
  kUnalignedAccess,
  kAlignedAccess,
};
6307
6308class LoadIndexedInstr : public TemplateDefinition<2, NoThrow> {
6309 public:
6310 LoadIndexedInstr(Value* array,
6311 Value* index,
6312 bool index_unboxed,
6313 intptr_t index_scale,
6314 intptr_t class_id,
6315 AlignmentType alignment,
6316 intptr_t deopt_id,
6317 const InstructionSource& source,
6318 CompileType* result_type = nullptr);
6319
6320 TokenPosition token_pos() const { return token_pos_; }
6321
6322 DECLARE_INSTRUCTION(LoadIndexed)
6323 virtual CompileType ComputeType() const;
6324 virtual bool RecomputeType();
6325
6326 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6327 ASSERT(idx == 0 || idx == 1);
6328 // The array may be tagged or untagged (for external arrays).
6329 if (idx == 0) return kNoRepresentation;
6330
6331 if (index_unboxed_) {
6332#if defined(TARGET_ARCH_IS_64_BIT)
6333 return kUnboxedInt64;
6334#else
6335 return kUnboxedUint32;
6336#endif
6337 } else {
6338 return kTagged; // Index is a smi.
6339 }
6340 }
6341
6342 bool IsExternal() const {
6343 return array()->definition()->representation() == kUntagged;
6344 }
6345
6346 Value* array() const { return inputs_[0]; }
6347 Value* index() const { return inputs_[1]; }
6348 intptr_t index_scale() const { return index_scale_; }
6349 intptr_t class_id() const { return class_id_; }
6350 bool aligned() const { return alignment_ == kAlignedAccess; }
6351
6352 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
6353 virtual bool ComputeCanDeoptimize() const {
6354 return GetDeoptId() != DeoptId::kNone;
6355 }
6356
6357 // Representation of LoadIndexed from arrays with given cid.
6358 static Representation RepresentationOfArrayElement(intptr_t array_cid);
6359
6360 Representation representation() const {
6361 return RepresentationOfArrayElement(array_cid: class_id());
6362 }
6363
6364 virtual void InferRange(RangeAnalysis* analysis, Range* range);
6365
6366 virtual bool HasUnknownSideEffects() const { return false; }
6367
6368 virtual Definition* Canonicalize(FlowGraph* flow_graph);
6369
6370#define FIELD_LIST(F) \
6371 F(const bool, index_unboxed_) \
6372 F(const intptr_t, index_scale_) \
6373 F(const intptr_t, class_id_) \
6374 F(const AlignmentType, alignment_) \
6375 F(const TokenPosition, token_pos_) \
6376 /* derived from call */ \
6377 F(CompileType*, result_type_)
6378
6379 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadIndexedInstr,
6380 TemplateDefinition,
6381 FIELD_LIST)
6382#undef FIELD_LIST
6383
6384 private:
6385 DISALLOW_COPY_AND_ASSIGN(LoadIndexedInstr);
6386};
6387
6388// Loads the specified number of code units from the given string, packing
6389// multiple code units into a single datatype. In essence, this is a specialized
6390// version of LoadIndexedInstr which accepts only string targets and can load
6391// multiple elements at once. The result datatype differs depending on the
6392// string type, element count, and architecture; if possible, the result is
6393// packed into a Smi, falling back to a Mint otherwise.
6394// TODO(zerny): Add support for loading into UnboxedInt32x4.
6395class LoadCodeUnitsInstr : public TemplateDefinition<2, NoThrow> {
6396 public:
6397 LoadCodeUnitsInstr(Value* str,
6398 Value* index,
6399 intptr_t element_count,
6400 intptr_t class_id,
6401 const InstructionSource& source)
6402 : TemplateDefinition(source),
6403 class_id_(class_id),
6404 token_pos_(source.token_pos),
6405 element_count_(element_count),
6406 representation_(kTagged) {
6407 ASSERT(element_count == 1 || element_count == 2 || element_count == 4);
6408 ASSERT(IsStringClassId(class_id));
6409 SetInputAt(i: 0, value: str);
6410 SetInputAt(i: 1, value: index);
6411 }
6412
6413 TokenPosition token_pos() const { return token_pos_; }
6414
6415 DECLARE_INSTRUCTION(LoadCodeUnits)
6416 virtual CompileType ComputeType() const;
6417
6418 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6419 if (idx == 0) {
6420 // The string may be tagged or untagged (for external strings).
6421 return kNoRepresentation;
6422 }
6423 ASSERT(idx == 1);
6424 return kTagged;
6425 }
6426
6427 bool IsExternal() const {
6428 return array()->definition()->representation() == kUntagged;
6429 }
6430
6431 Value* array() const { return inputs_[0]; }
6432 Value* index() const { return inputs_[1]; }
6433
6434 intptr_t index_scale() const {
6435 return compiler::target::Instance::ElementSizeFor(cid: class_id_);
6436 }
6437
6438 intptr_t class_id() const { return class_id_; }
6439 intptr_t element_count() const { return element_count_; }
6440
6441 bool can_pack_into_smi() const {
6442 return element_count() <=
6443 compiler::target::kSmiBits / (index_scale() * kBitsPerByte);
6444 }
6445
6446 virtual bool ComputeCanDeoptimize() const { return false; }
6447
6448 virtual Representation representation() const { return representation_; }
6449 void set_representation(Representation repr) { representation_ = repr; }
6450 virtual void InferRange(RangeAnalysis* analysis, Range* range);
6451
6452 virtual bool HasUnknownSideEffects() const { return false; }
6453
6454 virtual bool CanTriggerGC() const {
6455 return !can_pack_into_smi() && (representation() == kTagged);
6456 }
6457
6458#define FIELD_LIST(F) \
6459 F(const intptr_t, class_id_) \
6460 F(const TokenPosition, token_pos_) \
6461 F(const intptr_t, element_count_) \
6462 F(Representation, representation_)
6463
6464 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadCodeUnitsInstr,
6465 TemplateDefinition,
6466 FIELD_LIST)
6467#undef FIELD_LIST
6468
6469 private:
6470 DISALLOW_COPY_AND_ASSIGN(LoadCodeUnitsInstr);
6471};
6472
6473class OneByteStringFromCharCodeInstr
6474 : public TemplateDefinition<1, NoThrow, Pure> {
6475 public:
6476 explicit OneByteStringFromCharCodeInstr(Value* char_code) {
6477 SetInputAt(i: 0, value: char_code);
6478 }
6479
6480 DECLARE_INSTRUCTION(OneByteStringFromCharCode)
6481 virtual CompileType ComputeType() const;
6482
6483 Value* char_code() const { return inputs_[0]; }
6484
6485 virtual bool ComputeCanDeoptimize() const { return false; }
6486
6487 virtual bool AttributesEqual(const Instruction& other) const { return true; }
6488
6489 DECLARE_EMPTY_SERIALIZATION(OneByteStringFromCharCodeInstr,
6490 TemplateDefinition)
6491
6492 private:
6493 DISALLOW_COPY_AND_ASSIGN(OneByteStringFromCharCodeInstr);
6494};
6495
6496class StringToCharCodeInstr : public TemplateDefinition<1, NoThrow, Pure> {
6497 public:
6498 StringToCharCodeInstr(Value* str, intptr_t cid) : cid_(cid) {
6499 ASSERT(str != nullptr);
6500 SetInputAt(i: 0, value: str);
6501 }
6502
6503 DECLARE_INSTRUCTION(StringToCharCode)
6504 virtual CompileType ComputeType() const;
6505
6506 Value* str() const { return inputs_[0]; }
6507
6508 virtual bool ComputeCanDeoptimize() const { return false; }
6509
6510 virtual bool AttributesEqual(const Instruction& other) const {
6511 return other.AsStringToCharCode()->cid_ == cid_;
6512 }
6513
6514#define FIELD_LIST(F) F(const intptr_t, cid_)
6515
6516 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StringToCharCodeInstr,
6517 TemplateDefinition,
6518 FIELD_LIST)
6519#undef FIELD_LIST
6520
6521 private:
6522 DISALLOW_COPY_AND_ASSIGN(StringToCharCodeInstr);
6523};
6524
6525// Scanning instruction to compute the result size and decoding parameters
6526// for the UTF-8 decoder. Equivalent to:
6527//
6528// int _scan(Uint8List bytes, int start, int end, _OneByteString table,
6529// _Utf8Decoder decoder) {
6530// int size = 0;
6531// int flags = 0;
6532// for (int i = start; i < end; i++) {
6533// int t = table.codeUnitAt(bytes[i]);
6534// size += t & sizeMask;
6535// flags |= t;
6536// }
6537// decoder._scanFlags |= flags & flagsMask;
6538// return size;
6539// }
6540//
6541// under these assumptions:
6542// - The difference between start and end must be less than 2^30, since the
6543// resulting length can be twice the input length (and the result has to be in
6544// Smi range). This is guaranteed by `_Utf8Decoder.chunkSize` which is set to
6545// `65536`.
6546// - The decoder._scanFlags field is unboxed or contains a smi.
6547// - The first 128 entries of the table have the value 1.
6548class Utf8ScanInstr : public TemplateDefinition<5, NoThrow> {
6549 public:
6550 Utf8ScanInstr(Value* decoder,
6551 Value* bytes,
6552 Value* start,
6553 Value* end,
6554 Value* table,
6555 const Slot& decoder_scan_flags_field)
6556 : scan_flags_field_(decoder_scan_flags_field) {
6557 SetInputAt(i: 0, value: decoder);
6558 SetInputAt(i: 1, value: bytes);
6559 SetInputAt(i: 2, value: start);
6560 SetInputAt(i: 3, value: end);
6561 SetInputAt(i: 4, value: table);
6562 }
6563
6564 DECLARE_INSTRUCTION(Utf8Scan)
6565
6566 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6567 ASSERT(idx >= 0 || idx <= 4);
6568 // The start and end inputs are unboxed, but in smi range.
6569 if (idx == 2 || idx == 3) return kUnboxedIntPtr;
6570 return kTagged;
6571 }
6572
6573 virtual Representation representation() const { return kUnboxedIntPtr; }
6574
6575 virtual CompileType ComputeType() const { return CompileType::Int(); }
6576 virtual bool HasUnknownSideEffects() const { return true; }
6577 virtual bool ComputeCanDeoptimize() const { return false; }
6578 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
6579 virtual void InferRange(RangeAnalysis* analysis, Range* range);
6580
6581 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
6582 return kNotSpeculative;
6583 }
6584
6585 virtual bool AttributesEqual(const Instruction& other) const {
6586 return scan_flags_field_.Equals(other: other.AsUtf8Scan()->scan_flags_field_);
6587 }
6588
6589 bool IsScanFlagsUnboxed() const;
6590
6591 PRINT_TO_SUPPORT
6592
6593#define FIELD_LIST(F) F(const Slot&, scan_flags_field_)
6594
6595 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Utf8ScanInstr,
6596 TemplateDefinition,
6597 FIELD_LIST)
6598#undef FIELD_LIST
6599
6600 private:
6601 DISALLOW_COPY_AND_ASSIGN(Utf8ScanInstr);
6602};
6603
// Stores `value` into `array` at `index` (scaled by `index_scale`), with an
// optional write barrier. The element representation is determined by the
// array's class id.
class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
 public:
  StoreIndexedInstr(Value* array,
                    Value* index,
                    Value* value,
                    StoreBarrierType emit_store_barrier,
                    bool index_unboxed,
                    intptr_t index_scale,
                    intptr_t class_id,
                    AlignmentType alignment,
                    intptr_t deopt_id,
                    const InstructionSource& source,
                    SpeculativeMode speculative_mode = kGuardInputs);
  DECLARE_INSTRUCTION(StoreIndexed)

  // Fixed input positions.
  enum { kArrayPos = 0, kIndexPos = 1, kValuePos = 2 };

  Value* array() const { return inputs_[kArrayPos]; }
  Value* index() const { return inputs_[kIndexPos]; }
  Value* value() const { return inputs_[kValuePos]; }

  intptr_t index_scale() const { return index_scale_; }
  intptr_t class_id() const { return class_id_; }
  bool aligned() const { return alignment_ == kAlignedAccess; }

  // Returns true if a GC write barrier must be emitted for this store.
  bool ShouldEmitStoreBarrier() const {
    if (array()->definition() == value()->definition()) {
      // `x[slot] = x` cannot create an old->new or old&marked->old&unmarked
      // reference.
      return false;
    }

    // Booleans are canonical immortal objects, so no barrier is needed.
    if (value()->definition()->Type()->IsBool()) {
      return false;
    }
    return value()->NeedsWriteBarrier() &&
           (emit_store_barrier_ == kEmitStoreBarrier);
  }

  void set_emit_store_barrier(StoreBarrierType value) {
    emit_store_barrier_ = value;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  // Representation of value passed to StoreIndexed for arrays with given cid.
  static Representation RepresentationOfArrayElement(intptr_t array_cid);

  virtual Representation RequiredInputRepresentation(intptr_t idx) const;

  // True when the backing store lives outside the heap (untagged pointer).
  bool IsExternal() const {
    return array()->definition()->representation() == kUntagged;
  }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  virtual bool HasUnknownSideEffects() const { return false; }

  void PrintOperandsTo(BaseTextBuffer* f) const;

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

#define FIELD_LIST(F)                                                          \
  F(StoreBarrierType, emit_store_barrier_)                                     \
  F(const bool, index_unboxed_)                                                \
  F(const intptr_t, index_scale_)                                              \
  F(const intptr_t, class_id_)                                                 \
  F(const AlignmentType, alignment_)                                           \
  F(const TokenPosition, token_pos_)                                           \
  F(const SpeculativeMode, speculative_mode_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreIndexedInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  compiler::Assembler::CanBeSmi CanValueBeSmi() const {
    return compiler::Assembler::kValueCanBeSmi;
  }

  DISALLOW_COPY_AND_ASSIGN(StoreIndexedInstr);
};
6695
// Marks the entry at `coverage_index` in `coverage_array` as covered when
// executed. Used to implement code-coverage collection.
class RecordCoverageInstr : public TemplateInstruction<0, NoThrow> {
 public:
  RecordCoverageInstr(const Array& coverage_array,
                      intptr_t coverage_index,
                      const InstructionSource& source)
      : TemplateInstruction(source),
        coverage_array_(coverage_array),
        coverage_index_(coverage_index),
        token_pos_(source.token_pos) {}

  DECLARE_INSTRUCTION(RecordCoverage)

  virtual TokenPosition token_pos() const { return token_pos_; }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }
  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

#define FIELD_LIST(F)                                                          \
  F(const Array&, coverage_array_)                                             \
  F(const intptr_t, coverage_index_)                                           \
  F(const TokenPosition, token_pos_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RecordCoverageInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(RecordCoverageInstr);
};
6726
6727// Note overridable, built-in: value ? false : true.
6728class BooleanNegateInstr : public TemplateDefinition<1, NoThrow> {
6729 public:
6730 explicit BooleanNegateInstr(Value* value) { SetInputAt(i: 0, value); }
6731
6732 DECLARE_INSTRUCTION(BooleanNegate)
6733 virtual CompileType ComputeType() const;
6734
6735 Value* value() const { return inputs_[0]; }
6736
6737 virtual bool ComputeCanDeoptimize() const { return false; }
6738
6739 virtual bool HasUnknownSideEffects() const { return false; }
6740
6741 virtual Definition* Canonicalize(FlowGraph* flow_graph);
6742
6743 DECLARE_EMPTY_SERIALIZATION(BooleanNegateInstr, TemplateDefinition)
6744
6745 private:
6746 DISALLOW_COPY_AND_ASSIGN(BooleanNegateInstr);
6747};
6748
6749class InstanceOfInstr : public TemplateDefinition<3, Throws> {
6750 public:
6751 InstanceOfInstr(const InstructionSource& source,
6752 Value* value,
6753 Value* instantiator_type_arguments,
6754 Value* function_type_arguments,
6755 const AbstractType& type,
6756 intptr_t deopt_id)
6757 : TemplateDefinition(source, deopt_id),
6758 token_pos_(source.token_pos),
6759 type_(type) {
6760 ASSERT(!type.IsNull());
6761 SetInputAt(i: 0, value);
6762 SetInputAt(i: 1, value: instantiator_type_arguments);
6763 SetInputAt(i: 2, value: function_type_arguments);
6764 }
6765
6766 DECLARE_INSTRUCTION(InstanceOf)
6767 virtual CompileType ComputeType() const;
6768
6769 Value* value() const { return inputs_[0]; }
6770 Value* instantiator_type_arguments() const { return inputs_[1]; }
6771 Value* function_type_arguments() const { return inputs_[2]; }
6772
6773 const AbstractType& type() const { return type_; }
6774 virtual TokenPosition token_pos() const { return token_pos_; }
6775
6776 virtual bool ComputeCanDeoptimize() const {
6777 return !CompilerState::Current().is_aot();
6778 }
6779
6780 virtual bool HasUnknownSideEffects() const { return false; }
6781
6782 PRINT_OPERANDS_TO_SUPPORT
6783
6784#define FIELD_LIST(F) \
6785 F(const TokenPosition, token_pos_) \
6786 F(const AbstractType&, type_)
6787
6788 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstanceOfInstr,
6789 TemplateDefinition,
6790 FIELD_LIST)
6791#undef FIELD_LIST
6792
6793 private:
6794 DISALLOW_COPY_AND_ASSIGN(InstanceOfInstr);
6795};
6796
6797// Subclasses of 'AllocationInstr' must maintain the invariant that if
6798// 'WillAllocateNewOrRemembered' is true, then the result of the allocation must
6799// either reside in new space or be in the store buffer.
6800class AllocationInstr : public Definition {
6801 public:
6802 explicit AllocationInstr(const InstructionSource& source,
6803 intptr_t deopt_id = DeoptId::kNone)
6804 : Definition(source, deopt_id),
6805 token_pos_(source.token_pos),
6806 identity_(AliasIdentity::Unknown()) {}
6807
6808 virtual TokenPosition token_pos() const { return token_pos_; }
6809
6810 virtual AliasIdentity Identity() const { return identity_; }
6811 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
6812
6813 // TODO(sjindel): Update these conditions when the incremental write barrier
6814 // is added.
6815 virtual bool WillAllocateNewOrRemembered() const = 0;
6816
6817 virtual bool MayThrow() const {
6818 // Any allocation instruction may throw an OutOfMemory error.
6819 return true;
6820 }
6821 virtual bool ComputeCanDeoptimize() const { return false; }
6822 virtual bool ComputeCanDeoptimizeAfterCall() const {
6823 // We test that allocation instructions have correct deopt environment
6824 // (which is needed in case OOM is thrown) by actually deoptimizing
6825 // optimized code in allocation slow paths.
6826 return !CompilerState::Current().is_aot();
6827 }
6828 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
6829 return InputCount();
6830 }
6831
6832 // Returns the slot in the allocated object that contains the value at the
6833 // given input position. Returns nullptr if the input position is invalid
6834 // or if the input is not stored in the object.
6835 virtual const Slot* SlotForInput(intptr_t pos) { return nullptr; }
6836
6837 // Returns the input index that has a corresponding slot which is identical to
6838 // the given slot. Returns a negative index if no such input found.
6839 intptr_t InputForSlot(const Slot& slot) {
6840 for (intptr_t i = 0; i < InputCount(); i++) {
6841 auto* const input_slot = SlotForInput(pos: i);
6842 if (input_slot != nullptr && input_slot->IsIdentical(other: slot)) {
6843 return i;
6844 }
6845 }
6846 return -1;
6847 }
6848
6849 // Returns whether the allocated object has initialized fields and/or payload
6850 // elements. Override for any subclass that returns an uninitialized object.
6851 virtual bool ObjectIsInitialized() { return true; }
6852
6853 PRINT_OPERANDS_TO_SUPPORT
6854
6855 DECLARE_ABSTRACT_INSTRUCTION(Allocation);
6856
6857#define FIELD_LIST(F) \
6858 F(const TokenPosition, token_pos_) \
6859 F(AliasIdentity, identity_)
6860
6861 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocationInstr,
6862 Definition,
6863 FIELD_LIST)
6864#undef FIELD_LIST
6865
6866 private:
6867 DISALLOW_COPY_AND_ASSIGN(AllocationInstr);
6868};
6869
// Convenience base class for allocation instructions with a fixed number (N)
// of inputs, stored in an embedded array.
template <intptr_t N>
class TemplateAllocation : public AllocationInstr {
 public:
  explicit TemplateAllocation(const InstructionSource& source,
                              intptr_t deopt_id)
      : AllocationInstr(source, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  DECLARE_EMPTY_SERIALIZATION(TemplateAllocation, AllocationInstr)

 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  friend class BranchInstr;
  friend class IfThenElseInstr;
  friend class RecordCoverageInstr;

  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
6892
6893class AllocateObjectInstr : public AllocationInstr {
6894 public:
6895 enum { kTypeArgumentsPos = 0 };
6896 AllocateObjectInstr(const InstructionSource& source,
6897 const Class& cls,
6898 intptr_t deopt_id,
6899 Value* type_arguments = nullptr)
6900 : AllocationInstr(source, deopt_id),
6901 cls_(cls),
6902 has_type_arguments_(type_arguments != nullptr),
6903 type_arguments_slot_(nullptr),
6904 type_arguments_(type_arguments) {
6905 DEBUG_ASSERT(cls.IsNotTemporaryScopedHandle());
6906 ASSERT(!cls.IsNull());
6907 ASSERT((cls.NumTypeArguments() > 0) == has_type_arguments_);
6908 if (has_type_arguments_) {
6909 SetInputAt(i: kTypeArgumentsPos, value: type_arguments);
6910 type_arguments_slot_ =
6911 &Slot::GetTypeArgumentsSlotFor(thread: Thread::Current(), cls);
6912 }
6913 }
6914
6915 DECLARE_INSTRUCTION(AllocateObject)
6916 virtual CompileType ComputeType() const;
6917
6918 const Class& cls() const { return cls_; }
6919 Value* type_arguments() const { return type_arguments_; }
6920
6921 virtual intptr_t InputCount() const { return has_type_arguments_ ? 1 : 0; }
6922 virtual Value* InputAt(intptr_t i) const {
6923 ASSERT(has_type_arguments_ && i == kTypeArgumentsPos);
6924 return type_arguments_;
6925 }
6926
6927 virtual bool HasUnknownSideEffects() const { return false; }
6928
6929 virtual bool WillAllocateNewOrRemembered() const {
6930 return WillAllocateNewOrRemembered(cls: cls());
6931 }
6932
6933 static bool WillAllocateNewOrRemembered(const Class& cls) {
6934 return IsAllocatableInNewSpace(size: cls.target_instance_size());
6935 }
6936
6937 virtual const Slot* SlotForInput(intptr_t pos) {
6938 return pos == kTypeArgumentsPos ? type_arguments_slot_ : nullptr;
6939 }
6940
6941 PRINT_OPERANDS_TO_SUPPORT
6942
6943#define FIELD_LIST(F) \
6944 F(const Class&, cls_) \
6945 F(const bool, has_type_arguments_) \
6946 F(const Slot*, type_arguments_slot_)
6947
6948 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateObjectInstr,
6949 AllocationInstr,
6950 FIELD_LIST)
6951#undef FIELD_LIST
6952
6953 private:
6954 virtual void RawSetInputAt(intptr_t i, Value* value) {
6955 ASSERT(has_type_arguments_ && (i == kTypeArgumentsPos));
6956 type_arguments_ = value;
6957 }
6958
6959 Value* type_arguments_ = nullptr;
6960
6961 DISALLOW_COPY_AND_ASSIGN(AllocateObjectInstr);
6962};
6963
6964// Allocates and null initializes a closure object, given the closure function
6965// and the context as values.
6966class AllocateClosureInstr : public TemplateAllocation<2> {
6967 public:
6968 enum Inputs { kFunctionPos = 0, kContextPos = 1 };
6969 AllocateClosureInstr(const InstructionSource& source,
6970 Value* closure_function,
6971 Value* context,
6972 intptr_t deopt_id)
6973 : TemplateAllocation(source, deopt_id) {
6974 SetInputAt(i: kFunctionPos, value: closure_function);
6975 SetInputAt(i: kContextPos, value: context);
6976 }
6977
6978 DECLARE_INSTRUCTION(AllocateClosure)
6979 virtual CompileType ComputeType() const;
6980
6981 Value* closure_function() const { return inputs_[kFunctionPos]; }
6982 Value* context() const { return inputs_[kContextPos]; }
6983
6984 const Function& known_function() const {
6985 Value* const value = closure_function();
6986 if (value->BindsToConstant()) {
6987 ASSERT(value->BoundConstant().IsFunction());
6988 return Function::Cast(obj: value->BoundConstant());
6989 }
6990 return Object::null_function();
6991 }
6992
6993 virtual const Slot* SlotForInput(intptr_t pos) {
6994 switch (pos) {
6995 case kFunctionPos:
6996 return &Slot::Closure_function();
6997 case kContextPos:
6998 return &Slot::Closure_context();
6999 default:
7000 return TemplateAllocation::SlotForInput(pos);
7001 }
7002 }
7003
7004 virtual bool HasUnknownSideEffects() const { return false; }
7005
7006 virtual bool WillAllocateNewOrRemembered() const {
7007 return IsAllocatableInNewSpace(size: compiler::target::Closure::InstanceSize());
7008 }
7009
7010 DECLARE_EMPTY_SERIALIZATION(AllocateClosureInstr, TemplateAllocation)
7011
7012 private:
7013 DISALLOW_COPY_AND_ASSIGN(AllocateClosureInstr);
7014};
7015
7016class AllocateUninitializedContextInstr : public TemplateAllocation<0> {
7017 public:
7018 AllocateUninitializedContextInstr(const InstructionSource& source,
7019 intptr_t num_context_variables,
7020 intptr_t deopt_id);
7021
7022 DECLARE_INSTRUCTION(AllocateUninitializedContext)
7023 virtual CompileType ComputeType() const;
7024
7025 intptr_t num_context_variables() const { return num_context_variables_; }
7026
7027 virtual bool HasUnknownSideEffects() const { return false; }
7028
7029 virtual bool WillAllocateNewOrRemembered() const {
7030 return compiler::target::WillAllocateNewOrRememberedContext(
7031 num_context_variables: num_context_variables_);
7032 }
7033
7034 virtual bool ObjectIsInitialized() { return false; }
7035
7036 PRINT_OPERANDS_TO_SUPPORT
7037
7038#define FIELD_LIST(F) F(const intptr_t, num_context_variables_)
7039
7040 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateUninitializedContextInstr,
7041 TemplateAllocation,
7042 FIELD_LIST)
7043#undef FIELD_LIST
7044
7045 private:
7046 DISALLOW_COPY_AND_ASSIGN(AllocateUninitializedContextInstr);
7047};
7048
7049// Allocates and null initializes a record object.
7050class AllocateRecordInstr : public TemplateAllocation<0> {
7051 public:
7052 AllocateRecordInstr(const InstructionSource& source,
7053 RecordShape shape,
7054 intptr_t deopt_id)
7055 : TemplateAllocation(source, deopt_id), shape_(shape) {}
7056
7057 DECLARE_INSTRUCTION(AllocateRecord)
7058 virtual CompileType ComputeType() const;
7059
7060 RecordShape shape() const { return shape_; }
7061 intptr_t num_fields() const { return shape_.num_fields(); }
7062
7063 virtual bool HasUnknownSideEffects() const { return false; }
7064
7065 virtual bool WillAllocateNewOrRemembered() const {
7066 return IsAllocatableInNewSpace(
7067 size: compiler::target::Record::InstanceSize(length: num_fields()));
7068 }
7069
7070#define FIELD_LIST(F) F(const RecordShape, shape_)
7071
7072 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateRecordInstr,
7073 TemplateAllocation,
7074 FIELD_LIST)
7075#undef FIELD_LIST
7076
7077 private:
7078 DISALLOW_COPY_AND_ASSIGN(AllocateRecordInstr);
7079};
7080
7081// Allocates and initializes fields of a small record object
7082// (with 2 or 3 fields).
7083class AllocateSmallRecordInstr : public TemplateAllocation<3> {
7084 public:
7085 AllocateSmallRecordInstr(const InstructionSource& source,
7086 RecordShape shape, // 2 or 3 fields.
7087 Value* value0,
7088 Value* value1,
7089 Value* value2, // Optional.
7090 intptr_t deopt_id)
7091 : TemplateAllocation(source, deopt_id), shape_(shape) {
7092 const intptr_t num_fields = shape.num_fields();
7093 ASSERT(num_fields == 2 || num_fields == 3);
7094 ASSERT((num_fields > 2) == (value2 != nullptr));
7095 SetInputAt(i: 0, value: value0);
7096 SetInputAt(i: 1, value: value1);
7097 if (num_fields > 2) {
7098 SetInputAt(i: 2, value: value2);
7099 }
7100 }
7101
7102 DECLARE_INSTRUCTION(AllocateSmallRecord)
7103 virtual CompileType ComputeType() const;
7104
7105 RecordShape shape() const { return shape_; }
7106 intptr_t num_fields() const { return shape().num_fields(); }
7107
7108 virtual intptr_t InputCount() const { return num_fields(); }
7109
7110 virtual const Slot* SlotForInput(intptr_t pos) {
7111 return &Slot::GetRecordFieldSlot(
7112 thread: Thread::Current(), offset_in_bytes: compiler::target::Record::field_offset(index: pos));
7113 }
7114
7115 virtual bool HasUnknownSideEffects() const { return false; }
7116
7117 virtual bool WillAllocateNewOrRemembered() const {
7118 return IsAllocatableInNewSpace(
7119 size: compiler::target::Record::InstanceSize(length: num_fields()));
7120 }
7121
7122#define FIELD_LIST(F) F(const RecordShape, shape_)
7123
7124 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateSmallRecordInstr,
7125 TemplateAllocation,
7126 FIELD_LIST)
7127#undef FIELD_LIST
7128
7129 private:
7130 DISALLOW_COPY_AND_ASSIGN(AllocateSmallRecordInstr);
7131};
7132
7133// This instruction captures the state of the object which had its allocation
7134// removed during the AllocationSinking pass.
7135// It does not produce any real code only deoptimization information.
class MaterializeObjectInstr : public VariadicDefinition {
 public:
  MaterializeObjectInstr(AllocationInstr* allocation,
                         const Class& cls,
                         intptr_t length_or_shape,
                         const ZoneGrowableArray<const Slot*>& slots,
                         InputsArray&& values)
      : VariadicDefinition(std::move(values)),
        cls_(cls),
        length_or_shape_(length_or_shape),
        slots_(slots),
        registers_remapped_(false),
        allocation_(allocation) {
    // Each input value corresponds to exactly one slot in the object.
    ASSERT(slots_.length() == InputCount());
  }

  AllocationInstr* allocation() const { return allocation_; }
  const Class& cls() const { return cls_; }

  intptr_t length_or_shape() const { return length_or_shape_; }

  // Byte offset of the i-th materialized field within the object.
  intptr_t FieldOffsetAt(intptr_t i) const {
    return slots_[i]->offset_in_bytes();
  }

  const Location& LocationAt(intptr_t i) {
    ASSERT(0 <= i && i < InputCount());
    return locations_[i];
  }

  DECLARE_INSTRUCTION(MaterializeObject)

  // SelectRepresentations pass is run once more while MaterializeObject
  // instructions are still in the graph. To avoid any redundant boxing
  // operations inserted by that pass we should indicate that this
  // instruction can cope with any representation as it is essentially
  // an environment use.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(0 <= idx && idx < InputCount());
    return kNoRepresentation;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }

  Location* locations() { return locations_; }
  void set_locations(Location* locations) { locations_ = locations; }

  virtual bool MayThrow() const { return false; }

  void RemapRegisters(intptr_t* cpu_reg_slots, intptr_t* fpu_reg_slots);

  bool was_visited_for_liveness() const { return visited_for_liveness_; }
  void mark_visited_for_liveness() { visited_for_liveness_ = true; }

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const Class&, cls_)                                                        \
  F(intptr_t, length_or_shape_)                                                \
  F(const ZoneGrowableArray<const Slot*>&, slots_)                             \
  F(bool, registers_remapped_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MaterializeObjectInstr,
                                          VariadicDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST
  DECLARE_EXTRA_SERIALIZATION

 private:
  Location* locations_ = nullptr;

  // Not serialized.
  AllocationInstr* allocation_ = nullptr;
  bool visited_for_liveness_ = false;

  DISALLOW_COPY_AND_ASSIGN(MaterializeObjectInstr);
};
7214
// Base class for allocations of variable-length objects (arrays, typed data),
// which expose their element count as a value.
class ArrayAllocationInstr : public AllocationInstr {
 public:
  explicit ArrayAllocationInstr(const InstructionSource& source,
                                intptr_t deopt_id)
      : AllocationInstr(source, deopt_id) {}

  virtual Value* num_elements() const = 0;

  // True when the length is a compile-time Smi constant.
  bool HasConstantNumElements() const {
    return num_elements()->BindsToSmiConstant();
  }
  // Only valid to call when HasConstantNumElements() is true.
  intptr_t GetConstantNumElements() const {
    return num_elements()->BoundSmiConstant();
  }

  DECLARE_ABSTRACT_INSTRUCTION(ArrayAllocation);

  DECLARE_EMPTY_SERIALIZATION(ArrayAllocationInstr, AllocationInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(ArrayAllocationInstr);
};
7237
// Convenience base class for array allocations with a fixed number (N) of
// inputs, stored in an embedded array.
template <intptr_t N>
class TemplateArrayAllocation : public ArrayAllocationInstr {
 public:
  explicit TemplateArrayAllocation(const InstructionSource& source,
                                   intptr_t deopt_id)
      : ArrayAllocationInstr(source, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  DECLARE_EMPTY_SERIALIZATION(TemplateArrayAllocation, ArrayAllocationInstr)

 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }

  DISALLOW_COPY_AND_ASSIGN(TemplateArrayAllocation);
};
7258
7259class CreateArrayInstr : public TemplateArrayAllocation<2> {
7260 public:
7261 CreateArrayInstr(const InstructionSource& source,
7262 Value* type_arguments,
7263 Value* num_elements,
7264 intptr_t deopt_id)
7265 : TemplateArrayAllocation(source, deopt_id) {
7266 SetInputAt(i: kTypeArgumentsPos, value: type_arguments);
7267 SetInputAt(i: kLengthPos, value: num_elements);
7268 }
7269
7270 enum { kTypeArgumentsPos = 0, kLengthPos = 1 };
7271
7272 DECLARE_INSTRUCTION(CreateArray)
7273 virtual CompileType ComputeType() const;
7274
7275 Value* type_arguments() const { return inputs_[kTypeArgumentsPos]; }
7276 virtual Value* num_elements() const { return inputs_[kLengthPos]; }
7277
7278 virtual bool HasUnknownSideEffects() const { return false; }
7279
7280 virtual bool WillAllocateNewOrRemembered() const {
7281 // Large arrays will use cards instead; cannot skip write barrier.
7282 if (!HasConstantNumElements()) return false;
7283 return compiler::target::WillAllocateNewOrRememberedArray(
7284 length: GetConstantNumElements());
7285 }
7286
7287 virtual const Slot* SlotForInput(intptr_t pos) {
7288 switch (pos) {
7289 case kTypeArgumentsPos:
7290 return &Slot::Array_type_arguments();
7291 case kLengthPos:
7292 return &Slot::Array_length();
7293 default:
7294 return TemplateArrayAllocation::SlotForInput(pos);
7295 }
7296 }
7297
7298 DECLARE_EMPTY_SERIALIZATION(CreateArrayInstr, TemplateArrayAllocation)
7299
7300 private:
7301 DISALLOW_COPY_AND_ASSIGN(CreateArrayInstr);
7302};
7303
7304class AllocateTypedDataInstr : public TemplateArrayAllocation<1> {
7305 public:
7306 AllocateTypedDataInstr(const InstructionSource& source,
7307 classid_t class_id,
7308 Value* num_elements,
7309 intptr_t deopt_id)
7310 : TemplateArrayAllocation(source, deopt_id), class_id_(class_id) {
7311 SetInputAt(i: kLengthPos, value: num_elements);
7312 }
7313
7314 enum { kLengthPos = 0 };
7315
7316 DECLARE_INSTRUCTION(AllocateTypedData)
7317 virtual CompileType ComputeType() const;
7318
7319 classid_t class_id() const { return class_id_; }
7320 virtual Value* num_elements() const { return inputs_[kLengthPos]; }
7321
7322 virtual bool HasUnknownSideEffects() const { return false; }
7323
7324 virtual bool WillAllocateNewOrRemembered() const {
7325 // No write barriers are generated for typed data accesses.
7326 return false;
7327 }
7328
7329 virtual const Slot* SlotForInput(intptr_t pos) {
7330 switch (pos) {
7331 case kLengthPos:
7332 return &Slot::TypedDataBase_length();
7333 default:
7334 return TemplateArrayAllocation::SlotForInput(pos);
7335 }
7336 }
7337
7338#define FIELD_LIST(F) F(const classid_t, class_id_)
7339
7340 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateTypedDataInstr,
7341 TemplateArrayAllocation,
7342 FIELD_LIST)
7343#undef FIELD_LIST
7344
7345 private:
7346 DISALLOW_COPY_AND_ASSIGN(AllocateTypedDataInstr);
7347};
7348
7349// Note: This instruction must not be moved without the indexed access that
7350// depends on it (e.g. out of loops). GC may collect the array while the
7351// external data-array is still accessed.
7352// TODO(vegorov) enable LICMing this instruction by ensuring that array itself
7353// is kept alive.
7354class LoadUntaggedInstr : public TemplateDefinition<1, NoThrow> {
7355 public:
7356 LoadUntaggedInstr(Value* object, intptr_t offset) : offset_(offset) {
7357 SetInputAt(i: 0, value: object);
7358 }
7359
7360 virtual Representation representation() const { return kUntagged; }
7361 DECLARE_INSTRUCTION(LoadUntagged)
7362 virtual CompileType ComputeType() const;
7363
7364 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
7365 ASSERT(idx == 0);
7366 // The object may be tagged or untagged (for external objects).
7367 return kNoRepresentation;
7368 }
7369
7370 Value* object() const { return inputs_[0]; }
7371 intptr_t offset() const { return offset_; }
7372
7373 virtual bool ComputeCanDeoptimize() const { return false; }
7374
7375 virtual bool HasUnknownSideEffects() const { return false; }
7376 virtual bool AttributesEqual(const Instruction& other) const {
7377 return other.AsLoadUntagged()->offset_ == offset_;
7378 }
7379
7380 PRINT_OPERANDS_TO_SUPPORT
7381
7382#define FIELD_LIST(F) F(const intptr_t, offset_)
7383
7384 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadUntaggedInstr,
7385 TemplateDefinition,
7386 FIELD_LIST)
7387#undef FIELD_LIST
7388
7389 private:
7390 DISALLOW_COPY_AND_ASSIGN(LoadUntaggedInstr);
7391};
7392
7393class LoadClassIdInstr : public TemplateDefinition<1, NoThrow, Pure> {
7394 public:
7395 explicit LoadClassIdInstr(Value* object,
7396 Representation representation = kTagged,
7397 bool input_can_be_smi = true)
7398 : representation_(representation), input_can_be_smi_(input_can_be_smi) {
7399 ASSERT(representation == kTagged || representation == kUntagged);
7400 SetInputAt(i: 0, value: object);
7401 }
7402
7403 virtual Representation representation() const { return representation_; }
7404 DECLARE_INSTRUCTION(LoadClassId)
7405 virtual CompileType ComputeType() const;
7406
7407 virtual Definition* Canonicalize(FlowGraph* flow_graph);
7408
7409 Value* object() const { return inputs_[0]; }
7410
7411 virtual bool ComputeCanDeoptimize() const { return false; }
7412
7413 virtual bool AttributesEqual(const Instruction& other) const {
7414 auto const other_load = other.AsLoadClassId();
7415 return other_load->representation_ == representation_ &&
7416 other_load->input_can_be_smi_ == input_can_be_smi_;
7417 }
7418
7419 PRINT_OPERANDS_TO_SUPPORT
7420
7421#define FIELD_LIST(F) \
7422 F(const Representation, representation_) \
7423 F(const bool, input_can_be_smi_)
7424
7425 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadClassIdInstr,
7426 TemplateDefinition,
7427 FIELD_LIST)
7428#undef FIELD_LIST
7429
7430 private:
7431 DISALLOW_COPY_AND_ASSIGN(LoadClassIdInstr);
7432};
7433
7434// LoadFieldInstr represents a load from the given [slot] in the given
7435// [instance]. If calls_initializer(), then LoadFieldInstr also calls field
7436// initializer if field is not initialized yet (contains sentinel value).
7437//
7438// Note: if slot was a subject of the field unboxing optimization then this load
7439// would both load the box stored in the field and then load the content of
7440// the box.
class LoadFieldInstr : public TemplateLoadField<1> {
 public:
  LoadFieldInstr(Value* instance,
                 const Slot& slot,
                 const InstructionSource& source,
                 bool calls_initializer = false,
                 intptr_t deopt_id = DeoptId::kNone)
      : TemplateLoadField(source,
                          calls_initializer,
                          deopt_id,
                          // Only Dart fields carry an initializer function.
                          slot.IsDartField() ? &slot.field() : nullptr),
        slot_(slot) {
    SetInputAt(0, instance);
  }

  Value* instance() const { return inputs_[0]; }
  const Slot& slot() const { return slot_; }

  virtual Representation representation() const;

  DECLARE_INSTRUCTION(LoadField)
  DECLARE_ATTRIBUTES(&slot())

  virtual CompileType ComputeType() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  // True when this loads a length that can never change (e.g. fixed array
  // length), which permits bounds-check elimination.
  bool IsImmutableLengthLoad() const { return slot().IsImmutableLengthSlot(); }

  // Try evaluating this load against the given constant value of
  // the instance. Returns true if evaluation succeeded and
  // puts result into result.
  // Note: we only evaluate loads when we can ensure that
  // instance has the field.
  bool Evaluate(const Object& instance_value, Object* result);

  static bool TryEvaluateLoad(const Object& instance,
                              const Field& field,
                              Object* result);

  static bool TryEvaluateLoad(const Object& instance,
                              const Slot& field,
                              Object* result);

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  static bool IsFixedLengthArrayCid(intptr_t cid);
  static bool IsTypedDataViewFactory(const Function& function);
  static bool IsUnmodifiableTypedDataViewFactory(const Function& function);

  // CSE is only sound for loads from fields that never change.
  virtual bool AllowsCSE() const { return slot_.is_immutable(); }

  // Running a lazy field initializer may allocate and thus trigger GC.
  virtual bool CanTriggerGC() const { return calls_initializer(); }

  virtual bool AttributesEqual(const Instruction& other) const;

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F) F(const Slot&, slot_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadFieldInstr,
                                          TemplateLoadField,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }

  // Generate code which checks if field is initialized and
  // calls initializer if it is not. Field value is already loaded.
  void EmitNativeCodeForInitializerCall(FlowGraphCompiler* compiler);

  DISALLOW_COPY_AND_ASSIGN(LoadFieldInstr);
};
7515
7516class InstantiateTypeInstr : public TemplateDefinition<2, Throws> {
7517 public:
7518 InstantiateTypeInstr(const InstructionSource& source,
7519 const AbstractType& type,
7520 Value* instantiator_type_arguments,
7521 Value* function_type_arguments,
7522 intptr_t deopt_id)
7523 : TemplateDefinition(source, deopt_id),
7524 token_pos_(source.token_pos),
7525 type_(type) {
7526 DEBUG_ASSERT(type.IsNotTemporaryScopedHandle());
7527 SetInputAt(i: 0, value: instantiator_type_arguments);
7528 SetInputAt(i: 1, value: function_type_arguments);
7529 }
7530
7531 DECLARE_INSTRUCTION(InstantiateType)
7532
7533 Value* instantiator_type_arguments() const { return inputs_[0]; }
7534 Value* function_type_arguments() const { return inputs_[1]; }
7535 const AbstractType& type() const { return type_; }
7536 virtual TokenPosition token_pos() const { return token_pos_; }
7537
7538 virtual bool ComputeCanDeoptimize() const { return false; }
7539 virtual bool ComputeCanDeoptimizeAfterCall() const {
7540 return !CompilerState::Current().is_aot();
7541 }
7542 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
7543 return InputCount();
7544 }
7545
7546 virtual bool HasUnknownSideEffects() const { return false; }
7547
7548 PRINT_OPERANDS_TO_SUPPORT
7549
7550#define FIELD_LIST(F) \
7551 F(const TokenPosition, token_pos_) \
7552 F(const AbstractType&, type_)
7553
7554 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstantiateTypeInstr,
7555 TemplateDefinition,
7556 FIELD_LIST)
7557#undef FIELD_LIST
7558
7559 private:
7560 DISALLOW_COPY_AND_ASSIGN(InstantiateTypeInstr);
7561};
7562
7563class InstantiateTypeArgumentsInstr : public TemplateDefinition<3, Throws> {
7564 public:
7565 InstantiateTypeArgumentsInstr(const InstructionSource& source,
7566 Value* instantiator_type_arguments,
7567 Value* function_type_arguments,
7568 Value* type_arguments,
7569 const Class& instantiator_class,
7570 const Function& function,
7571 intptr_t deopt_id)
7572 : TemplateDefinition(source, deopt_id),
7573 token_pos_(source.token_pos),
7574 instantiator_class_(instantiator_class),
7575 function_(function) {
7576 DEBUG_ASSERT(instantiator_class.IsNotTemporaryScopedHandle());
7577 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
7578 SetInputAt(i: 0, value: instantiator_type_arguments);
7579 SetInputAt(i: 1, value: function_type_arguments);
7580 SetInputAt(i: 2, value: type_arguments);
7581 }
7582
7583 DECLARE_INSTRUCTION(InstantiateTypeArguments)
7584
7585 Value* instantiator_type_arguments() const { return inputs_[0]; }
7586 Value* function_type_arguments() const { return inputs_[1]; }
7587 Value* type_arguments() const { return inputs_[2]; }
7588 const Class& instantiator_class() const { return instantiator_class_; }
7589 const Function& function() const { return function_; }
7590 virtual TokenPosition token_pos() const { return token_pos_; }
7591
7592 virtual bool ComputeCanDeoptimize() const { return false; }
7593 virtual bool ComputeCanDeoptimizeAfterCall() const {
7594 return !CompilerState::Current().is_aot();
7595 }
7596 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
7597 return InputCount();
7598 }
7599
7600 virtual bool HasUnknownSideEffects() const { return false; }
7601
7602 virtual Definition* Canonicalize(FlowGraph* flow_graph);
7603
7604 bool CanShareInstantiatorTypeArguments(
7605 bool* with_runtime_check = nullptr) const {
7606 if (instantiator_class().IsNull() || !type_arguments()->BindsToConstant() ||
7607 !type_arguments()->BoundConstant().IsTypeArguments()) {
7608 return false;
7609 }
7610 const auto& type_args =
7611 TypeArguments::Cast(obj: type_arguments()->BoundConstant());
7612 return type_args.CanShareInstantiatorTypeArguments(instantiator_class: instantiator_class(),
7613 with_runtime_check);
7614 }
7615
7616 bool CanShareFunctionTypeArguments(bool* with_runtime_check = nullptr) const {
7617 if (function().IsNull() || !type_arguments()->BindsToConstant() ||
7618 !type_arguments()->BoundConstant().IsTypeArguments()) {
7619 return false;
7620 }
7621 const auto& type_args =
7622 TypeArguments::Cast(obj: type_arguments()->BoundConstant());
7623 return type_args.CanShareFunctionTypeArguments(function: function(),
7624 with_runtime_check);
7625 }
7626
7627 const Code& GetStub() const {
7628 bool with_runtime_check;
7629 if (CanShareInstantiatorTypeArguments(with_runtime_check: &with_runtime_check)) {
7630 ASSERT(with_runtime_check);
7631 return StubCode::InstantiateTypeArgumentsMayShareInstantiatorTA();
7632 } else if (CanShareFunctionTypeArguments(with_runtime_check: &with_runtime_check)) {
7633 ASSERT(with_runtime_check);
7634 return StubCode::InstantiateTypeArgumentsMayShareFunctionTA();
7635 }
7636 return StubCode::InstantiateTypeArguments();
7637 }
7638
7639 PRINT_OPERANDS_TO_SUPPORT
7640
7641#define FIELD_LIST(F) \
7642 F(const TokenPosition, token_pos_) \
7643 F(const Class&, instantiator_class_) \
7644 F(const Function&, function_)
7645
7646 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstantiateTypeArgumentsInstr,
7647 TemplateDefinition,
7648 FIELD_LIST)
7649#undef FIELD_LIST
7650
7651 private:
7652 DISALLOW_COPY_AND_ASSIGN(InstantiateTypeArgumentsInstr);
7653};
7654
7655// [AllocateContext] instruction allocates a new Context object with the space
7656// for the given [context_variables].
7657class AllocateContextInstr : public TemplateAllocation<0> {
7658 public:
7659 AllocateContextInstr(const InstructionSource& source,
7660 const ZoneGrowableArray<const Slot*>& context_slots,
7661 intptr_t deopt_id)
7662 : TemplateAllocation(source, deopt_id), context_slots_(context_slots) {}
7663
7664 DECLARE_INSTRUCTION(AllocateContext)
7665 virtual CompileType ComputeType() const;
7666
7667 const ZoneGrowableArray<const Slot*>& context_slots() const {
7668 return context_slots_;
7669 }
7670
7671 intptr_t num_context_variables() const { return context_slots().length(); }
7672
7673 virtual bool ComputeCanDeoptimize() const { return false; }
7674
7675 virtual bool HasUnknownSideEffects() const { return false; }
7676
7677 virtual bool WillAllocateNewOrRemembered() const {
7678 return compiler::target::WillAllocateNewOrRememberedContext(
7679 num_context_variables: context_slots().length());
7680 }
7681
7682 PRINT_OPERANDS_TO_SUPPORT
7683
7684#define FIELD_LIST(F) F(const ZoneGrowableArray<const Slot*>&, context_slots_)
7685
7686 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateContextInstr,
7687 TemplateAllocation,
7688 FIELD_LIST)
7689#undef FIELD_LIST
7690
7691 private:
7692 DISALLOW_COPY_AND_ASSIGN(AllocateContextInstr);
7693};
7694
7695// [CloneContext] instruction clones the given Context object assuming that
7696// it contains exactly the provided [context_variables].
7697class CloneContextInstr : public TemplateDefinition<1, Throws> {
7698 public:
7699 CloneContextInstr(const InstructionSource& source,
7700 Value* context_value,
7701 const ZoneGrowableArray<const Slot*>& context_slots,
7702 intptr_t deopt_id)
7703 : TemplateDefinition(source, deopt_id),
7704 token_pos_(source.token_pos),
7705 context_slots_(context_slots) {
7706 SetInputAt(i: 0, value: context_value);
7707 }
7708
7709 virtual TokenPosition token_pos() const { return token_pos_; }
7710 Value* context_value() const { return inputs_[0]; }
7711
7712 const ZoneGrowableArray<const Slot*>& context_slots() const {
7713 return context_slots_;
7714 }
7715
7716 DECLARE_INSTRUCTION(CloneContext)
7717 virtual CompileType ComputeType() const;
7718
7719 virtual bool ComputeCanDeoptimize() const { return false; }
7720 virtual bool ComputeCanDeoptimizeAfterCall() const {
7721 // We test that allocation instructions have correct deopt environment
7722 // (which is needed in case OOM is thrown) by actually deoptimizing
7723 // optimized code in allocation slow paths.
7724 return !CompilerState::Current().is_aot();
7725 }
7726 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
7727 return InputCount();
7728 }
7729
7730 virtual bool HasUnknownSideEffects() const { return false; }
7731
7732#define FIELD_LIST(F) \
7733 F(const TokenPosition, token_pos_) \
7734 F(const ZoneGrowableArray<const Slot*>&, context_slots_)
7735
7736 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CloneContextInstr,
7737 TemplateDefinition,
7738 FIELD_LIST)
7739#undef FIELD_LIST
7740
7741 private:
7742 DISALLOW_COPY_AND_ASSIGN(CloneContextInstr);
7743};
7744
7745class CheckEitherNonSmiInstr : public TemplateInstruction<2, NoThrow, Pure> {
7746 public:
7747 CheckEitherNonSmiInstr(Value* left, Value* right, intptr_t deopt_id)
7748 : TemplateInstruction(deopt_id) {
7749 SetInputAt(i: 0, value: left);
7750 SetInputAt(i: 1, value: right);
7751 }
7752
7753 Value* left() const { return inputs_[0]; }
7754 Value* right() const { return inputs_[1]; }
7755
7756 DECLARE_INSTRUCTION(CheckEitherNonSmi)
7757
7758 virtual bool ComputeCanDeoptimize() const { return true; }
7759
7760 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
7761
7762 virtual bool AttributesEqual(const Instruction& other) const { return true; }
7763
7764 DECLARE_EMPTY_SERIALIZATION(CheckEitherNonSmiInstr, TemplateInstruction)
7765
7766 private:
7767 DISALLOW_COPY_AND_ASSIGN(CheckEitherNonSmiInstr);
7768};
7769
// Static helpers describing how values of a given unboxed representation are
// boxed into heap objects (or Smis) and unboxed again.
struct Boxing : public AllStatic {
  // Whether the given representation can be boxed or unboxed.
  static bool Supports(Representation rep);

  // Whether boxing this value requires allocating a new object.
  static bool RequiresAllocation(Representation rep);

  // The offset into the Layout object for the boxed value that can store
  // the full range of values in the representation.
  // Only defined for allocated boxes (i.e., RequiresAllocation must be true).
  static intptr_t ValueOffset(Representation rep);

  // The class ID for the boxed value that can store the full range
  // of values in the representation.
  static intptr_t BoxCid(Representation rep);
};
7786
7787class BoxInstr : public TemplateDefinition<1, NoThrow, Pure> {
7788 public:
7789 static BoxInstr* Create(Representation from, Value* value);
7790
7791 Value* value() const { return inputs_[0]; }
7792 Representation from_representation() const { return from_representation_; }
7793
7794 DECLARE_INSTRUCTION(Box)
7795 virtual CompileType ComputeType() const;
7796
7797 virtual bool ComputeCanDeoptimize() const { return false; }
7798 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
7799
7800 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
7801 ASSERT(idx == 0);
7802 return from_representation();
7803 }
7804
7805 virtual bool AttributesEqual(const Instruction& other) const {
7806 return other.AsBox()->from_representation() == from_representation();
7807 }
7808
7809 Definition* Canonicalize(FlowGraph* flow_graph);
7810
7811 virtual TokenPosition token_pos() const { return TokenPosition::kBox; }
7812
7813 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
7814 return kNotSpeculative;
7815 }
7816
7817#define FIELD_LIST(F) F(const Representation, from_representation_)
7818
7819 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BoxInstr,
7820 TemplateDefinition,
7821 FIELD_LIST)
7822#undef FIELD_LIST
7823
7824 protected:
7825 BoxInstr(Representation from_representation, Value* value)
7826 : from_representation_(from_representation) {
7827 SetInputAt(i: 0, value);
7828 }
7829
7830 private:
7831 intptr_t ValueOffset() const {
7832 return Boxing::ValueOffset(rep: from_representation());
7833 }
7834
7835 DISALLOW_COPY_AND_ASSIGN(BoxInstr);
7836};
7837
// Abstract base class for boxing unboxed integer representations.
class BoxIntegerInstr : public BoxInstr {
 public:
  BoxIntegerInstr(Representation representation, Value* value)
      : BoxInstr(representation, value) {}

  // Whether the boxed result always fits in a Smi (so no allocation needed).
  virtual bool ValueFitsSmi() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  // Boxing only allocates (and thus may trigger GC) when the value does not
  // fit in a Smi.
  virtual bool CanTriggerGC() const { return !ValueFitsSmi(); }

  DECLARE_ABSTRACT_INSTRUCTION(BoxInteger)

  DECLARE_EMPTY_SERIALIZATION(BoxIntegerInstr, BoxInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxIntegerInstr);
};
7861
// Boxes an integer representation whose full value range is statically known
// to fit in a Smi (checked by the constructor assertion), so boxing never
// allocates.
class BoxSmallIntInstr : public BoxIntegerInstr {
 public:
  explicit BoxSmallIntInstr(Representation rep, Value* value)
      : BoxIntegerInstr(rep, value) {
    ASSERT(RepresentationUtils::ValueSize(rep) * kBitsPerByte <=
           compiler::target::kSmiBits);
  }

  virtual bool ValueFitsSmi() const { return true; }

  DECLARE_INSTRUCTION(BoxSmallInt)

  DECLARE_EMPTY_SERIALIZATION(BoxSmallIntInstr, BoxIntegerInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxSmallIntInstr);
};
7879
// Shared base for boxing 32-bit integer representations; the backend code
// generation is shared between the signed and unsigned variants.
class BoxInteger32Instr : public BoxIntegerInstr {
 public:
  BoxInteger32Instr(Representation representation, Value* value)
      : BoxIntegerInstr(representation, value) {}

  DECLARE_INSTRUCTION_BACKEND()

  DECLARE_EMPTY_SERIALIZATION(BoxInteger32Instr, BoxIntegerInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxInteger32Instr);
};
7892
// Boxes an unboxed signed 32-bit integer.
class BoxInt32Instr : public BoxInteger32Instr {
 public:
  explicit BoxInt32Instr(Value* value)
      : BoxInteger32Instr(kUnboxedInt32, value) {}

  DECLARE_INSTRUCTION_NO_BACKEND(BoxInt32)

  DECLARE_EMPTY_SERIALIZATION(BoxInt32Instr, BoxInteger32Instr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxInt32Instr);
};
7905
// Boxes an unboxed unsigned 32-bit integer.
class BoxUint32Instr : public BoxInteger32Instr {
 public:
  explicit BoxUint32Instr(Value* value)
      : BoxInteger32Instr(kUnboxedUint32, value) {}

  DECLARE_INSTRUCTION_NO_BACKEND(BoxUint32)

  DECLARE_EMPTY_SERIALIZATION(BoxUint32Instr, BoxInteger32Instr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxUint32Instr);
};
7918
// Boxes an unboxed 64-bit integer.
class BoxInt64Instr : public BoxIntegerInstr {
 public:
  explicit BoxInt64Instr(Value* value)
      : BoxIntegerInstr(kUnboxedInt64, value) {}

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_INSTRUCTION(BoxInt64)

  DECLARE_EMPTY_SERIALIZATION(BoxInt64Instr, BoxIntegerInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxInt64Instr);
};
7933
7934class UnboxInstr : public TemplateDefinition<1, NoThrow, Pure> {
7935 public:
7936 static UnboxInstr* Create(Representation to,
7937 Value* value,
7938 intptr_t deopt_id,
7939 SpeculativeMode speculative_mode = kGuardInputs);
7940
7941 Value* value() const { return inputs_[0]; }
7942
7943 virtual bool ComputeCanDeoptimize() const {
7944 if (SpeculativeModeOfInputs() == kNotSpeculative) {
7945 return false;
7946 }
7947
7948 const intptr_t value_cid = value()->Type()->ToCid();
7949 const intptr_t box_cid = BoxCid();
7950
7951 if (value_cid == box_cid) {
7952 return false;
7953 }
7954
7955 if (CanConvertSmi() && (value_cid == kSmiCid)) {
7956 return false;
7957 }
7958
7959 return true;
7960 }
7961
7962 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
7963 return speculative_mode_;
7964 }
7965
7966 virtual Representation representation() const { return representation_; }
7967
7968 DECLARE_INSTRUCTION(Unbox)
7969 virtual CompileType ComputeType() const;
7970
7971 virtual bool AttributesEqual(const Instruction& other) const {
7972 auto const other_unbox = other.AsUnbox();
7973 return (representation() == other_unbox->representation()) &&
7974 (speculative_mode_ == other_unbox->speculative_mode_);
7975 }
7976
7977 Definition* Canonicalize(FlowGraph* flow_graph);
7978
7979 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
7980
7981 virtual TokenPosition token_pos() const { return TokenPosition::kBox; }
7982
7983#define FIELD_LIST(F) \
7984 F(const Representation, representation_) \
7985 F(SpeculativeMode, speculative_mode_)
7986
7987 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnboxInstr,
7988 TemplateDefinition,
7989 FIELD_LIST)
7990#undef FIELD_LIST
7991
7992 protected:
7993 UnboxInstr(Representation representation,
7994 Value* value,
7995 intptr_t deopt_id,
7996 SpeculativeMode speculative_mode)
7997 : TemplateDefinition(deopt_id),
7998 representation_(representation),
7999 speculative_mode_(speculative_mode) {
8000 SetInputAt(i: 0, value);
8001 }
8002
8003 void set_speculative_mode(SpeculativeMode value) {
8004 speculative_mode_ = value;
8005 }
8006
8007 private:
8008 bool CanConvertSmi() const;
8009 void EmitLoadFromBox(FlowGraphCompiler* compiler);
8010 void EmitSmiConversion(FlowGraphCompiler* compiler);
8011 void EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler);
8012 void EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler);
8013 void EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler);
8014
8015 intptr_t BoxCid() const { return Boxing::BoxCid(rep: representation_); }
8016
8017 intptr_t ValueOffset() const { return Boxing::ValueOffset(rep: representation_); }
8018
8019 DISALLOW_COPY_AND_ASSIGN(UnboxInstr);
8020};
8021
// Abstract base class for unboxing integer representations. Carries a
// truncation flag: a truncating unbox may discard high bits that do not fit
// in the target representation.
class UnboxIntegerInstr : public UnboxInstr {
 public:
  enum TruncationMode { kTruncate, kNoTruncation };

  UnboxIntegerInstr(Representation representation,
                    TruncationMode truncation_mode,
                    Value* value,
                    intptr_t deopt_id,
                    SpeculativeMode speculative_mode)
      : UnboxInstr(representation, value, deopt_id, speculative_mode),
        is_truncating_(truncation_mode == kTruncate) {}

  bool is_truncating() const { return is_truncating_; }

  void mark_truncating() { is_truncating_ = true; }

  virtual CompileType ComputeType() const;

  // Equivalent only if the base attributes and the truncation flag match.
  virtual bool AttributesEqual(const Instruction& other) const {
    auto const other_unbox = other.AsUnboxInteger();
    return UnboxInstr::AttributesEqual(other) &&
           (other_unbox->is_truncating_ == is_truncating_);
  }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_ABSTRACT_INSTRUCTION(UnboxInteger)

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F) F(bool, is_truncating_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnboxIntegerInstr,
                                          UnboxInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxIntegerInstr);
};
8062
// Shared base for unboxing to 32-bit integer representations; backend code
// generation is shared between the signed and unsigned variants.
class UnboxInteger32Instr : public UnboxIntegerInstr {
 public:
  UnboxInteger32Instr(Representation representation,
                      TruncationMode truncation_mode,
                      Value* value,
                      intptr_t deopt_id,
                      SpeculativeMode speculative_mode)
      : UnboxIntegerInstr(representation,
                          truncation_mode,
                          value,
                          deopt_id,
                          speculative_mode) {}

  DECLARE_INSTRUCTION_BACKEND()

  DECLARE_EMPTY_SERIALIZATION(UnboxInteger32Instr, UnboxIntegerInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInteger32Instr);
};
8083
// Unboxes to an unsigned 32-bit integer; always truncating (enforced by the
// constructor assertion).
class UnboxUint32Instr : public UnboxInteger32Instr {
 public:
  UnboxUint32Instr(Value* value,
                   intptr_t deopt_id,
                   SpeculativeMode speculative_mode = kGuardInputs)
      : UnboxInteger32Instr(kUnboxedUint32,
                            kTruncate,
                            value,
                            deopt_id,
                            speculative_mode) {
    ASSERT(is_truncating());
  }

  virtual bool ComputeCanDeoptimize() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  DECLARE_INSTRUCTION_NO_BACKEND(UnboxUint32)

  DECLARE_EMPTY_SERIALIZATION(UnboxUint32Instr, UnboxInteger32Instr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxUint32Instr);
};
8108
// Unboxes to a signed 32-bit integer, optionally truncating.
class UnboxInt32Instr : public UnboxInteger32Instr {
 public:
  UnboxInt32Instr(TruncationMode truncation_mode,
                  Value* value,
                  intptr_t deopt_id,
                  SpeculativeMode speculative_mode = kGuardInputs)
      : UnboxInteger32Instr(kUnboxedInt32,
                            truncation_mode,
                            value,
                            deopt_id,
                            speculative_mode) {}

  virtual bool ComputeCanDeoptimize() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_INSTRUCTION_NO_BACKEND(UnboxInt32)

  DECLARE_EMPTY_SERIALIZATION(UnboxInt32Instr, UnboxInteger32Instr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInt32Instr);
};
8134
// Unboxes to a 64-bit integer (never truncating).
class UnboxInt64Instr : public UnboxIntegerInstr {
 public:
  UnboxInt64Instr(Value* value,
                  intptr_t deopt_id,
                  SpeculativeMode speculative_mode)
      : UnboxIntegerInstr(kUnboxedInt64,
                          kNoTruncation,
                          value,
                          deopt_id,
                          speculative_mode) {}

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool ComputeCanDeoptimize() const {
    if (SpeculativeModeOfInputs() == kNotSpeculative) {
      return false;
    }

    // Only deoptimizes when the input is not statically known to be an int.
    return !value()->Type()->IsInt();
  }

  DECLARE_INSTRUCTION_NO_BACKEND(UnboxInt64)

  DECLARE_EMPTY_SERIALIZATION(UnboxInt64Instr, UnboxIntegerInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInt64Instr);
};
8165
8166bool Definition::IsInt64Definition() {
8167 return (Type()->ToCid() == kMintCid) || IsBinaryInt64Op() ||
8168 IsUnaryInt64Op() || IsShiftInt64Op() || IsSpeculativeShiftInt64Op() ||
8169 IsBoxInt64() || IsUnboxInt64();
8170}
8171
8172class MathUnaryInstr : public TemplateDefinition<1, NoThrow, Pure> {
8173 public:
8174 enum MathUnaryKind {
8175 kIllegal,
8176 kSqrt,
8177 kDoubleSquare,
8178 };
8179 MathUnaryInstr(MathUnaryKind kind, Value* value, intptr_t deopt_id)
8180 : TemplateDefinition(deopt_id), kind_(kind) {
8181 SetInputAt(i: 0, value);
8182 }
8183
8184 Value* value() const { return inputs_[0]; }
8185 MathUnaryKind kind() const { return kind_; }
8186
8187 virtual bool ComputeCanDeoptimize() const { return false; }
8188
8189 virtual Representation representation() const { return kUnboxedDouble; }
8190
8191 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8192 ASSERT(idx == 0);
8193 return kUnboxedDouble;
8194 }
8195
8196 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
8197 ASSERT(idx == 0);
8198 return kNotSpeculative;
8199 }
8200
8201 virtual intptr_t DeoptimizationTarget() const {
8202 // Direct access since this instruction cannot deoptimize, and the deopt-id
8203 // was inherited from another instruction that could deoptimize.
8204 return GetDeoptId();
8205 }
8206
8207 DECLARE_INSTRUCTION(MathUnary)
8208 virtual CompileType ComputeType() const;
8209
8210 virtual bool AttributesEqual(const Instruction& other) const {
8211 return kind() == other.AsMathUnary()->kind();
8212 }
8213
8214 Definition* Canonicalize(FlowGraph* flow_graph);
8215
8216 static const char* KindToCString(MathUnaryKind kind);
8217
8218 PRINT_OPERANDS_TO_SUPPORT
8219
8220#define FIELD_LIST(F) F(const MathUnaryKind, kind_)
8221
8222 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MathUnaryInstr,
8223 TemplateDefinition,
8224 FIELD_LIST)
8225#undef FIELD_LIST
8226
8227 private:
8228 DISALLOW_COPY_AND_ASSIGN(MathUnaryInstr);
8229};
8230
8231// Calls into the runtime and performs a case-insensitive comparison of the
8232// UTF16 strings (i.e. TwoByteString or ExternalTwoByteString) located at
8233// str[lhs_index:lhs_index + length] and str[rhs_index:rhs_index + length].
8234// Depending on [handle_surrogates], we will treat the strings as either
8235// UCS2 (no surrogate handling) or UTF16 (surrogates handled appropriately).
8236class CaseInsensitiveCompareInstr
8237 : public TemplateDefinition<4, NoThrow, Pure> {
8238 public:
8239 CaseInsensitiveCompareInstr(Value* str,
8240 Value* lhs_index,
8241 Value* rhs_index,
8242 Value* length,
8243 bool handle_surrogates,
8244 intptr_t cid)
8245 : handle_surrogates_(handle_surrogates), cid_(cid) {
8246 ASSERT(cid == kTwoByteStringCid || cid == kExternalTwoByteStringCid);
8247 ASSERT(index_scale() == 2);
8248 SetInputAt(i: 0, value: str);
8249 SetInputAt(i: 1, value: lhs_index);
8250 SetInputAt(i: 2, value: rhs_index);
8251 SetInputAt(i: 3, value: length);
8252 }
8253
8254 Value* str() const { return inputs_[0]; }
8255 Value* lhs_index() const { return inputs_[1]; }
8256 Value* rhs_index() const { return inputs_[2]; }
8257 Value* length() const { return inputs_[3]; }
8258
8259 const RuntimeEntry& TargetFunction() const;
8260 bool IsExternal() const { return cid_ == kExternalTwoByteStringCid; }
8261 intptr_t class_id() const { return cid_; }
8262
8263 intptr_t index_scale() const {
8264 return compiler::target::Instance::ElementSizeFor(cid: cid_);
8265 }
8266
8267 virtual bool ComputeCanDeoptimize() const { return false; }
8268
8269 virtual Representation representation() const { return kTagged; }
8270
8271 DECLARE_INSTRUCTION(CaseInsensitiveCompare)
8272 virtual CompileType ComputeType() const;
8273
8274 virtual bool AttributesEqual(const Instruction& other) const {
8275 const auto* other_compare = other.AsCaseInsensitiveCompare();
8276 return (other_compare->handle_surrogates_ == handle_surrogates_) &&
8277 (other_compare->cid_ == cid_);
8278 }
8279
8280#define FIELD_LIST(F) \
8281 F(const bool, handle_surrogates_) \
8282 F(const intptr_t, cid_)
8283
8284 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CaseInsensitiveCompareInstr,
8285 TemplateDefinition,
8286 FIELD_LIST)
8287#undef FIELD_LIST
8288
8289 private:
8290 DISALLOW_COPY_AND_ASSIGN(CaseInsensitiveCompareInstr);
8291};
8292
8293// Represents Math's static min and max functions.
8294class MathMinMaxInstr : public TemplateDefinition<2, NoThrow, Pure> {
8295 public:
8296 MathMinMaxInstr(MethodRecognizer::Kind op_kind,
8297 Value* left_value,
8298 Value* right_value,
8299 intptr_t deopt_id,
8300 intptr_t result_cid)
8301 : TemplateDefinition(deopt_id),
8302 op_kind_(op_kind),
8303 result_cid_(result_cid) {
8304 ASSERT((result_cid == kSmiCid) || (result_cid == kDoubleCid));
8305 SetInputAt(i: 0, value: left_value);
8306 SetInputAt(i: 1, value: right_value);
8307 }
8308
8309 MethodRecognizer::Kind op_kind() const { return op_kind_; }
8310
8311 Value* left() const { return inputs_[0]; }
8312 Value* right() const { return inputs_[1]; }
8313
8314 intptr_t result_cid() const { return result_cid_; }
8315
8316 virtual bool ComputeCanDeoptimize() const { return false; }
8317
8318 virtual Representation representation() const {
8319 if (result_cid() == kSmiCid) {
8320 return kTagged;
8321 }
8322 ASSERT(result_cid() == kDoubleCid);
8323 return kUnboxedDouble;
8324 }
8325
8326 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8327 if (result_cid() == kSmiCid) {
8328 return kTagged;
8329 }
8330 ASSERT(result_cid() == kDoubleCid);
8331 return kUnboxedDouble;
8332 }
8333
8334 virtual intptr_t DeoptimizationTarget() const {
8335 // Direct access since this instruction cannot deoptimize, and the deopt-id
8336 // was inherited from another instruction that could deoptimize.
8337 return GetDeoptId();
8338 }
8339
8340 DECLARE_INSTRUCTION(MathMinMax)
8341 virtual CompileType ComputeType() const;
8342 virtual bool AttributesEqual(const Instruction& other) const;
8343
8344#define FIELD_LIST(F) \
8345 F(const MethodRecognizer::Kind, op_kind_) \
8346 F(const intptr_t, result_cid_)
8347
8348 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MathMinMaxInstr,
8349 TemplateDefinition,
8350 FIELD_LIST)
8351#undef FIELD_LIST
8352
8353 private:
8354 DISALLOW_COPY_AND_ASSIGN(MathMinMaxInstr);
8355};
8356
8357class BinaryDoubleOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
8358 public:
8359 BinaryDoubleOpInstr(Token::Kind op_kind,
8360 Value* left,
8361 Value* right,
8362 intptr_t deopt_id,
8363 const InstructionSource& source,
8364 SpeculativeMode speculative_mode = kGuardInputs)
8365 : TemplateDefinition(source, deopt_id),
8366 op_kind_(op_kind),
8367 token_pos_(source.token_pos),
8368 speculative_mode_(speculative_mode) {
8369 SetInputAt(i: 0, value: left);
8370 SetInputAt(i: 1, value: right);
8371 }
8372
8373 Value* left() const { return inputs_[0]; }
8374 Value* right() const { return inputs_[1]; }
8375
8376 Token::Kind op_kind() const { return op_kind_; }
8377
8378 virtual TokenPosition token_pos() const { return token_pos_; }
8379
8380 virtual bool ComputeCanDeoptimize() const { return false; }
8381
8382 virtual Representation representation() const { return kUnboxedDouble; }
8383
8384 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8385 ASSERT((idx == 0) || (idx == 1));
8386 return kUnboxedDouble;
8387 }
8388
8389 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
8390 return speculative_mode_;
8391 }
8392
8393 virtual intptr_t DeoptimizationTarget() const {
8394 // Direct access since this instruction cannot deoptimize, and the deopt-id
8395 // was inherited from another instruction that could deoptimize.
8396 return GetDeoptId();
8397 }
8398
8399 PRINT_OPERANDS_TO_SUPPORT
8400
8401 DECLARE_INSTRUCTION(BinaryDoubleOp)
8402 virtual CompileType ComputeType() const;
8403
8404 virtual Definition* Canonicalize(FlowGraph* flow_graph);
8405
8406 virtual bool AttributesEqual(const Instruction& other) const {
8407 auto const other_bin_op = other.AsBinaryDoubleOp();
8408 return (op_kind() == other_bin_op->op_kind()) &&
8409 (speculative_mode_ == other_bin_op->speculative_mode_);
8410 }
8411
8412#define FIELD_LIST(F) \
8413 F(const Token::Kind, op_kind_) \
8414 F(const TokenPosition, token_pos_) \
8415 F(const SpeculativeMode, speculative_mode_)
8416
8417 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryDoubleOpInstr,
8418 TemplateDefinition,
8419 FIELD_LIST)
8420#undef FIELD_LIST
8421
8422 private:
8423 DISALLOW_COPY_AND_ASSIGN(BinaryDoubleOpInstr);
8424};
8425
8426class DoubleTestOpInstr : public TemplateComparison<1, NoThrow, Pure> {
8427 public:
8428 DoubleTestOpInstr(MethodRecognizer::Kind op_kind,
8429 Value* value,
8430 intptr_t deopt_id,
8431 const InstructionSource& source)
8432 : TemplateComparison(source, Token::kEQ, deopt_id), op_kind_(op_kind) {
8433 SetInputAt(i: 0, value);
8434 }
8435
8436 Value* value() const { return InputAt(i: 0); }
8437
8438 MethodRecognizer::Kind op_kind() const { return op_kind_; }
8439
8440 virtual bool ComputeCanDeoptimize() const { return false; }
8441
8442 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8443 ASSERT(idx == 0);
8444 return kUnboxedDouble;
8445 }
8446
8447 PRINT_OPERANDS_TO_SUPPORT
8448
8449 DECLARE_COMPARISON_INSTRUCTION(DoubleTestOp)
8450
8451 virtual CompileType ComputeType() const;
8452
8453 virtual Definition* Canonicalize(FlowGraph* flow_graph);
8454
8455 virtual bool AttributesEqual(const Instruction& other) const {
8456 return op_kind_ == other.AsDoubleTestOp()->op_kind() &&
8457 ComparisonInstr::AttributesEqual(other);
8458 }
8459
8460 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
8461
8462#define FIELD_LIST(F) F(const MethodRecognizer::Kind, op_kind_)
8463
8464 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleTestOpInstr,
8465 TemplateComparison,
8466 FIELD_LIST)
8467#undef FIELD_LIST
8468
8469 private:
8470 DISALLOW_COPY_AND_ASSIGN(DoubleTestOpInstr);
8471};
8472
8473class HashDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
8474 public:
8475 HashDoubleOpInstr(Value* value, intptr_t deopt_id)
8476 : TemplateDefinition(deopt_id) {
8477 SetInputAt(i: 0, value);
8478 }
8479
8480 static HashDoubleOpInstr* Create(Value* value, intptr_t deopt_id) {
8481 return new HashDoubleOpInstr(value, deopt_id);
8482 }
8483
8484 Value* value() const { return inputs_[0]; }
8485
8486 virtual intptr_t DeoptimizationTarget() const {
8487 // Direct access since this instruction cannot deoptimize, and the deopt-id
8488 // was inherited from another instruction that could deoptimize.
8489 return GetDeoptId();
8490 }
8491
8492 virtual Representation representation() const { return kUnboxedInt64; }
8493
8494 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8495 ASSERT(idx == 0);
8496 return kUnboxedDouble;
8497 }
8498
8499 DECLARE_INSTRUCTION(HashDoubleOp)
8500
8501 virtual bool ComputeCanDeoptimize() const { return false; }
8502
8503 virtual CompileType ComputeType() const { return CompileType::Smi(); }
8504
8505 virtual bool AttributesEqual(const Instruction& other) const { return true; }
8506
8507 DECLARE_EMPTY_SERIALIZATION(HashDoubleOpInstr, TemplateDefinition)
8508
8509 private:
8510 DISALLOW_COPY_AND_ASSIGN(HashDoubleOpInstr);
8511};
8512
8513class HashIntegerOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
8514 public:
8515 HashIntegerOpInstr(Value* value, bool smi, intptr_t deopt_id)
8516 : TemplateDefinition(deopt_id), smi_(smi) {
8517 SetInputAt(i: 0, value);
8518 }
8519
8520 static HashIntegerOpInstr* Create(Value* value, bool smi, intptr_t deopt_id) {
8521 return new HashIntegerOpInstr(value, smi, deopt_id);
8522 }
8523
8524 Value* value() const { return inputs_[0]; }
8525
8526 virtual intptr_t DeoptimizationTarget() const {
8527 // Direct access since this instruction cannot deoptimize, and the deopt-id
8528 // was inherited from another instruction that could deoptimize.
8529 return GetDeoptId();
8530 }
8531
8532 virtual Representation representation() const { return kTagged; }
8533
8534 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8535 ASSERT(idx == 0);
8536 return kTagged;
8537 }
8538
8539 DECLARE_INSTRUCTION(HashIntegerOp)
8540
8541 virtual bool ComputeCanDeoptimize() const { return false; }
8542
8543 virtual CompileType ComputeType() const { return CompileType::Smi(); }
8544
8545 virtual bool AttributesEqual(const Instruction& other) const { return true; }
8546
8547#define FIELD_LIST(F) F(const bool, smi_)
8548
8549 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(HashIntegerOpInstr,
8550 TemplateDefinition,
8551 FIELD_LIST)
8552#undef FIELD_LIST
8553
8554 PRINT_OPERANDS_TO_SUPPORT
8555
8556 private:
8557 DISALLOW_COPY_AND_ASSIGN(HashIntegerOpInstr);
8558};
8559
8560class UnaryIntegerOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
8561 public:
8562 UnaryIntegerOpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
8563 : TemplateDefinition(deopt_id), op_kind_(op_kind) {
8564 ASSERT((op_kind == Token::kNEGATE) || (op_kind == Token::kBIT_NOT));
8565 SetInputAt(i: 0, value);
8566 }
8567
8568 static UnaryIntegerOpInstr* Make(Representation representation,
8569 Token::Kind op_kind,
8570 Value* value,
8571 intptr_t deopt_id,
8572 Range* range);
8573
8574 Value* value() const { return inputs_[0]; }
8575 Token::Kind op_kind() const { return op_kind_; }
8576
8577 virtual bool AttributesEqual(const Instruction& other) const {
8578 return other.AsUnaryIntegerOp()->op_kind() == op_kind();
8579 }
8580
8581 virtual intptr_t DeoptimizationTarget() const {
8582 // Direct access since this instruction cannot deoptimize, and the deopt-id
8583 // was inherited from another instruction that could deoptimize.
8584 return GetDeoptId();
8585 }
8586
8587 PRINT_OPERANDS_TO_SUPPORT
8588
8589 DECLARE_ABSTRACT_INSTRUCTION(UnaryIntegerOp)
8590
8591#define FIELD_LIST(F) F(const Token::Kind, op_kind_)
8592
8593 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnaryIntegerOpInstr,
8594 TemplateDefinition,
8595 FIELD_LIST)
8596#undef FIELD_LIST
8597
8598 private:
8599 DISALLOW_COPY_AND_ASSIGN(UnaryIntegerOpInstr);
8600};
8601
8602// Handles both Smi operations: BIT_OR and NEGATE.
8603class UnarySmiOpInstr : public UnaryIntegerOpInstr {
8604 public:
8605 UnarySmiOpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
8606 : UnaryIntegerOpInstr(op_kind, value, deopt_id) {}
8607
8608 virtual bool ComputeCanDeoptimize() const {
8609 return op_kind() == Token::kNEGATE;
8610 }
8611
8612 virtual CompileType ComputeType() const;
8613
8614 DECLARE_INSTRUCTION(UnarySmiOp)
8615
8616 DECLARE_EMPTY_SERIALIZATION(UnarySmiOpInstr, UnaryIntegerOpInstr)
8617
8618 private:
8619 DISALLOW_COPY_AND_ASSIGN(UnarySmiOpInstr);
8620};
8621
// Unary operation on an unboxed uint32 value; only BIT_NOT is supported
// (see IsSupported). Cannot deoptimize.
class UnaryUint32OpInstr : public UnaryIntegerOpInstr {
 public:
  UnaryUint32OpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
      : UnaryIntegerOpInstr(op_kind, value, deopt_id) {
    ASSERT(IsSupported(op_kind));
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual CompileType ComputeType() const;

  virtual Representation representation() const { return kUnboxedUint32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedUint32;
  }

  // Negation is not supported in uint32 form; only bitwise complement.
  static bool IsSupported(Token::Kind op_kind) {
    return op_kind == Token::kBIT_NOT;
  }

  DECLARE_INSTRUCTION(UnaryUint32Op)

  DECLARE_EMPTY_SERIALIZATION(UnaryUint32OpInstr, UnaryIntegerOpInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnaryUint32OpInstr);
};
8651
// Unary operation (BIT_NOT or NEGATE) on an unboxed int64 value.
// Cannot deoptimize; int64 negation wraps around.
class UnaryInt64OpInstr : public UnaryIntegerOpInstr {
 public:
  UnaryInt64OpInstr(Token::Kind op_kind,
                    Value* value,
                    intptr_t deopt_id,
                    SpeculativeMode speculative_mode = kGuardInputs)
      : UnaryIntegerOpInstr(op_kind, value, deopt_id),
        speculative_mode_(speculative_mode) {
    ASSERT(op_kind == Token::kBIT_NOT || op_kind == Token::kNEGATE);
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual CompileType ComputeType() const;

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedInt64;
  }

  // Congruent only if both the operation kind (checked by the base class)
  // and the speculative mode match.
  virtual bool AttributesEqual(const Instruction& other) const {
    auto const unary_op_other = other.AsUnaryInt64Op();
    return UnaryIntegerOpInstr::AttributesEqual(other) &&
           (speculative_mode_ == unary_op_other->speculative_mode_);
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  DECLARE_INSTRUCTION(UnaryInt64Op)

#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnaryInt64OpInstr,
                                          UnaryIntegerOpInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(UnaryInt64OpInstr);
};
8696
8697class BinaryIntegerOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
8698 public:
8699 BinaryIntegerOpInstr(Token::Kind op_kind,
8700 Value* left,
8701 Value* right,
8702 intptr_t deopt_id)
8703 : TemplateDefinition(deopt_id),
8704 op_kind_(op_kind),
8705 can_overflow_(true),
8706 is_truncating_(false) {
8707 SetInputAt(i: 0, value: left);
8708 SetInputAt(i: 1, value: right);
8709 }
8710
8711 static BinaryIntegerOpInstr* Make(
8712 Representation representation,
8713 Token::Kind op_kind,
8714 Value* left,
8715 Value* right,
8716 intptr_t deopt_id,
8717 SpeculativeMode speculative_mode = kGuardInputs);
8718
8719 static BinaryIntegerOpInstr* Make(
8720 Representation representation,
8721 Token::Kind op_kind,
8722 Value* left,
8723 Value* right,
8724 intptr_t deopt_id,
8725 bool can_overflow,
8726 bool is_truncating,
8727 Range* range,
8728 SpeculativeMode speculative_mode = kGuardInputs);
8729
8730 Token::Kind op_kind() const { return op_kind_; }
8731 Value* left() const { return inputs_[0]; }
8732 Value* right() const { return inputs_[1]; }
8733
8734 bool can_overflow() const { return can_overflow_; }
8735 void set_can_overflow(bool overflow) {
8736 ASSERT(!is_truncating_ || !overflow);
8737 can_overflow_ = overflow;
8738 }
8739
8740 bool is_truncating() const { return is_truncating_; }
8741 void mark_truncating() {
8742 is_truncating_ = true;
8743 set_can_overflow(false);
8744 }
8745
8746 // Returns true if right is a non-zero Smi constant which absolute value is
8747 // a power of two.
8748 bool RightIsPowerOfTwoConstant() const;
8749
8750 virtual Definition* Canonicalize(FlowGraph* flow_graph);
8751
8752 virtual bool AttributesEqual(const Instruction& other) const;
8753
8754 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
8755
8756 virtual void InferRange(RangeAnalysis* analysis, Range* range);
8757
8758 PRINT_OPERANDS_TO_SUPPORT
8759
8760 DECLARE_ABSTRACT_INSTRUCTION(BinaryIntegerOp)
8761
8762#define FIELD_LIST(F) \
8763 F(const Token::Kind, op_kind_) \
8764 F(bool, can_overflow_) \
8765 F(bool, is_truncating_)
8766
8767 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryIntegerOpInstr,
8768 TemplateDefinition,
8769 FIELD_LIST)
8770#undef FIELD_LIST
8771
8772 protected:
8773 void InferRangeHelper(const Range* left_range,
8774 const Range* right_range,
8775 Range* range);
8776
8777 private:
8778 Definition* CreateConstantResult(FlowGraph* graph, const Integer& result);
8779
8780 DISALLOW_COPY_AND_ASSIGN(BinaryIntegerOpInstr);
8781};
8782
// Binary operation on two tagged Smi operands. May deoptimize (e.g. on
// overflow); see ComputeCanDeoptimize in the .cc file.
class BinarySmiOpInstr : public BinaryIntegerOpInstr {
 public:
  BinarySmiOpInstr(Token::Kind op_kind,
                   Value* left,
                   Value* right,
                   intptr_t deopt_id,
                   // Provided by BinaryIntegerOpInstr::Make for constant RHS.
                   Range* right_range = nullptr)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
        right_range_(right_range) {}

  virtual bool ComputeCanDeoptimize() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);
  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(BinarySmiOp)

  // Known range of the right operand, or nullptr if unknown.
  Range* right_range() const { return right_range_; }

#define FIELD_LIST(F) F(Range*, right_range_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinarySmiOpInstr,
                                          BinaryIntegerOpInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(BinarySmiOpInstr);
};
8813
8814class BinaryInt32OpInstr : public BinaryIntegerOpInstr {
8815 public:
8816 BinaryInt32OpInstr(Token::Kind op_kind,
8817 Value* left,
8818 Value* right,
8819 intptr_t deopt_id)
8820 : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {
8821 SetInputAt(i: 0, value: left);
8822 SetInputAt(i: 1, value: right);
8823 }
8824
8825 static bool IsSupported(Token::Kind op_kind, Value* left, Value* right) {
8826#if defined(TARGET_ARCH_IS_32_BIT)
8827 switch (op_kind) {
8828 case Token::kADD:
8829 case Token::kSUB:
8830 case Token::kMUL:
8831 case Token::kBIT_AND:
8832 case Token::kBIT_OR:
8833 case Token::kBIT_XOR:
8834 return true;
8835
8836 case Token::kSHL:
8837 case Token::kSHR:
8838 case Token::kUSHR:
8839 if (right->BindsToConstant() && right->BoundConstant().IsSmi()) {
8840 const intptr_t value = Smi::Cast(right->BoundConstant()).Value();
8841 return 0 <= value && value < kBitsPerWord;
8842 }
8843 return false;
8844
8845 default:
8846 return false;
8847 }
8848#else
8849 return false;
8850#endif
8851 }
8852
8853 virtual bool ComputeCanDeoptimize() const;
8854
8855 virtual Representation representation() const { return kUnboxedInt32; }
8856
8857 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8858 ASSERT((idx == 0) || (idx == 1));
8859 return kUnboxedInt32;
8860 }
8861
8862 virtual CompileType ComputeType() const;
8863
8864 DECLARE_INSTRUCTION(BinaryInt32Op)
8865
8866 DECLARE_EMPTY_SERIALIZATION(BinaryInt32OpInstr, BinaryIntegerOpInstr)
8867
8868 private:
8869 DISALLOW_COPY_AND_ASSIGN(BinaryInt32OpInstr);
8870};
8871
8872class BinaryUint32OpInstr : public BinaryIntegerOpInstr {
8873 public:
8874 BinaryUint32OpInstr(Token::Kind op_kind,
8875 Value* left,
8876 Value* right,
8877 intptr_t deopt_id)
8878 : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {
8879 mark_truncating();
8880 ASSERT(IsSupported(op_kind));
8881 }
8882
8883 virtual bool ComputeCanDeoptimize() const { return false; }
8884
8885 virtual Representation representation() const { return kUnboxedUint32; }
8886
8887 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8888 ASSERT((idx == 0) || (idx == 1));
8889 return kUnboxedUint32;
8890 }
8891
8892 virtual CompileType ComputeType() const;
8893
8894 static bool IsSupported(Token::Kind op_kind) {
8895 switch (op_kind) {
8896 case Token::kADD:
8897 case Token::kSUB:
8898 case Token::kMUL:
8899 case Token::kBIT_AND:
8900 case Token::kBIT_OR:
8901 case Token::kBIT_XOR:
8902 return true;
8903 default:
8904 return false;
8905 }
8906 }
8907
8908 DECLARE_INSTRUCTION(BinaryUint32Op)
8909
8910 DECLARE_EMPTY_SERIALIZATION(BinaryUint32OpInstr, BinaryIntegerOpInstr)
8911
8912 private:
8913 DISALLOW_COPY_AND_ASSIGN(BinaryUint32OpInstr);
8914};
8915
// Binary operation on two unboxed int64 operands. Always truncating
// (wrap-around), so it cannot overflow or deoptimize; division-like
// operations may still throw (division by zero).
class BinaryInt64OpInstr : public BinaryIntegerOpInstr {
 public:
  BinaryInt64OpInstr(Token::Kind op_kind,
                     Value* left,
                     Value* right,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
        speculative_mode_(speculative_mode) {
    mark_truncating();
  }

  virtual bool ComputeCanDeoptimize() const {
    // mark_truncating() in the constructor guarantees this.
    ASSERT(!can_overflow());
    return false;
  }

  // MOD and TRUNCDIV can throw on a zero right operand.
  virtual bool MayThrow() const {
    return op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV;
  }

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedInt64;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  // Congruent only if operation kind, flags (base class) and speculative
  // mode all match.
  virtual bool AttributesEqual(const Instruction& other) const {
    return BinaryIntegerOpInstr::AttributesEqual(other) &&
           (speculative_mode_ == other.AsBinaryInt64Op()->speculative_mode_);
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(BinaryInt64Op)

#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryInt64OpInstr,
                                          BinaryIntegerOpInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(BinaryInt64OpInstr);
};
8967
// Base class for integer shift operations.
// All shifts (kSHL, kSHR, kUSHR) are truncating; subclasses differ in
// operand representations and in how out-of-range shift counts are handled
// (throw vs. deoptimize).
class ShiftIntegerOpInstr : public BinaryIntegerOpInstr {
 public:
  ShiftIntegerOpInstr(Token::Kind op_kind,
                      Value* left,
                      Value* right,
                      intptr_t deopt_id,
                      // Provided by BinaryIntegerOpInstr::Make for constant RHS
                      Range* right_range = nullptr)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
        shift_range_(right_range) {
    ASSERT((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
           (op_kind == Token::kUSHR));
    mark_truncating();
  }

  // Known range of the shift amount, or nullptr if unknown.
  Range* shift_range() const { return shift_range_; }

  // Set the range directly (takes ownership).
  void set_shift_range(Range* shift_range) { shift_range_ = shift_range; }

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  DECLARE_ABSTRACT_INSTRUCTION(ShiftIntegerOp)

#define FIELD_LIST(F) F(Range*, shift_range_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ShiftIntegerOpInstr,
                                          BinaryIntegerOpInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 protected:
  // Maximum shift distance for a 64-bit value.
  static constexpr intptr_t kShiftCountLimit = 63;

  // Returns true if the shift amount is guaranteed to be in
  // [0..max] range.
  bool IsShiftCountInRange(int64_t max = kShiftCountLimit) const;

 private:
  DISALLOW_COPY_AND_ASSIGN(ShiftIntegerOpInstr);
};
9010
// Non-speculative int64 shift. Takes 2 unboxed int64.
// Throws if right operand is negative.
class ShiftInt64OpInstr : public ShiftIntegerOpInstr {
 public:
  ShiftInt64OpInstr(Token::Kind op_kind,
                    Value* left,
                    Value* right,
                    intptr_t deopt_id,
                    Range* right_range = nullptr)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

  // Inputs are never guarded; an invalid shift count throws instead of
  // deoptimizing.
  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  // Can only throw when the shift count is not provably in range.
  virtual bool MayThrow() const { return !IsShiftCountInRange(); }

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedInt64;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(ShiftInt64Op)

  DECLARE_EMPTY_SERIALIZATION(ShiftInt64OpInstr, ShiftIntegerOpInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(ShiftInt64OpInstr);
};
9044
// Speculative int64 shift. Takes unboxed int64 and smi.
// Deoptimizes if right operand is negative or greater than kShiftCountLimit.
class SpeculativeShiftInt64OpInstr : public ShiftIntegerOpInstr {
 public:
  SpeculativeShiftInt64OpInstr(Token::Kind op_kind,
                               Value* left,
                               Value* right,
                               intptr_t deopt_id,
                               Range* right_range = nullptr)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

  virtual bool ComputeCanDeoptimize() const {
    // Shifts are truncating, so overflow is impossible; the only deopt
    // reason is an out-of-range shift count.
    ASSERT(!can_overflow());
    return !IsShiftCountInRange();
  }

  virtual Representation representation() const { return kUnboxedInt64; }

  // Left operand is unboxed int64; the shift count stays tagged (Smi).
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedInt64 : kTagged;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(SpeculativeShiftInt64Op)

  DECLARE_EMPTY_SERIALIZATION(SpeculativeShiftInt64OpInstr, ShiftIntegerOpInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftInt64OpInstr);
};
9077
// Non-speculative uint32 shift. Takes unboxed uint32 and unboxed int64.
// Throws if right operand is negative.
class ShiftUint32OpInstr : public ShiftIntegerOpInstr {
 public:
  ShiftUint32OpInstr(Token::Kind op_kind,
                     Value* left,
                     Value* right,
                     intptr_t deopt_id,
                     Range* right_range = nullptr)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  // Conservatively assumed to throw (negative shift count cannot be
  // ruled out here).
  virtual bool MayThrow() const { return true; }

  virtual Representation representation() const { return kUnboxedUint32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedUint32 : kUnboxedInt64;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(ShiftUint32Op)

  DECLARE_EMPTY_SERIALIZATION(ShiftUint32OpInstr, ShiftIntegerOpInstr)

 private:
  // Maximum shift distance for a 32-bit value; used by the backend
  // implementation in the .cc files.
  static constexpr intptr_t kUint32ShiftCountLimit = 31;

  DISALLOW_COPY_AND_ASSIGN(ShiftUint32OpInstr);
};
9113
// Speculative uint32 shift. Takes unboxed uint32 and smi.
// Deoptimizes if right operand is negative.
class SpeculativeShiftUint32OpInstr : public ShiftIntegerOpInstr {
 public:
  SpeculativeShiftUint32OpInstr(Token::Kind op_kind,
                                Value* left,
                                Value* right,
                                intptr_t deopt_id,
                                Range* right_range = nullptr)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

  // Deoptimizes unless the shift count is provably within range.
  virtual bool ComputeCanDeoptimize() const { return !IsShiftCountInRange(); }

  virtual Representation representation() const { return kUnboxedUint32; }

  // Left operand is unboxed uint32; the shift count stays tagged (Smi).
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedUint32 : kTagged;
  }

  DECLARE_INSTRUCTION(SpeculativeShiftUint32Op)

  virtual CompileType ComputeType() const;

  DECLARE_EMPTY_SERIALIZATION(SpeculativeShiftUint32OpInstr,
                              ShiftIntegerOpInstr)

 private:
  // Maximum shift distance for a 32-bit value; used by the backend
  // implementation in the .cc files.
  static constexpr intptr_t kUint32ShiftCountLimit = 31;

  DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftUint32OpInstr);
};
9146
9147// Handles only NEGATE.
9148class UnaryDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
9149 public:
9150 UnaryDoubleOpInstr(Token::Kind op_kind,
9151 Value* value,
9152 intptr_t deopt_id,
9153 SpeculativeMode speculative_mode = kGuardInputs)
9154 : TemplateDefinition(deopt_id),
9155 op_kind_(op_kind),
9156 speculative_mode_(speculative_mode) {
9157 ASSERT(op_kind == Token::kNEGATE);
9158 SetInputAt(i: 0, value);
9159 }
9160
9161 Value* value() const { return inputs_[0]; }
9162 Token::Kind op_kind() const { return op_kind_; }
9163
9164 DECLARE_INSTRUCTION(UnaryDoubleOp)
9165 virtual CompileType ComputeType() const;
9166
9167 virtual bool ComputeCanDeoptimize() const { return false; }
9168
9169 virtual intptr_t DeoptimizationTarget() const {
9170 // Direct access since this instruction cannot deoptimize, and the deopt-id
9171 // was inherited from another instruction that could deoptimize.
9172 return GetDeoptId();
9173 }
9174
9175 virtual Representation representation() const { return kUnboxedDouble; }
9176
9177 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9178 ASSERT(idx == 0);
9179 return kUnboxedDouble;
9180 }
9181
9182 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
9183 return speculative_mode_;
9184 }
9185
9186 virtual bool AttributesEqual(const Instruction& other) const {
9187 return speculative_mode_ == other.AsUnaryDoubleOp()->speculative_mode_;
9188 }
9189
9190 PRINT_OPERANDS_TO_SUPPORT
9191
9192#define FIELD_LIST(F) \
9193 F(const Token::Kind, op_kind_) \
9194 F(const SpeculativeMode, speculative_mode_)
9195
9196 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnaryDoubleOpInstr,
9197 TemplateDefinition,
9198 FIELD_LIST)
9199#undef FIELD_LIST
9200
9201 private:
9202 DISALLOW_COPY_AND_ASSIGN(UnaryDoubleOpInstr);
9203};
9204
// Checks for stack overflow and serves as an interrupt/preemption point
// (and, in unoptimized code, an on-stack-replacement entry point).
class CheckStackOverflowInstr : public TemplateInstruction<0, NoThrow> {
 public:
  enum Kind {
    // kOsrAndPreemption stack overflow checks are emitted in both unoptimized
    // and optimized versions of the code and they serve as both preemption and
    // OSR entry points.
    kOsrAndPreemption,

    // kOsrOnly stack overflow checks are only needed in the unoptimized code
    // because we can't OSR optimized code.
    kOsrOnly,
  };

  CheckStackOverflowInstr(const InstructionSource& source,
                          intptr_t stack_depth,
                          intptr_t loop_depth,
                          intptr_t deopt_id,
                          Kind kind)
      : TemplateInstruction(source, deopt_id),
        token_pos_(source.token_pos),
        stack_depth_(stack_depth),
        loop_depth_(loop_depth),
        kind_(kind) {
    // OSR-only checks are only emitted inside loops.
    ASSERT(kind != kOsrOnly || loop_depth > 0);
  }

  virtual TokenPosition token_pos() const { return token_pos_; }
  // True when this check sits inside at least one loop.
  bool in_loop() const { return loop_depth_ > 0; }
  intptr_t stack_depth() const { return stack_depth_; }
  intptr_t loop_depth() const { return loop_depth_; }

  DECLARE_INSTRUCTION(CheckStackOverflow)

  virtual bool ComputeCanDeoptimize() const {
    // In JIT mode the check may trigger OSR, which requires deopt metadata.
    return !CompilerState::Current().is_aot();
  }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
    return SlowPathSharingSupported(is_optimizing);
  }

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const TokenPosition, token_pos_)                                           \
  F(const intptr_t, stack_depth_)                                              \
  F(const intptr_t, loop_depth_)                                               \
  F(const Kind, kind_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckStackOverflowInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(CheckStackOverflowInstr);
};
9266
9267// TODO(vegorov): remove this instruction in favor of Int32ToDouble.
9268class SmiToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
9269 public:
9270 SmiToDoubleInstr(Value* value, const InstructionSource& source)
9271 : TemplateDefinition(source), token_pos_(source.token_pos) {
9272 SetInputAt(i: 0, value);
9273 }
9274
9275 Value* value() const { return inputs_[0]; }
9276 virtual TokenPosition token_pos() const { return token_pos_; }
9277
9278 DECLARE_INSTRUCTION(SmiToDouble)
9279 virtual CompileType ComputeType() const;
9280
9281 virtual Representation representation() const { return kUnboxedDouble; }
9282
9283 virtual bool ComputeCanDeoptimize() const { return false; }
9284
9285 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9286
9287#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
9288
9289 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SmiToDoubleInstr,
9290 TemplateDefinition,
9291 FIELD_LIST)
9292#undef FIELD_LIST
9293
9294 private:
9295 DISALLOW_COPY_AND_ASSIGN(SmiToDoubleInstr);
9296};
9297
9298class Int32ToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
9299 public:
9300 explicit Int32ToDoubleInstr(Value* value) { SetInputAt(i: 0, value); }
9301
9302 Value* value() const { return inputs_[0]; }
9303
9304 DECLARE_INSTRUCTION(Int32ToDouble)
9305 virtual CompileType ComputeType() const;
9306
9307 virtual Representation RequiredInputRepresentation(intptr_t index) const {
9308 ASSERT(index == 0);
9309 return kUnboxedInt32;
9310 }
9311
9312 virtual Representation representation() const { return kUnboxedDouble; }
9313
9314 virtual bool ComputeCanDeoptimize() const { return false; }
9315
9316 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9317
9318 DECLARE_EMPTY_SERIALIZATION(Int32ToDoubleInstr, TemplateDefinition)
9319
9320 private:
9321 DISALLOW_COPY_AND_ASSIGN(Int32ToDoubleInstr);
9322};
9323
9324class Int64ToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
9325 public:
9326 Int64ToDoubleInstr(Value* value,
9327 intptr_t deopt_id,
9328 SpeculativeMode speculative_mode = kGuardInputs)
9329 : TemplateDefinition(deopt_id), speculative_mode_(speculative_mode) {
9330 SetInputAt(i: 0, value);
9331 }
9332
9333 Value* value() const { return inputs_[0]; }
9334
9335 DECLARE_INSTRUCTION(Int64ToDouble)
9336 virtual CompileType ComputeType() const;
9337
9338 virtual Representation RequiredInputRepresentation(intptr_t index) const {
9339 ASSERT(index == 0);
9340 return kUnboxedInt64;
9341 }
9342
9343 virtual Representation representation() const { return kUnboxedDouble; }
9344
9345 virtual intptr_t DeoptimizationTarget() const {
9346 // Direct access since this instruction cannot deoptimize, and the deopt-id
9347 // was inherited from another instruction that could deoptimize.
9348 return GetDeoptId();
9349 }
9350
9351 virtual bool ComputeCanDeoptimize() const { return false; }
9352
9353 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
9354 return speculative_mode_;
9355 }
9356
9357 virtual bool AttributesEqual(const Instruction& other) const {
9358 return speculative_mode_ == other.AsInt64ToDouble()->speculative_mode_;
9359 }
9360
9361#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
9362
9363 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Int64ToDoubleInstr,
9364 TemplateDefinition,
9365 FIELD_LIST)
9366#undef FIELD_LIST
9367
9368 private:
9369 DISALLOW_COPY_AND_ASSIGN(Int64ToDoubleInstr);
9370};
9371
9372class DoubleToIntegerInstr : public TemplateDefinition<1, Throws, Pure> {
9373 public:
9374 DoubleToIntegerInstr(Value* value,
9375 MethodRecognizer::Kind recognized_kind,
9376 intptr_t deopt_id)
9377 : TemplateDefinition(deopt_id), recognized_kind_(recognized_kind) {
9378 ASSERT((recognized_kind == MethodRecognizer::kDoubleToInteger) ||
9379 (recognized_kind == MethodRecognizer::kDoubleFloorToInt) ||
9380 (recognized_kind == MethodRecognizer::kDoubleCeilToInt));
9381 SetInputAt(i: 0, value);
9382 }
9383
9384 Value* value() const { return inputs_[0]; }
9385
9386 MethodRecognizer::Kind recognized_kind() const { return recognized_kind_; }
9387
9388 DECLARE_INSTRUCTION(DoubleToInteger)
9389 virtual CompileType ComputeType() const;
9390
9391 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9392 ASSERT(idx == 0);
9393 return kUnboxedDouble;
9394 }
9395
9396 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
9397 ASSERT(idx == 0);
9398 return kNotSpeculative;
9399 }
9400
9401 virtual bool ComputeCanDeoptimize() const {
9402 return !CompilerState::Current().is_aot();
9403 }
9404
9405 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
9406
9407 virtual bool HasUnknownSideEffects() const { return false; }
9408
9409 virtual bool AttributesEqual(const Instruction& other) const {
9410 return other.AsDoubleToInteger()->recognized_kind() == recognized_kind();
9411 }
9412
9413#define FIELD_LIST(F) F(const MethodRecognizer::Kind, recognized_kind_)
9414
9415 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleToIntegerInstr,
9416 TemplateDefinition,
9417 FIELD_LIST)
9418#undef FIELD_LIST
9419
9420 private:
9421 DISALLOW_COPY_AND_ASSIGN(DoubleToIntegerInstr);
9422};
9423
9424// Similar to 'DoubleToIntegerInstr' but expects unboxed double as input
9425// and creates a Smi.
9426class DoubleToSmiInstr : public TemplateDefinition<1, NoThrow, Pure> {
9427 public:
9428 DoubleToSmiInstr(Value* value, intptr_t deopt_id)
9429 : TemplateDefinition(deopt_id) {
9430 SetInputAt(i: 0, value);
9431 }
9432
9433 Value* value() const { return inputs_[0]; }
9434
9435 DECLARE_INSTRUCTION(DoubleToSmi)
9436 virtual CompileType ComputeType() const;
9437
9438 virtual bool ComputeCanDeoptimize() const { return true; }
9439
9440 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9441 ASSERT(idx == 0);
9442 return kUnboxedDouble;
9443 }
9444
9445 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
9446
9447 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9448
9449 DECLARE_EMPTY_SERIALIZATION(DoubleToSmiInstr, TemplateDefinition)
9450
9451 private:
9452 DISALLOW_COPY_AND_ASSIGN(DoubleToSmiInstr);
9453};
9454
9455class DoubleToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
9456 public:
9457 DoubleToDoubleInstr(Value* value,
9458 MethodRecognizer::Kind recognized_kind,
9459 intptr_t deopt_id)
9460 : TemplateDefinition(deopt_id), recognized_kind_(recognized_kind) {
9461 ASSERT((recognized_kind == MethodRecognizer::kDoubleTruncateToDouble) ||
9462 (recognized_kind == MethodRecognizer::kDoubleFloorToDouble) ||
9463 (recognized_kind == MethodRecognizer::kDoubleCeilToDouble));
9464 SetInputAt(i: 0, value);
9465 }
9466
9467 Value* value() const { return inputs_[0]; }
9468
9469 MethodRecognizer::Kind recognized_kind() const { return recognized_kind_; }
9470
9471 DECLARE_INSTRUCTION(DoubleToDouble)
9472 virtual CompileType ComputeType() const;
9473
9474 virtual bool ComputeCanDeoptimize() const { return false; }
9475
9476 virtual Representation representation() const { return kUnboxedDouble; }
9477
9478 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9479 ASSERT(idx == 0);
9480 return kUnboxedDouble;
9481 }
9482
9483 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
9484 ASSERT(idx == 0);
9485 return kNotSpeculative;
9486 }
9487
9488 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
9489
9490 virtual bool AttributesEqual(const Instruction& other) const {
9491 return other.AsDoubleToDouble()->recognized_kind() == recognized_kind();
9492 }
9493
9494#define FIELD_LIST(F) F(const MethodRecognizer::Kind, recognized_kind_)
9495
9496 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleToDoubleInstr,
9497 TemplateDefinition,
9498 FIELD_LIST)
9499#undef FIELD_LIST
9500
9501 private:
9502 DISALLOW_COPY_AND_ASSIGN(DoubleToDoubleInstr);
9503};
9504
9505class DoubleToFloatInstr : public TemplateDefinition<1, NoThrow, Pure> {
9506 public:
9507 DoubleToFloatInstr(Value* value,
9508 intptr_t deopt_id,
9509 SpeculativeMode speculative_mode = kGuardInputs)
9510 : TemplateDefinition(deopt_id), speculative_mode_(speculative_mode) {
9511 SetInputAt(i: 0, value);
9512 }
9513
9514 Value* value() const { return inputs_[0]; }
9515
9516 DECLARE_INSTRUCTION(DoubleToFloat)
9517
9518 virtual CompileType ComputeType() const;
9519
9520 virtual bool ComputeCanDeoptimize() const { return false; }
9521
9522 virtual Representation representation() const { return kUnboxedFloat; }
9523
9524 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9525 ASSERT(idx == 0);
9526 return kUnboxedDouble;
9527 }
9528
9529 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
9530 return speculative_mode_;
9531 }
9532
9533 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
9534
9535 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9536
9537 virtual Definition* Canonicalize(FlowGraph* flow_graph);
9538
9539#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
9540
9541 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleToFloatInstr,
9542 TemplateDefinition,
9543 FIELD_LIST)
9544#undef FIELD_LIST
9545
9546 private:
9547 DISALLOW_COPY_AND_ASSIGN(DoubleToFloatInstr);
9548};
9549
9550class FloatToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
9551 public:
9552 FloatToDoubleInstr(Value* value, intptr_t deopt_id)
9553 : TemplateDefinition(deopt_id) {
9554 SetInputAt(i: 0, value);
9555 }
9556
9557 Value* value() const { return inputs_[0]; }
9558
9559 DECLARE_INSTRUCTION(FloatToDouble)
9560
9561 virtual CompileType ComputeType() const;
9562
9563 virtual bool ComputeCanDeoptimize() const { return false; }
9564
9565 virtual Representation representation() const { return kUnboxedDouble; }
9566
9567 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9568 ASSERT(idx == 0);
9569 return kUnboxedFloat;
9570 }
9571
9572 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
9573
9574 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9575
9576 virtual Definition* Canonicalize(FlowGraph* flow_graph);
9577
9578 DECLARE_EMPTY_SERIALIZATION(FloatToDoubleInstr, TemplateDefinition)
9579
9580 private:
9581 DISALLOW_COPY_AND_ASSIGN(FloatToDoubleInstr);
9582};
9583
9584// TODO(sjindel): Replace with FFICallInstr.
// Calls a C function through a runtime entry (see TargetFunction()) for a
// recognized math method (e.g. dart:math intrinsics). All inputs and the
// result use the unboxed-double representation.
class InvokeMathCFunctionInstr : public VariadicDefinition {
 public:
  InvokeMathCFunctionInstr(InputsArray&& inputs,
                           intptr_t deopt_id,
                           MethodRecognizer::Kind recognized_kind,
                           const InstructionSource& source);

  // Number of inputs the given recognized math function expects.
  static intptr_t ArgumentCountFor(MethodRecognizer::Kind recognized_kind_);

  // Runtime entry wrapping the C function that implements recognized_kind().
  const RuntimeEntry& TargetFunction() const;

  MethodRecognizer::Kind recognized_kind() const { return recognized_kind_; }

  virtual TokenPosition token_pos() const { return token_pos_; }

  DECLARE_INSTRUCTION(InvokeMathCFunction)
  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((0 <= idx) && (idx < InputCount()));
    return kUnboxedDouble;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
    ASSERT((0 <= idx) && (idx < InputCount()));
    return kNotSpeculative;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  // Pure call into the C library: eligible for CSE, no unknown side effects,
  // and cannot throw.
  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  // Two invocations are equivalent iff they call the same recognized
  // function (inputs are compared separately by the CSE framework).
  virtual bool AttributesEqual(const Instruction& other) const {
    auto const other_invoke = other.AsInvokeMathCFunction();
    return other_invoke->recognized_kind() == recognized_kind();
  }

  virtual bool MayThrow() const { return false; }

  // Indices of temporary locations used by the backend when emitting the
  // call (assumed from the names; confirm against the per-arch backends).
  static constexpr intptr_t kSavedSpTempIndex = 0;
  static constexpr intptr_t kObjectTempIndex = 1;
  static constexpr intptr_t kDoubleTempIndex = 2;

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const MethodRecognizer::Kind, recognized_kind_)                            \
  F(const TokenPosition, token_pos_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InvokeMathCFunctionInstr,
                                          VariadicDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeMathCFunctionInstr);
};
9647
9648class ExtractNthOutputInstr : public TemplateDefinition<1, NoThrow, Pure> {
9649 public:
9650 // Extract the Nth output register from value.
9651 ExtractNthOutputInstr(Value* value,
9652 intptr_t n,
9653 Representation definition_rep,
9654 intptr_t definition_cid)
9655 : index_(n),
9656 definition_rep_(definition_rep),
9657 definition_cid_(definition_cid) {
9658 SetInputAt(i: 0, value);
9659 }
9660
9661 Value* value() const { return inputs_[0]; }
9662
9663 DECLARE_INSTRUCTION(ExtractNthOutput)
9664 DECLARE_ATTRIBUTES(index())
9665
9666 virtual CompileType ComputeType() const;
9667 virtual bool ComputeCanDeoptimize() const { return false; }
9668
9669 intptr_t index() const { return index_; }
9670
9671 virtual Representation representation() const { return definition_rep_; }
9672
9673 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9674 ASSERT(idx == 0);
9675 if (representation() == kTagged) {
9676 return kPairOfTagged;
9677 }
9678 UNREACHABLE();
9679 return definition_rep_;
9680 }
9681
9682 virtual bool AttributesEqual(const Instruction& other) const {
9683 auto const other_extract = other.AsExtractNthOutput();
9684 return (other_extract->representation() == representation()) &&
9685 (other_extract->index() == index());
9686 }
9687
9688 PRINT_OPERANDS_TO_SUPPORT
9689
9690#define FIELD_LIST(F) \
9691 F(const intptr_t, index_) \
9692 F(const Representation, definition_rep_) \
9693 F(const intptr_t, definition_cid_)
9694
9695 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ExtractNthOutputInstr,
9696 TemplateDefinition,
9697 FIELD_LIST)
9698#undef FIELD_LIST
9699
9700 private:
9701 DISALLOW_COPY_AND_ASSIGN(ExtractNthOutputInstr);
9702};
9703
9704// Combines 2 values into a pair with kPairOfTagged representation.
9705class MakePairInstr : public TemplateDefinition<2, NoThrow, Pure> {
9706 public:
9707 MakePairInstr(Value* x, Value* y) {
9708 SetInputAt(i: 0, value: x);
9709 SetInputAt(i: 1, value: y);
9710 }
9711
9712 DECLARE_INSTRUCTION(MakePair)
9713
9714 virtual CompileType ComputeType() const;
9715 virtual bool ComputeCanDeoptimize() const { return false; }
9716
9717 virtual Representation representation() const { return kPairOfTagged; }
9718
9719 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9720 ASSERT((0 <= idx) && (idx < InputCount()));
9721 return kTagged;
9722 }
9723
9724 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9725
9726 DECLARE_EMPTY_SERIALIZATION(MakePairInstr, TemplateDefinition)
9727
9728 private:
9729 DISALLOW_COPY_AND_ASSIGN(MakePairInstr);
9730};
9731
9732class TruncDivModInstr : public TemplateDefinition<2, NoThrow, Pure> {
9733 public:
9734 TruncDivModInstr(Value* lhs, Value* rhs, intptr_t deopt_id);
9735
9736 static intptr_t OutputIndexOf(Token::Kind token);
9737
9738 virtual CompileType ComputeType() const;
9739
9740 virtual bool ComputeCanDeoptimize() const { return true; }
9741
9742 virtual Representation representation() const { return kPairOfTagged; }
9743
9744 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9745 ASSERT((0 <= idx) && (idx < InputCount()));
9746 return kTagged;
9747 }
9748
9749 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
9750
9751 DECLARE_INSTRUCTION(TruncDivMod)
9752
9753 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9754
9755 PRINT_OPERANDS_TO_SUPPORT
9756
9757 DECLARE_EMPTY_SERIALIZATION(TruncDivModInstr, TemplateDefinition)
9758
9759 private:
9760 Range* divisor_range() const {
9761 // Note: this range is only used to remove check for zero divisor from
9762 // the emitted pattern. It is not used for deciding whether instruction
9763 // will deoptimize or not - that is why it is ok to access range of
9764 // the definition directly. Otherwise range analysis or another pass
9765 // needs to cache range of the divisor in the operation to prevent
9766 // bugs when range information gets out of sync with the final decision
9767 // whether some instruction can deoptimize or not made in
9768 // EliminateEnvironments().
9769 return InputAt(i: 1)->definition()->range();
9770 }
9771
9772 DISALLOW_COPY_AND_ASSIGN(TruncDivModInstr);
9773};
9774
// Deoptimizes unless the class id of the input value is contained in the
// given set of class-id ranges `cids`.
class CheckClassInstr : public TemplateInstruction<1, NoThrow> {
 public:
  CheckClassInstr(Value* value,
                  intptr_t deopt_id,
                  const Cids& cids,
                  const InstructionSource& source);

  DECLARE_INSTRUCTION(CheckClass)

  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual TokenPosition token_pos() const { return token_pos_; }

  Value* value() const { return inputs_[0]; }

  const Cids& cids() const { return cids_; }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  // True if the check reduces to a comparison against null (in either
  // direction, see the two predicates below).
  bool IsNullCheck() const { return IsDeoptIfNull() || IsDeoptIfNotNull(); }

  bool IsDeoptIfNull() const;
  bool IsDeoptIfNotNull() const;

  // Bit-test form: for compact cid ranges the check can be emitted with a
  // mask computed by ComputeCidMask() (see EmitBitTest below).
  bool IsBitTest() const;
  static bool IsCompactCidRange(const Cids& cids);
  intptr_t ComputeCidMask() const;

  // Side-effect free check: eligible for CSE.
  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool AttributesEqual(const Instruction& other) const;

  PRINT_OPERANDS_TO_SUPPORT

#define FIELD_LIST(F)                                                          \
  F(const Cids&, cids_)                                                        \
  F(bool, is_bit_test_)                                                        \
  F(const TokenPosition, token_pos_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  // Backend helpers for emitting the range / bit-test / null variants of
  // the check.
  int EmitCheckCid(FlowGraphCompiler* compiler,
                   int bias,
                   intptr_t cid_start,
                   intptr_t cid_end,
                   bool is_last,
                   compiler::Label* is_ok,
                   compiler::Label* deopt,
                   bool use_near_jump);
  void EmitBitTest(FlowGraphCompiler* compiler,
                   intptr_t min,
                   intptr_t max,
                   intptr_t mask,
                   compiler::Label* deopt);
  void EmitNullCheck(FlowGraphCompiler* compiler, compiler::Label* deopt);

  DISALLOW_COPY_AND_ASSIGN(CheckClassInstr);
};
9838
9839class CheckSmiInstr : public TemplateInstruction<1, NoThrow, Pure> {
9840 public:
9841 CheckSmiInstr(Value* value,
9842 intptr_t deopt_id,
9843 const InstructionSource& source)
9844 : TemplateInstruction(source, deopt_id), token_pos_(source.token_pos) {
9845 SetInputAt(i: 0, value);
9846 }
9847
9848 Value* value() const { return inputs_[0]; }
9849 virtual TokenPosition token_pos() const { return token_pos_; }
9850
9851 DECLARE_INSTRUCTION(CheckSmi)
9852
9853 virtual bool ComputeCanDeoptimize() const { return true; }
9854
9855 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
9856
9857 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9858
9859#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
9860
9861 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckSmiInstr,
9862 TemplateInstruction,
9863 FIELD_LIST)
9864#undef FIELD_LIST
9865
9866 private:
9867 DISALLOW_COPY_AND_ASSIGN(CheckSmiInstr);
9868};
9869
9870// CheckNull instruction takes one input (`value`) and tests it for `null`.
9871// If `value` is `null`, then an exception is thrown according to
9872// `exception_type`. Otherwise, execution proceeds to the next instruction.
9873class CheckNullInstr : public TemplateDefinition<1, Throws, Pure> {
9874 public:
9875 enum ExceptionType {
9876 kNoSuchMethod,
9877 kArgumentError,
9878 kCastError,
9879 };
9880
9881 CheckNullInstr(Value* value,
9882 const String& function_name,
9883 intptr_t deopt_id,
9884 const InstructionSource& source,
9885 ExceptionType exception_type = kNoSuchMethod)
9886 : TemplateDefinition(source, deopt_id),
9887 token_pos_(source.token_pos),
9888 function_name_(function_name),
9889 exception_type_(exception_type) {
9890 DEBUG_ASSERT(function_name.IsNotTemporaryScopedHandle());
9891 ASSERT(function_name.IsSymbol());
9892 SetInputAt(i: 0, value);
9893 }
9894
9895 Value* value() const { return inputs_[0]; }
9896 virtual TokenPosition token_pos() const { return token_pos_; }
9897 const String& function_name() const { return function_name_; }
9898 ExceptionType exception_type() const { return exception_type_; }
9899
9900 virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
9901 return SlowPathSharingSupported(is_optimizing);
9902 }
9903
9904 DECLARE_INSTRUCTION(CheckNull)
9905
9906 virtual CompileType ComputeType() const;
9907 virtual bool RecomputeType();
9908
9909 // CheckNull can implicitly call Dart code (NoSuchMethodError constructor),
9910 // so it needs a deopt ID in optimized and unoptimized code.
9911 virtual bool ComputeCanDeoptimize() const {
9912 return !CompilerState::Current().is_aot();
9913 }
9914 virtual bool CanBecomeDeoptimizationTarget() const { return true; }
9915
9916 virtual Definition* Canonicalize(FlowGraph* flow_graph);
9917
9918 virtual bool AttributesEqual(const Instruction& other) const;
9919
9920 static void AddMetadataForRuntimeCall(CheckNullInstr* check_null,
9921 FlowGraphCompiler* compiler);
9922
9923 virtual Value* RedefinedValue() const;
9924
9925 PRINT_OPERANDS_TO_SUPPORT
9926
9927#define FIELD_LIST(F) \
9928 F(const TokenPosition, token_pos_) \
9929 F(const String&, function_name_) \
9930 F(const ExceptionType, exception_type_)
9931
9932 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckNullInstr,
9933 TemplateDefinition,
9934 FIELD_LIST)
9935#undef FIELD_LIST
9936
9937 private:
9938 DISALLOW_COPY_AND_ASSIGN(CheckNullInstr);
9939};
9940
9941class CheckClassIdInstr : public TemplateInstruction<1, NoThrow> {
9942 public:
9943 CheckClassIdInstr(Value* value, CidRangeValue cids, intptr_t deopt_id)
9944 : TemplateInstruction(deopt_id), cids_(cids) {
9945 SetInputAt(i: 0, value);
9946 }
9947
9948 Value* value() const { return inputs_[0]; }
9949 const CidRangeValue& cids() const { return cids_; }
9950
9951 DECLARE_INSTRUCTION(CheckClassId)
9952
9953 virtual bool ComputeCanDeoptimize() const { return true; }
9954
9955 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
9956
9957 virtual bool AllowsCSE() const { return true; }
9958 virtual bool HasUnknownSideEffects() const { return false; }
9959
9960 virtual bool AttributesEqual(const Instruction& other) const {
9961 return other.Cast<CheckClassIdInstr>()->cids().Equals(other: cids_);
9962 }
9963
9964 PRINT_OPERANDS_TO_SUPPORT
9965
9966#define FIELD_LIST(F) F(CidRangeValue, cids_)
9967
9968 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassIdInstr,
9969 TemplateInstruction,
9970 FIELD_LIST)
9971#undef FIELD_LIST
9972
9973 private:
9974 bool Contains(intptr_t cid) const;
9975
9976 DISALLOW_COPY_AND_ASSIGN(CheckClassIdInstr);
9977};
9978
9979// Base class for speculative [CheckArrayBoundInstr] and
9980// non-speculative [GenericCheckBoundInstr] bounds checking.
9981class CheckBoundBase : public TemplateDefinition<2, NoThrow, Pure> {
9982 public:
9983 CheckBoundBase(Value* length, Value* index, intptr_t deopt_id)
9984 : TemplateDefinition(deopt_id) {
9985 SetInputAt(i: kLengthPos, value: length);
9986 SetInputAt(i: kIndexPos, value: index);
9987 }
9988
9989 Value* length() const { return inputs_[kLengthPos]; }
9990 Value* index() const { return inputs_[kIndexPos]; }
9991
9992 virtual Definition* Canonicalize(FlowGraph* flow_graph);
9993
9994 virtual CheckBoundBase* AsCheckBoundBase() { return this; }
9995 virtual const CheckBoundBase* AsCheckBoundBase() const { return this; }
9996 virtual Value* RedefinedValue() const;
9997
9998 // Returns true if the bounds check can be eliminated without
9999 // changing the semantics (viz. 0 <= index < length).
10000 bool IsRedundant(bool use_loops = false);
10001
10002 // Give a name to the location/input indices.
10003 enum { kLengthPos = 0, kIndexPos = 1 };
10004
10005 DECLARE_EMPTY_SERIALIZATION(CheckBoundBase, TemplateDefinition)
10006
10007 private:
10008 DISALLOW_COPY_AND_ASSIGN(CheckBoundBase);
10009};
10010
10011// Performs an array bounds check, where
10012// safe_index := CheckArrayBound(length, index)
10013// returns the "safe" index when
10014// 0 <= index < length
10015// or otherwise deoptimizes (viz. speculative).
class CheckArrayBoundInstr : public CheckBoundBase {
 public:
  CheckArrayBoundInstr(Value* length, Value* index, intptr_t deopt_id)
      : CheckBoundBase(length, index, deopt_id), generalized_(false) {}

  DECLARE_INSTRUCTION(CheckArrayBound)

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  // Speculative variant: failure triggers deoptimization rather than an
  // exception (contrast with GenericCheckBoundInstr).
  virtual bool ComputeCanDeoptimize() const { return true; }

  void mark_generalized() { generalized_ = true; }

  // Returns the length offset for array and string types.
  static intptr_t LengthOffsetFor(intptr_t class_id);

  static bool IsFixedLengthArrayType(intptr_t class_id);

  virtual bool AttributesEqual(const Instruction& other) const { return true; }

#define FIELD_LIST(F) F(bool, generalized_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckArrayBoundInstr,
                                          CheckBoundBase,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(CheckArrayBoundInstr);
};
10047
10048// Performs an array bounds check, where
10049// safe_index := GenericCheckBound(length, index)
10050// returns the "safe" index when
10051// 0 <= index < length
10052// or otherwise throws an out-of-bounds exception (viz. non-speculative).
class GenericCheckBoundInstr : public CheckBoundBase {
 public:
  // We prefer to have unboxed inputs on 64-bit where values can fit into a
  // register.
  static bool UseUnboxedRepresentation() {
    return compiler::target::kWordSize == 8;
  }

  GenericCheckBoundInstr(Value* length, Value* index, intptr_t deopt_id)
      : CheckBoundBase(length, index, deopt_id) {}

  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_INSTRUCTION(GenericCheckBound)

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  // Non-speculative check: it never uses a deopt target of its own.
  virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }

  virtual Representation representation() const {
    return UseUnboxedRepresentation() ? kUnboxedInt64 : kTagged;
  }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == kIndexPos || idx == kLengthPos);
    return UseUnboxedRepresentation() ? kUnboxedInt64 : kTagged;
  }

  // GenericCheckBound can implicitly call Dart code (RangeError or
  // ArgumentError constructor), so it can lazily deopt.
  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }

  // Throws instead of deoptimizing when the index is out of bounds.
  virtual bool MayThrow() const { return true; }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
    return SlowPathSharingSupported(is_optimizing);
  }

  DECLARE_EMPTY_SERIALIZATION(GenericCheckBoundInstr, CheckBoundBase)

 private:
  DISALLOW_COPY_AND_ASSIGN(GenericCheckBoundInstr);
};
10103
10104class CheckWritableInstr : public TemplateDefinition<1, Throws, Pure> {
10105 public:
10106 CheckWritableInstr(Value* array,
10107 intptr_t deopt_id,
10108 const InstructionSource& source)
10109 : TemplateDefinition(source, deopt_id) {
10110 SetInputAt(i: 0, value: array);
10111 }
10112
10113 virtual bool AttributesEqual(const Instruction& other) const { return true; }
10114
10115 DECLARE_INSTRUCTION(CheckWritable)
10116
10117 Value* value() const { return inputs_[0]; }
10118
10119 virtual Definition* Canonicalize(FlowGraph* flow_graph);
10120
10121 virtual Value* RedefinedValue() const;
10122
10123 virtual bool ComputeCanDeoptimize() const { return false; }
10124
10125 DECLARE_EMPTY_SERIALIZATION(CheckWritableInstr, TemplateDefinition)
10126
10127 private:
10128 DISALLOW_COPY_AND_ASSIGN(CheckWritableInstr);
10129};
10130
10131// Instruction evaluates the given comparison and deoptimizes if it evaluates
10132// to false.
10133class CheckConditionInstr : public Instruction {
10134 public:
10135 CheckConditionInstr(ComparisonInstr* comparison, intptr_t deopt_id)
10136 : Instruction(deopt_id), comparison_(comparison) {
10137 ASSERT(comparison->ArgumentCount() == 0);
10138 ASSERT(comparison->env() == nullptr);
10139 for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
10140 comparison->InputAt(i)->set_instruction(this);
10141 }
10142 }
10143
10144 ComparisonInstr* comparison() const { return comparison_; }
10145
10146 DECLARE_INSTRUCTION(CheckCondition)
10147
10148 virtual bool ComputeCanDeoptimize() const { return true; }
10149
10150 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
10151
10152 virtual bool AllowsCSE() const { return true; }
10153 virtual bool HasUnknownSideEffects() const { return false; }
10154
10155 virtual bool AttributesEqual(const Instruction& other) const {
10156 return other.AsCheckCondition()->comparison()->AttributesEqual(
10157 other: *comparison());
10158 }
10159
10160 virtual intptr_t InputCount() const { return comparison()->InputCount(); }
10161 virtual Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }
10162
10163 virtual bool MayThrow() const { return false; }
10164
10165 PRINT_OPERANDS_TO_SUPPORT
10166
10167#define FIELD_LIST(F) F(ComparisonInstr*, comparison_)
10168
10169 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckConditionInstr,
10170 Instruction,
10171 FIELD_LIST)
10172#undef FIELD_LIST
10173 DECLARE_EXTRA_SERIALIZATION
10174
10175 private:
10176 virtual void RawSetInputAt(intptr_t i, Value* value) {
10177 comparison()->RawSetInputAt(i, value);
10178 }
10179
10180 DISALLOW_COPY_AND_ASSIGN(CheckConditionInstr);
10181};
10182
10183class IntConverterInstr : public TemplateDefinition<1, NoThrow, Pure> {
10184 public:
10185 IntConverterInstr(Representation from,
10186 Representation to,
10187 Value* value,
10188 intptr_t deopt_id)
10189 : TemplateDefinition(deopt_id),
10190 from_representation_(from),
10191 to_representation_(to),
10192 is_truncating_(to == kUnboxedUint32) {
10193 ASSERT(from != to);
10194 ASSERT(from == kUnboxedInt64 || from == kUnboxedUint32 ||
10195 from == kUnboxedInt32 || from == kUntagged);
10196 ASSERT(to == kUnboxedInt64 || to == kUnboxedUint32 || to == kUnboxedInt32 ||
10197 to == kUntagged);
10198 ASSERT(from != kUntagged ||
10199 (to == kUnboxedIntPtr || to == kUnboxedFfiIntPtr));
10200 ASSERT(to != kUntagged ||
10201 (from == kUnboxedIntPtr || from == kUnboxedFfiIntPtr));
10202 SetInputAt(i: 0, value);
10203 }
10204
10205 Value* value() const { return inputs_[0]; }
10206
10207 Representation from() const { return from_representation_; }
10208 Representation to() const { return to_representation_; }
10209 bool is_truncating() const { return is_truncating_; }
10210
10211 void mark_truncating() { is_truncating_ = true; }
10212
10213 Definition* Canonicalize(FlowGraph* flow_graph);
10214
10215 virtual bool ComputeCanDeoptimize() const;
10216
10217 virtual Representation representation() const { return to(); }
10218
10219 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
10220 ASSERT(idx == 0);
10221 return from();
10222 }
10223
10224 virtual bool AttributesEqual(const Instruction& other) const {
10225 ASSERT(other.IsIntConverter());
10226 auto const converter = other.AsIntConverter();
10227 return (converter->from() == from()) && (converter->to() == to()) &&
10228 (converter->is_truncating() == is_truncating());
10229 }
10230
10231 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
10232
10233 virtual void InferRange(RangeAnalysis* analysis, Range* range);
10234
10235 virtual CompileType ComputeType() const {
10236 // TODO(vegorov) use range information to improve type.
10237 return CompileType::Int();
10238 }
10239
10240 DECLARE_INSTRUCTION(IntConverter);
10241
10242 PRINT_OPERANDS_TO_SUPPORT
10243
10244#define FIELD_LIST(F) \
10245 F(const Representation, from_representation_) \
10246 F(const Representation, to_representation_) \
10247 F(bool, is_truncating_)
10248
10249 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(IntConverterInstr,
10250 TemplateDefinition,
10251 FIELD_LIST)
10252#undef FIELD_LIST
10253
10254 private:
10255 DISALLOW_COPY_AND_ASSIGN(IntConverterInstr);
10256};
10257
10258// Moves a floating-point value between CPU and FPU registers. Used to implement
10259// "softfp" calling conventions, where FPU arguments/return values are passed in
10260// normal CPU registers.
10261class BitCastInstr : public TemplateDefinition<1, NoThrow, Pure> {
10262 public:
10263 BitCastInstr(Representation from, Representation to, Value* value)
10264 : TemplateDefinition(DeoptId::kNone),
10265 from_representation_(from),
10266 to_representation_(to) {
10267 ASSERT(from != to);
10268 ASSERT((to == kUnboxedInt32 && from == kUnboxedFloat) ||
10269 (to == kUnboxedFloat && from == kUnboxedInt32) ||
10270 (to == kUnboxedInt64 && from == kUnboxedDouble) ||
10271 (to == kUnboxedDouble && from == kUnboxedInt64));
10272 SetInputAt(i: 0, value);
10273 }
10274
10275 Value* value() const { return inputs_[0]; }
10276
10277 Representation from() const { return from_representation_; }
10278 Representation to() const { return to_representation_; }
10279
10280 virtual bool ComputeCanDeoptimize() const { return false; }
10281
10282 virtual Representation representation() const { return to(); }
10283
10284 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
10285 ASSERT(idx == 0);
10286 return from();
10287 }
10288
10289 virtual bool AttributesEqual(const Instruction& other) const {
10290 ASSERT(other.IsBitCast());
10291 auto const converter = other.AsBitCast();
10292 return converter->from() == from() && converter->to() == to();
10293 }
10294
10295 virtual CompileType ComputeType() const { return CompileType::Dynamic(); }
10296
10297 DECLARE_INSTRUCTION(BitCast);
10298
10299 PRINT_OPERANDS_TO_SUPPORT
10300
10301#define FIELD_LIST(F) \
10302 F(const Representation, from_representation_) \
10303 F(const Representation, to_representation_)
10304
10305 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BitCastInstr,
10306 TemplateDefinition,
10307 FIELD_LIST)
10308#undef FIELD_LIST
10309
10310 private:
10311 DISALLOW_COPY_AND_ASSIGN(BitCastInstr);
10312};
10313
// Loads the current Thread pointer as an untagged value.
class LoadThreadInstr : public TemplateDefinition<0, NoThrow, Pure> {
 public:
  LoadThreadInstr() : TemplateDefinition(DeoptId::kNone) {}

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUntagged; }

  // No inputs, so no input representation can ever be requested.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    UNREACHABLE();
  }

  // NOTE(review): typed as Int, presumably because the untagged pointer is
  // treated as a machine word -- confirm against uses.
  virtual CompileType ComputeType() const { return CompileType::Int(); }

  // CSE is allowed. The thread should always be the same value.
  virtual bool AttributesEqual(const Instruction& other) const {
    ASSERT(other.IsLoadThread());
    return true;
  }

  DECLARE_INSTRUCTION(LoadThread);

  DECLARE_EMPTY_SERIALIZATION(LoadThreadInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(LoadThreadInstr);
};
10341
10342// SimdOpInstr
10343//
10344// All SIMD intrinsics and recognized methods are represented via instances
10345// of SimdOpInstr, a particular type of SimdOp is selected by SimdOpInstr::Kind.
10346//
10347// Defines below are used to construct SIMD_OP_LIST - a list of all SIMD
10348// operations. SIMD_OP_LIST contains information such as arity, input types and
10349// output type for each SIMD op and is used to derive things like input
10350// and output representations, type of return value, etc.
10351//
10352// Lists of SIMD ops are defined using macro M, OP and BINARY_OP which are
10353// expected to have the following signature:
10354//
10355// (Arity, HasMask, Name, (In_0, ..., In_Arity), Out)
10356//
10357// where:
10358//
//   HasMask is either _ or MASK and determines whether the operation has a
//   constant mask attribute
10361// In_0, ..., In_Arity are input types
10362// Out is output type
10363//
10364
// A binary SIMD op with the given name that has signature T x T -> T.
#define SIMD_BINARY_OP(M, T, Name) M(2, _, T##Name, (T, T), T)

// List of SIMD_BINARY_OPs common for Float32x4 or Float64x2.
// Note: M for recognized methods and OP for operators.
#define SIMD_BINARY_FLOAT_OP_LIST(M, OP, T)                                    \
  SIMD_BINARY_OP(OP, T, Add)                                                   \
  SIMD_BINARY_OP(OP, T, Sub)                                                   \
  SIMD_BINARY_OP(OP, T, Mul)                                                   \
  SIMD_BINARY_OP(OP, T, Div)                                                   \
  SIMD_BINARY_OP(M, T, Min)                                                    \
  SIMD_BINARY_OP(M, T, Max)

// List of SIMD_BINARY_OP for Int32x4.
// Note: M for recognized methods and OP for operators.
#define SIMD_BINARY_INTEGER_OP_LIST(M, OP, T)                                  \
  SIMD_BINARY_OP(OP, T, Add)                                                   \
  SIMD_BINARY_OP(OP, T, Sub)                                                   \
  SIMD_BINARY_OP(OP, T, BitAnd)                                                \
  SIMD_BINARY_OP(OP, T, BitOr)                                                 \
  SIMD_BINARY_OP(OP, T, BitXor)

// Given a signature of a given SIMD op construct its per component variations
// (one op per X/Y/Z/W lane).
#define SIMD_PER_COMPONENT_XYZW(M, Arity, Name, Inputs, Output)                \
  M(Arity, _, Name##X, Inputs, Output)                                         \
  M(Arity, _, Name##Y, Inputs, Output)                                         \
  M(Arity, _, Name##Z, Inputs, Output)                                         \
  M(Arity, _, Name##W, Inputs, Output)

// Define conversion between two SIMD types.
#define SIMD_CONVERSION(M, FromType, ToType)                                   \
  M(1, _, FromType##To##ToType, (FromType), ToType)

// List of all recognized SIMD operations.
// Note: except for operations that map to operators (Add, Mul, Sub, Div,
// BitXor, BitOr) all other operations must match names used by
// MethodRecognizer. This allows to autogenerate conversion from
// MethodRecognizer::Kind into SimdOpInstr::Kind (see KindForMethod helper).
// Note: M is for those SimdOp that are recognized methods and BINARY_OP
// is for operators.
#define SIMD_OP_LIST(M, BINARY_OP)                                             \
  SIMD_BINARY_FLOAT_OP_LIST(M, BINARY_OP, Float32x4)                           \
  SIMD_BINARY_FLOAT_OP_LIST(M, BINARY_OP, Float64x2)                           \
  SIMD_BINARY_INTEGER_OP_LIST(M, BINARY_OP, Int32x4)                           \
  SIMD_PER_COMPONENT_XYZW(M, 1, Float32x4Get, (Float32x4), Double)             \
  SIMD_PER_COMPONENT_XYZW(M, 2, Float32x4With, (Double, Float32x4), Float32x4) \
  SIMD_PER_COMPONENT_XYZW(M, 1, Int32x4GetFlag, (Int32x4), Bool)               \
  SIMD_PER_COMPONENT_XYZW(M, 2, Int32x4WithFlag, (Int32x4, Bool), Int32x4)     \
  M(1, MASK, Float32x4Shuffle, (Float32x4), Float32x4)                         \
  M(1, MASK, Int32x4Shuffle, (Int32x4), Int32x4)                               \
  M(2, MASK, Float32x4ShuffleMix, (Float32x4, Float32x4), Float32x4)           \
  M(2, MASK, Int32x4ShuffleMix, (Int32x4, Int32x4), Int32x4)                   \
  M(2, _, Float32x4Equal, (Float32x4, Float32x4), Int32x4)                     \
  M(2, _, Float32x4GreaterThan, (Float32x4, Float32x4), Int32x4)               \
  M(2, _, Float32x4GreaterThanOrEqual, (Float32x4, Float32x4), Int32x4)        \
  M(2, _, Float32x4LessThan, (Float32x4, Float32x4), Int32x4)                  \
  M(2, _, Float32x4LessThanOrEqual, (Float32x4, Float32x4), Int32x4)           \
  M(2, _, Float32x4NotEqual, (Float32x4, Float32x4), Int32x4)                  \
  M(4, _, Int32x4FromInts, (Int32, Int32, Int32, Int32), Int32x4)              \
  M(4, _, Int32x4FromBools, (Bool, Bool, Bool, Bool), Int32x4)                 \
  M(4, _, Float32x4FromDoubles, (Double, Double, Double, Double), Float32x4)   \
  M(2, _, Float64x2FromDoubles, (Double, Double), Float64x2)                   \
  M(0, _, Float32x4Zero, (), Float32x4)                                        \
  M(0, _, Float64x2Zero, (), Float64x2)                                        \
  M(1, _, Float32x4Splat, (Double), Float32x4)                                 \
  M(1, _, Float64x2Splat, (Double), Float64x2)                                 \
  M(1, _, Int32x4GetSignMask, (Int32x4), Int8)                                 \
  M(1, _, Float32x4GetSignMask, (Float32x4), Int8)                             \
  M(1, _, Float64x2GetSignMask, (Float64x2), Int8)                             \
  M(2, _, Float32x4Scale, (Double, Float32x4), Float32x4)                      \
  M(2, _, Float64x2Scale, (Float64x2, Double), Float64x2)                      \
  M(1, _, Float32x4Sqrt, (Float32x4), Float32x4)                               \
  M(1, _, Float64x2Sqrt, (Float64x2), Float64x2)                               \
  M(1, _, Float32x4Reciprocal, (Float32x4), Float32x4)                         \
  M(1, _, Float32x4ReciprocalSqrt, (Float32x4), Float32x4)                     \
  M(1, _, Float32x4Negate, (Float32x4), Float32x4)                             \
  M(1, _, Float64x2Negate, (Float64x2), Float64x2)                             \
  M(1, _, Float32x4Abs, (Float32x4), Float32x4)                                \
  M(1, _, Float64x2Abs, (Float64x2), Float64x2)                                \
  M(3, _, Float32x4Clamp, (Float32x4, Float32x4, Float32x4), Float32x4)        \
  M(3, _, Float64x2Clamp, (Float64x2, Float64x2, Float64x2), Float64x2)        \
  M(1, _, Float64x2GetX, (Float64x2), Double)                                  \
  M(1, _, Float64x2GetY, (Float64x2), Double)                                  \
  M(2, _, Float64x2WithX, (Float64x2, Double), Float64x2)                      \
  M(2, _, Float64x2WithY, (Float64x2, Double), Float64x2)                      \
  M(3, _, Int32x4Select, (Int32x4, Float32x4, Float32x4), Float32x4)           \
  SIMD_CONVERSION(M, Float32x4, Int32x4)                                       \
  SIMD_CONVERSION(M, Int32x4, Float32x4)                                       \
  SIMD_CONVERSION(M, Float32x4, Float64x2)                                     \
  SIMD_CONVERSION(M, Float64x2, Float32x4)
10455
10456class SimdOpInstr : public Definition {
10457 public:
10458 enum Kind {
10459#define DECLARE_ENUM(Arity, Mask, Name, ...) k##Name,
10460 SIMD_OP_LIST(DECLARE_ENUM, DECLARE_ENUM)
10461#undef DECLARE_ENUM
10462 kIllegalSimdOp,
10463 };
10464
10465 // Create SimdOp from the arguments of the given call and the given receiver.
10466 static SimdOpInstr* CreateFromCall(Zone* zone,
10467 MethodRecognizer::Kind kind,
10468 Definition* receiver,
10469 Instruction* call,
10470 intptr_t mask = 0);
10471
10472 // Create SimdOp from the arguments of the given factory call.
10473 static SimdOpInstr* CreateFromFactoryCall(Zone* zone,
10474 MethodRecognizer::Kind kind,
10475 Instruction* call);
10476
10477 // Create a binary SimdOp instr.
10478 static SimdOpInstr* Create(Kind kind,
10479 Value* left,
10480 Value* right,
10481 intptr_t deopt_id) {
10482 return new SimdOpInstr(kind, left, right, deopt_id);
10483 }
10484
10485 // Create a binary SimdOp instr.
10486 static SimdOpInstr* Create(MethodRecognizer::Kind kind,
10487 Value* left,
10488 Value* right,
10489 intptr_t deopt_id) {
10490 return new SimdOpInstr(KindForMethod(method_kind: kind), left, right, deopt_id);
10491 }
10492
10493 // Create a unary SimdOp.
10494 static SimdOpInstr* Create(MethodRecognizer::Kind kind,
10495 Value* left,
10496 intptr_t deopt_id) {
10497 return new SimdOpInstr(KindForMethod(method_kind: kind), left, deopt_id);
10498 }
10499
10500 static Kind KindForOperator(MethodRecognizer::Kind kind);
10501
10502 static Kind KindForMethod(MethodRecognizer::Kind method_kind);
10503
10504 // Convert a combination of SIMD cid and an arithmetic token into Kind, e.g.
10505 // Float32x4 and Token::kADD becomes Float32x4Add.
10506 static Kind KindForOperator(intptr_t cid, Token::Kind op);
10507
10508 virtual intptr_t InputCount() const;
10509 virtual Value* InputAt(intptr_t i) const {
10510 ASSERT(0 <= i && i < InputCount());
10511 return inputs_[i];
10512 }
10513
10514 Kind kind() const { return kind_; }
10515 intptr_t mask() const {
10516 ASSERT(HasMask());
10517 return mask_;
10518 }
10519
10520 virtual Representation representation() const;
10521 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
10522
10523 virtual CompileType ComputeType() const;
10524
10525 virtual bool MayThrow() const { return false; }
10526 virtual bool ComputeCanDeoptimize() const { return false; }
10527
10528 virtual intptr_t DeoptimizationTarget() const {
10529 // Direct access since this instruction cannot deoptimize, and the deopt-id
10530 // was inherited from another instruction that could deoptimize.
10531 return GetDeoptId();
10532 }
10533
10534 virtual bool HasUnknownSideEffects() const { return false; }
10535 virtual bool AllowsCSE() const { return true; }
10536
10537 virtual bool AttributesEqual(const Instruction& other) const {
10538 auto const other_op = other.AsSimdOp();
10539 return kind() == other_op->kind() &&
10540 (!HasMask() || mask() == other_op->mask());
10541 }
10542
10543 DECLARE_INSTRUCTION(SimdOp)
10544 PRINT_OPERANDS_TO_SUPPORT
10545
10546#define FIELD_LIST(F) \
10547 F(const Kind, kind_) \
10548 F(intptr_t, mask_)
10549
10550 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SimdOpInstr, Definition, FIELD_LIST)
10551#undef FIELD_LIST
10552
10553 private:
10554 SimdOpInstr(Kind kind, intptr_t deopt_id)
10555 : Definition(deopt_id), kind_(kind) {}
10556
10557 SimdOpInstr(Kind kind, Value* left, intptr_t deopt_id)
10558 : Definition(deopt_id), kind_(kind) {
10559 SetInputAt(i: 0, value: left);
10560 }
10561
10562 SimdOpInstr(Kind kind, Value* left, Value* right, intptr_t deopt_id)
10563 : Definition(deopt_id), kind_(kind) {
10564 SetInputAt(i: 0, value: left);
10565 SetInputAt(i: 1, value: right);
10566 }
10567
10568 bool HasMask() const;
10569 void set_mask(intptr_t mask) { mask_ = mask; }
10570
10571 virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
10572
10573 // We consider SimdOpInstr to be very uncommon so we don't optimize them for
10574 // size. Any instance of SimdOpInstr has enough space to fit any variation.
10575 // TODO(dartbug.com/30949) optimize this for size.
10576 Value* inputs_[4];
10577
10578 DISALLOW_COPY_AND_ASSIGN(SimdOpInstr);
10579};
10580
10581// Generic instruction to call 1-argument stubs specified using [StubId].
10582class Call1ArgStubInstr : public TemplateDefinition<1, Throws> {
10583 public:
10584 enum class StubId {
10585 kCloneSuspendState,
10586 kInitAsync,
10587 kInitAsyncStar,
10588 kInitSyncStar,
10589 kFfiAsyncCallbackSend,
10590 };
10591
10592 Call1ArgStubInstr(const InstructionSource& source,
10593 StubId stub_id,
10594 Value* operand,
10595 intptr_t deopt_id)
10596 : TemplateDefinition(source, deopt_id),
10597 stub_id_(stub_id),
10598 token_pos_(source.token_pos) {
10599 SetInputAt(i: 0, value: operand);
10600 }
10601
10602 Value* operand() const { return inputs_[0]; }
10603 StubId stub_id() const { return stub_id_; }
10604 virtual TokenPosition token_pos() const { return token_pos_; }
10605
10606 virtual bool CanCallDart() const { return true; }
10607 virtual bool ComputeCanDeoptimize() const { return true; }
10608 virtual bool HasUnknownSideEffects() const { return true; }
10609 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
10610 return InputCount();
10611 }
10612
10613 DECLARE_INSTRUCTION(Call1ArgStub);
10614 PRINT_OPERANDS_TO_SUPPORT
10615
10616#define FIELD_LIST(F) \
10617 F(const StubId, stub_id_) \
10618 F(const TokenPosition, token_pos_)
10619
10620 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Call1ArgStubInstr,
10621 TemplateDefinition,
10622 FIELD_LIST)
10623#undef FIELD_LIST
10624
10625 private:
10626 DISALLOW_COPY_AND_ASSIGN(Call1ArgStubInstr);
10627};
10628
10629// Suspends execution using the suspend stub specified using [StubId].
10630class SuspendInstr : public TemplateDefinition<2, Throws> {
10631 public:
10632 enum class StubId {
10633 kAwait,
10634 kAwaitWithTypeCheck,
10635 kYieldAsyncStar,
10636 kSuspendSyncStarAtStart,
10637 kSuspendSyncStarAtYield,
10638 };
10639
10640 SuspendInstr(const InstructionSource& source,
10641 StubId stub_id,
10642 Value* operand,
10643 Value* type_args,
10644 intptr_t deopt_id,
10645 intptr_t resume_deopt_id)
10646 : TemplateDefinition(source, deopt_id),
10647 stub_id_(stub_id),
10648 resume_deopt_id_(resume_deopt_id),
10649 token_pos_(source.token_pos) {
10650 SetInputAt(i: 0, value: operand);
10651 if (has_type_args()) {
10652 SetInputAt(i: 1, value: type_args);
10653 } else {
10654 ASSERT(type_args == nullptr);
10655 }
10656 }
10657
10658 bool has_type_args() const { return stub_id_ == StubId::kAwaitWithTypeCheck; }
10659 virtual intptr_t InputCount() const { return has_type_args() ? 2 : 1; }
10660
10661 Value* operand() const { return inputs_[0]; }
10662 Value* type_args() const {
10663 ASSERT(has_type_args());
10664 return inputs_[1];
10665 }
10666
10667 StubId stub_id() const { return stub_id_; }
10668 intptr_t resume_deopt_id() const { return resume_deopt_id_; }
10669 virtual TokenPosition token_pos() const { return token_pos_; }
10670
10671 virtual bool CanCallDart() const { return true; }
10672 virtual bool ComputeCanDeoptimize() const { return true; }
10673 virtual bool HasUnknownSideEffects() const { return true; }
10674 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
10675 return InputCount();
10676 }
10677
10678 DECLARE_INSTRUCTION(Suspend);
10679 PRINT_OPERANDS_TO_SUPPORT
10680
10681 virtual Definition* Canonicalize(FlowGraph* flow_graph);
10682
10683#define FIELD_LIST(F) \
10684 F(StubId, stub_id_) \
10685 F(const intptr_t, resume_deopt_id_) \
10686 F(const TokenPosition, token_pos_)
10687
10688 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SuspendInstr,
10689 TemplateDefinition,
10690 FIELD_LIST)
10691#undef FIELD_LIST
10692
10693 private:
10694 DISALLOW_COPY_AND_ASSIGN(SuspendInstr);
10695};
10696
10697#undef DECLARE_INSTRUCTION
10698
10699class Environment : public ZoneAllocated {
10700 public:
10701 // Iterate the non-null values in the innermost level of an environment.
10702 class ShallowIterator : public ValueObject {
10703 public:
10704 explicit ShallowIterator(Environment* environment)
10705 : environment_(environment), index_(0) {}
10706
10707 ShallowIterator(const ShallowIterator& other)
10708 : ValueObject(),
10709 environment_(other.environment_),
10710 index_(other.index_) {}
10711
10712 ShallowIterator& operator=(const ShallowIterator& other) {
10713 environment_ = other.environment_;
10714 index_ = other.index_;
10715 return *this;
10716 }
10717
10718 Environment* environment() const { return environment_; }
10719
10720 void Advance() {
10721 ASSERT(!Done());
10722 ++index_;
10723 }
10724
10725 bool Done() const {
10726 return (environment_ == nullptr) || (index_ >= environment_->Length());
10727 }
10728
10729 Value* CurrentValue() const {
10730 ASSERT(!Done());
10731 ASSERT(environment_->values_[index_] != nullptr);
10732 return environment_->values_[index_];
10733 }
10734
10735 void SetCurrentValue(Value* value) {
10736 ASSERT(!Done());
10737 ASSERT(value != nullptr);
10738 environment_->values_[index_] = value;
10739 }
10740
10741 Location CurrentLocation() const {
10742 ASSERT(!Done());
10743 return environment_->locations_[index_];
10744 }
10745
10746 void SetCurrentLocation(Location loc) {
10747 ASSERT(!Done());
10748 environment_->locations_[index_] = loc;
10749 }
10750
10751 private:
10752 Environment* environment_;
10753 intptr_t index_;
10754 };
10755
10756 // Iterate all non-null values in an environment, including outer
10757 // environments. Note that the iterator skips empty environments.
10758 class DeepIterator : public ValueObject {
10759 public:
10760 explicit DeepIterator(Environment* environment) : iterator_(environment) {
10761 SkipDone();
10762 }
10763
10764 void Advance() {
10765 ASSERT(!Done());
10766 iterator_.Advance();
10767 SkipDone();
10768 }
10769
10770 bool Done() const { return iterator_.environment() == nullptr; }
10771
10772 Value* CurrentValue() const {
10773 ASSERT(!Done());
10774 return iterator_.CurrentValue();
10775 }
10776
10777 void SetCurrentValue(Value* value) {
10778 ASSERT(!Done());
10779 iterator_.SetCurrentValue(value);
10780 }
10781
10782 Location CurrentLocation() const {
10783 ASSERT(!Done());
10784 return iterator_.CurrentLocation();
10785 }
10786
10787 void SetCurrentLocation(Location loc) {
10788 ASSERT(!Done());
10789 iterator_.SetCurrentLocation(loc);
10790 }
10791
10792 private:
10793 void SkipDone() {
10794 while (!Done() && iterator_.Done()) {
10795 iterator_ = ShallowIterator(iterator_.environment()->outer());
10796 }
10797 }
10798
10799 ShallowIterator iterator_;
10800 };
10801
10802 // Construct an environment by constructing uses from an array of definitions.
10803 static Environment* From(Zone* zone,
10804 const GrowableArray<Definition*>& definitions,
10805 intptr_t fixed_parameter_count,
10806 intptr_t lazy_deopt_pruning_count,
10807 const ParsedFunction& parsed_function);
10808
10809 void set_locations(Location* locations) {
10810 ASSERT(locations_ == nullptr);
10811 locations_ = locations;
10812 }
10813
10814 // Get deopt_id associated with this environment.
10815 // Note that only outer environments have deopt id associated with
10816 // them (set by DeepCopyToOuter).
10817 intptr_t GetDeoptId() const {
10818 ASSERT(DeoptIdBits::decode(bitfield_) != DeoptId::kNone);
10819 return DeoptIdBits::decode(value: bitfield_);
10820 }
10821
10822 intptr_t LazyDeoptPruneCount() const {
10823 return LazyDeoptPruningBits::decode(bitfield_);
10824 }
10825
10826 bool LazyDeoptToBeforeDeoptId() const {
10827 return LazyDeoptToBeforeDeoptId::decode(bitfield_);
10828 }
10829
10830 void MarkAsLazyDeoptToBeforeDeoptId() {
10831 bitfield_ = LazyDeoptToBeforeDeoptId::update(value: true, original: bitfield_);
10832 }
10833
10834 // This environment belongs to an optimistically hoisted instruction.
10835 bool IsHoisted() const { return Hoisted::decode(bitfield_); }
10836
10837 void MarkAsHoisted() { bitfield_ = Hoisted::update(value: true, original: bitfield_); }
10838
10839 Environment* GetLazyDeoptEnv(Zone* zone) {
10840 const intptr_t num_args_to_prune = LazyDeoptPruneCount();
10841 if (num_args_to_prune == 0) return this;
10842 return DeepCopy(zone, length: Length() - num_args_to_prune);
10843 }
10844
10845 Environment* outer() const { return outer_; }
10846
10847 Environment* Outermost() {
10848 Environment* result = this;
10849 while (result->outer() != nullptr)
10850 result = result->outer();
10851 return result;
10852 }
10853
10854 Value* ValueAt(intptr_t ix) const { return values_[ix]; }
10855
10856 void PushValue(Value* value);
10857
10858 intptr_t Length() const { return values_.length(); }
10859
10860 Location LocationAt(intptr_t index) const {
10861 ASSERT((index >= 0) && (index < values_.length()));
10862 return locations_[index];
10863 }
10864
10865 // The use index is the index in the flattened environment.
10866 Value* ValueAtUseIndex(intptr_t index) const {
10867 const Environment* env = this;
10868 while (index >= env->Length()) {
10869 ASSERT(env->outer_ != nullptr);
10870 index -= env->Length();
10871 env = env->outer_;
10872 }
10873 return env->ValueAt(ix: index);
10874 }
10875
10876 intptr_t fixed_parameter_count() const { return fixed_parameter_count_; }
10877
10878 intptr_t CountArgsPushed() {
10879 intptr_t count = 0;
10880 for (Environment::DeepIterator it(this); !it.Done(); it.Advance()) {
10881 if (it.CurrentValue()->definition()->IsMoveArgument()) {
10882 count++;
10883 }
10884 }
10885 return count;
10886 }
10887
10888 const Function& function() const { return function_; }
10889
10890 Environment* DeepCopy(Zone* zone) const { return DeepCopy(zone, length: Length()); }
10891
10892 void DeepCopyTo(Zone* zone, Instruction* instr) const;
10893 void DeepCopyToOuter(Zone* zone,
10894 Instruction* instr,
10895 intptr_t outer_deopt_id) const;
10896
10897 void DeepCopyAfterTo(Zone* zone,
10898 Instruction* instr,
10899 intptr_t argc,
10900 Definition* dead,
10901 Definition* result) const;
10902
10903 void PrintTo(BaseTextBuffer* f) const;
10904 const char* ToCString() const;
10905
10906 // Deep copy an environment. The 'length' parameter may be less than the
10907 // environment's length in order to drop values (e.g., passed arguments)
10908 // from the copy.
10909 Environment* DeepCopy(Zone* zone, intptr_t length) const;
10910
10911 void Write(FlowGraphSerializer* s) const;
10912 explicit Environment(FlowGraphDeserializer* d);
10913
10914 private:
10915 friend class ShallowIterator;
10916 friend class compiler::BlockBuilder; // For Environment constructor.
10917
10918 class LazyDeoptPruningBits : public BitField<uintptr_t, uintptr_t, 0, 8> {};
10919 class LazyDeoptToBeforeDeoptId
10920 : public BitField<uintptr_t, bool, LazyDeoptPruningBits::kNextBit, 1> {};
10921 class Hoisted : public BitField<uintptr_t,
10922 bool,
10923 LazyDeoptToBeforeDeoptId::kNextBit,
10924 1> {};
10925 class DeoptIdBits : public BitField<uintptr_t,
10926 intptr_t,
10927 Hoisted::kNextBit,
10928 kBitsPerWord - Hoisted::kNextBit,
10929 /*sign_extend=*/true> {};
10930
10931 Environment(intptr_t length,
10932 intptr_t fixed_parameter_count,
10933 intptr_t lazy_deopt_pruning_count,
10934 const Function& function,
10935 Environment* outer)
10936 : values_(length),
10937 fixed_parameter_count_(fixed_parameter_count),
10938 bitfield_(DeoptIdBits::encode(value: DeoptId::kNone) |
10939 LazyDeoptToBeforeDeoptId::encode(value: false) |
10940 LazyDeoptPruningBits::encode(value: lazy_deopt_pruning_count)),
10941 function_(function),
10942 outer_(outer) {}
10943
10944 void SetDeoptId(intptr_t deopt_id) {
10945 bitfield_ = DeoptIdBits::update(value: deopt_id, original: bitfield_);
10946 }
10947 void SetLazyDeoptPruneCount(intptr_t value) {
10948 bitfield_ = LazyDeoptPruningBits::update(value, original: bitfield_);
10949 }
10950 void SetLazyDeoptToBeforeDeoptId(bool value) {
10951 bitfield_ = LazyDeoptToBeforeDeoptId::update(value, original: bitfield_);
10952 }
10953
10954 GrowableArray<Value*> values_;
10955 Location* locations_ = nullptr;
10956 const intptr_t fixed_parameter_count_;
10957 // Deoptimization id associated with this environment. Only set for
10958 // outer environments.
10959 uintptr_t bitfield_;
10960 const Function& function_;
10961 Environment* outer_;
10962
10963 DISALLOW_COPY_AND_ASSIGN(Environment);
10964};
10965
// Base visitor over IL instructions. One virtual Visit method is generated
// per concrete instruction in FOR_EACH_INSTRUCTION; every default
// implementation is a no-op, so subclasses override only the instructions
// they care about.
class InstructionVisitor : public ValueObject {
 public:
  InstructionVisitor() {}
  virtual ~InstructionVisitor() {}

// Visit functions for instruction classes, with an empty default
// implementation.
#define DECLARE_VISIT_INSTRUCTION(ShortName, Attrs) \
  virtual void Visit##ShortName(ShortName##Instr* instr) {}

  FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

 private:
  DISALLOW_COPY_AND_ASSIGN(InstructionVisitor);
};
10983
10984// Visitor base class to visit each instruction and computation in a flow
10985// graph as defined by a reversed list of basic blocks.
10986class FlowGraphVisitor : public InstructionVisitor {
10987 public:
10988 explicit FlowGraphVisitor(const GrowableArray<BlockEntryInstr*>& block_order)
10989 : current_iterator_(nullptr), block_order_(&block_order) {}
10990 virtual ~FlowGraphVisitor() {}
10991
10992 ForwardInstructionIterator* current_iterator() const {
10993 return current_iterator_;
10994 }
10995
10996 // Visit each block in the block order, and for each block its
10997 // instructions in order from the block entry to exit.
10998 virtual void VisitBlocks();
10999
11000 protected:
11001 void set_block_order(const GrowableArray<BlockEntryInstr*>& block_order) {
11002 block_order_ = &block_order;
11003 }
11004
11005 ForwardInstructionIterator* current_iterator_;
11006
11007 private:
11008 const GrowableArray<BlockEntryInstr*>* block_order_;
11009 DISALLOW_COPY_AND_ASSIGN(FlowGraphVisitor);
11010};
11011
// Helper macros for platform ports.
// Emits stub MakeLocationSummary/EmitNativeCode definitions for an
// instruction that a particular architecture backend has not implemented
// yet; both stubs hit UNIMPLEMENTED() at runtime.
#define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name) \
  LocationSummary* Name::MakeLocationSummary(Zone* zone, bool opt) const { \
    UNIMPLEMENTED(); \
    return nullptr; \
  } \
  void Name::EmitNativeCode(FlowGraphCompiler* compiler) { \
    UNIMPLEMENTED(); \
  }
11021
11022template <intptr_t kExtraInputs>
11023StringPtr TemplateDartCall<kExtraInputs>::Selector() {
11024 if (auto static_call = this->AsStaticCall()) {
11025 return static_call->function().name();
11026 } else if (auto instance_call = this->AsInstanceCall()) {
11027 return instance_call->function_name().ptr();
11028 } else {
11029 UNREACHABLE();
11030 }
11031}
11032
11033inline bool Value::CanBe(const Object& value) {
11034 ConstantInstr* constant = definition()->AsConstant();
11035 return (constant == nullptr) || constant->value().ptr() == value.ptr();
11036}
11037#undef DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS
11038#undef DECLARE_CUSTOM_SERIALIZATION
11039#undef DECLARE_EMPTY_SERIALIZATION
11040
11041} // namespace dart
11042
11043#endif // RUNTIME_VM_COMPILER_BACKEND_IL_H_
11044

// source code of dart_sdk/runtime/vm/compiler/backend/il.h