for (int trip = 0; trip <= 1; trip++) {
if (st->is_Store()) {
Node* st_adr = st->in(MemNode::Address);
if (!phase->eqv(st_adr, ld_adr)) {
intptr_t st_off = 0;
AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
if (alloc == NULL) return NULL;
if (alloc != ld_alloc) return NULL;
if (ld_off != st_off) return NULL;
}
if (store_Opcode() != st->Opcode())
return NULL;
return st->in(MemNode::ValueIn);
}
if (st->is_Proj() && st->in(0)->is_Allocate() &&
(st->in(0) == ld_alloc) &&
(ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
return phase->zerocon(memory_type());
}
if (st->is_Proj() && st->in(0)->is_Initialize()) {
InitializeNode* init = st->in(0)->as_Initialize();
AllocateNode* alloc = init->allocation();
if ((alloc != NULL) && (alloc == ld_alloc)) {
st = init->find_captured_store(ld_off, memory_size(), phase);
if (st != NULL)
continue; // take one more trip around
}
}
if (this->is_Load() && ld_adr->is_AddP() &&
(tp != NULL) && tp->is_ptr_to_boxed_value()) {
intptr_t ignore = 0;
Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
if (base != NULL && base->is_Proj() &&
base->as_Proj()->_con == TypeFunc::Parms &&
base->in(0)->is_CallStaticJava() &&
base->in(0)->as_CallStaticJava()->is_boxing_method()) {
return base->in(0)->in(TypeFunc::Parms);
}
}
break;
}
return NULL;
}
// Returns true when this load reads through an AddP address whose oop type
// names a known-instance field (or a boxed value) at a concrete offset, and
// the load's memory input is a Phi controlled by 'ctrl'.  Such loads are
// candidates for matching against an existing per-instance field Phi.
bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
  Node* mem = in(Memory);
  Node* adr = in(Address);
  // Memory must be a Phi on 'ctrl' and the address must be an AddP.
  if (!mem->is_Phi() || mem->in(0) != ctrl || !adr->is_AddP()) {
    return false;
  }
  const TypeOopPtr* t_oop = adr->bottom_type()->isa_oopptr();
  // Require an oop pointer to a boxed value or known instance field,
  // with a concrete (neither Bot nor Top) offset.
  return t_oop != NULL &&
         (t_oop->is_ptr_to_boxed_value() ||
          t_oop->is_known_instance_field()) &&
         t_oop->offset() != Type::OffsetBot &&
         t_oop->offset() != Type::OffsetTop;
}
//------------------------------Identity---------------------------------------
// Loads are identity if a previous store to the same address is visible.
// Also recognizes an existing data Phi that already merges this instance
// (or boxed-value) field, to avoid generating an infinite chain of Phis.
Node *LoadNode::Identity( PhaseTransform *phase ) {
  Node* mem = in(Memory);
  // If the previous store to this exact address is visible, the load is
  // redundant: answer the stored value instead.
  Node* value = can_see_stored_value(mem, phase);
  if( value ) {
    // Sub-word stores retain only the low bytes; only substitute the raw
    // stored value if its type already fits this load's type.
    if (memory_size() < BytesPerInt) {
      if (!phase->type(value)->higher_equal(phase->type(this)))
        return this;
    }
    return value;
  }

  // Search the memory Phi's region for a data Phi generated earlier for the
  // same instance field.
  Node *region = mem->in(0);
  if (is_instance_field_load_with_local_phi(region)) {
    const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
    int this_index  = phase->C->get_alias_index(addr_t);
    int this_offset = addr_t->offset();
    int this_iid    = addr_t->instance_id();
    if (!addr_t->is_known_instance() &&
         addr_t->is_ptr_to_boxed_value()) {
      // For boxed values the identifying id is the _idx of the address base.
      intptr_t ignore = 0;
      Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
      if (base == NULL) {
        return this;
      }
      this_iid = base->_idx;
    }
    const Type* this_type = bottom_type();
    // Scan the region's outputs for a Phi matching this field's
    // type/instance-id/alias-index/offset.
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() && phi != mem &&
          phi->as_Phi()->is_same_inst_field(this_type, (int)mem->_idx, this_iid, this_index, this_offset)) {
        return phi;
      }
    }
  }

  return this;
}
//------------------------------eliminate_autobox------------------------------
// Eliminate the load of a boxed value from an autobox cache array (e.g. the
// Integer.valueOf() cache) by recomputing the primitive value directly from
// the cache-array index arithmetic.  Returns the recovered primitive value
// node, or NULL when the address does not match the autobox-cache pattern.
//
// Fix: the mixed &&/|| conditions below previously relied on operator
// precedence without parentheses (triggering -Wparentheses); explicit
// parentheses added — no behavior change.
Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
  assert(phase->C->eliminate_boxing(), "sanity");
  intptr_t ignore = 0;
  Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
  if ((base == NULL) || base->is_Phi()) {
    // Phi bases are handled by split_through_phi(), which pushes the load up
    // through the Phi so each input can be eliminated individually.
    return NULL;
  } else if (base->is_Load() ||
             (base->is_DecodeN() && base->in(1)->is_Load())) {
    // The base is the (possibly compressed) load of the boxing object from
    // the cache array; peel off the DecodeN to reach the load itself.
    if (base->is_DecodeN()) {
      base = base->in(1);
    }
    if (!base->in(Address)->is_AddP()) {
      return NULL; // Complex address
    }
    AddPNode* address = base->in(Address)->as_AddP();
    Node* cache_base = address->in(AddPNode::Base);
    if ((cache_base != NULL) && cache_base->is_DecodeN()) {
      // Get the ConP node which represents the 'cache' constant array.
      cache_base = cache_base->in(1);
    }
    if ((cache_base != NULL) && cache_base->is_Con()) {
      const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
      if ((base_type != NULL) && base_type->is_autobox_cache()) {
        // The address must decompose into a constant offset plus, optionally,
        // an index shifted by the object-array element size.
        Node* elements[4];
        int shift = exact_log2(type2aelembytes(T_OBJECT));
        int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
        if ((count > 0) && elements[0]->is_Con() &&
            ((count == 1) ||
             ((count == 2) && (elements[1]->Opcode() == Op_LShiftX) &&
                              (elements[1]->in(2) == phase->intcon(shift))))) {
          // cache[0] holds the box for the lowest cached value; its field
          // value gives the low bound used to bias the index.
          ciObjArray* array = base_type->const_oop()->as_obj_array();
          ciInstance* box = array->obj_at(0)->as_instance();
          ciInstanceKlass* ik = box->klass()->as_instance_klass();
          assert(ik->is_box_klass(), "sanity");
          assert(ik->nof_nonstatic_fields() == 1, "change following code");
          if (ik->nof_nonstatic_fields() == 1) {
            // Box klasses hold exactly one non-static field: the value.
            ciConstant c = box->field_value(ik->nonstatic_field_at(0));
            BasicType bt = c.basic_type();
            // Only integral types have a boxing cache.
            assert(bt == T_BOOLEAN || bt == T_CHAR  ||
                   bt == T_BYTE    || bt == T_SHORT ||
                   bt == T_INT     || bt == T_LONG, err_msg_res("wrong type = %s", type2name(bt)));
            jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
            if (cache_low != (int)cache_low) {
              return NULL; // should not happen since cache is array indexed by value
            }
            jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
            if (offset != (int)offset) {
              return NULL; // should not happen since cache is array indexed by value
            }
            // Sum up all the offsets making up the address of the load.
            Node* result = elements[0];
            for (int i = 1; i < count; i++) {
              result = phase->transform(new (phase->C) AddXNode(result, elements[i]));
            }
            // Remove the constant offset from the address, then undo the
            // scaling to recover the original (biased) index.
            result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-(int)offset)));
            if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
              // Peel the shift off directly, but wrap the operand in a
              // no-op shift since Ideal cannot return an existing node.
              result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0));
            } else if (result->is_Add() && result->in(2)->is_Con() &&
                       result->in(1)->Opcode() == Op_LShiftX &&
                       result->in(1)->in(2) == phase->intcon(shift)) {
              // Fold ((X<<shift) + Con) >> shift into X + (Con>>shift); safe
              // here because the cache access is range-checked, so the shift
              // cannot overflow.
              Node* add_con = new (phase->C) RShiftXNode(result->in(2), phase->intcon(shift));
              result = new (phase->C) AddXNode(result->in(1)->in(1), phase->transform(add_con));
            } else {
              result = new (phase->C) RShiftXNode(result, phase->intcon(shift));
            }
#ifdef _LP64
            if (bt != T_LONG) {
              result = new (phase->C) ConvL2INode(phase->transform(result));
            }
#else
            if (bt == T_LONG) {
              result = new (phase->C) ConvI2LNode(phase->transform(result));
            }
#endif
            // Unsigned sub-word loads need the zero-extension re-applied to
            // the recovered value.
            switch(this->Opcode()) {
            case Op_LoadUB:
              result = new (phase->C) AndINode(phase->transform(result), phase->intcon(0xFF));
              break;
            case Op_LoadUS:
              result = new (phase->C) AndINode(phase->transform(result), phase->intcon(0xFFFF));
              break;
            }
            return result;
          }
        }
      }
    }
  }
  return NULL;
}
// A Phi is "stable" when it has a region and every input pair (region
// control input and matching Phi data input) is present and not TOP.
// Unstable Phis indicate the graph is still changing; callers wait.
static bool stable_phi(PhiNode* phi, PhaseGVN *phase) {
  Node* region = phi->in(0);
  if (region == NULL) {
    return false; // Wait stable graph
  }
  for (uint j = 1; j < phi->req(); j++) {
    Node* ctl_in = region->in(j);
    if (ctl_in == NULL || phase->type(ctl_in) == Type::TOP) {
      return false; // Wait stable graph
    }
    Node* val_in = phi->in(j);
    if (val_in == NULL || phase->type(val_in) == Type::TOP) {
      return false; // Wait stable graph
    }
  }
  return true;
}
//------------------------------split_through_phi------------------------------
// Split an instance-field or boxed-value load through a memory or address
// Phi: clone the load onto each Phi input and merge the results with a new
// data Phi.  Returns the new Phi, or NULL if no split is possible yet.
Node *LoadNode::split_through_phi(PhaseGVN *phase) {
  Node* mem     = in(Memory);
  Node* address = in(Address);
  const TypeOopPtr *t_oop = phase->type(address)->isa_oopptr();

  assert((t_oop != NULL) &&
         (t_oop->is_known_instance_field() ||
          t_oop->is_ptr_to_boxed_value()), "invalide conditions");

  Compile* C = phase->C;
  intptr_t ignore = 0;
  Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
  bool base_is_phi = (base != NULL) && base->is_Phi();
  // Boxed-value loads are only split when aggressive unboxing is enabled and
  // the base is known non-null.
  bool load_boxed_values = t_oop->is_ptr_to_boxed_value() && C->aggressive_unboxing() &&
                           (base != NULL) && (base == address->in(AddPNode::Base)) &&
                           phase->type(base)->higher_equal(TypePtr::NOTNULL);
  if (!((mem->is_Phi() || base_is_phi) &&
        (load_boxed_values || t_oop->is_known_instance_field()))) {
    return NULL; // memory is not Phi
  }

  if (mem->is_Phi()) {
    if (!stable_phi(mem->as_Phi(), phase)) {
      return NULL; // Wait stable graph
    }
    uint cnt = mem->req();
    // Check for loop-invariant memory on a 2-input (loop) memory Phi: if one
    // input optimizes back to the Phi itself, the other input can be used.
    if (cnt == 3) {
      for (uint i = 1; i < cnt; i++) {
        Node* in = mem->in(i);
        Node* m = optimize_memory_chain(in, t_oop, this, phase);
        if (m == mem) {
          if (i == 1) {
            // if the first edge was a loop, check second edge too.
            // If both are replaceable - we are in an infinite loop
            Node *n = optimize_memory_chain(mem->in(2), t_oop, this, phase);
            if (n == mem) {
              break;
            }
          }
          // Use the other (non-looping) memory input.
          set_req(Memory, mem->in(cnt - i));
          return this; // made change
        }
      }
    }
  }
  if (base_is_phi) {
    if (!stable_phi(base->as_Phi(), phase)) {
      return NULL; // Wait stable graph
    }
    uint cnt = base->req();
    // A 2-input base Phi that refers to itself means the graph is not yet
    // stable enough to split through.
    if (cnt == 3) {
      for (uint i = 1; i < cnt; i++) {
        if (base->in(i) == base) {
          return NULL; // Wait stable graph
        }
      }
    }
  }

  bool load_boxed_phi = load_boxed_values && base_is_phi && (base->in(0) == mem->in(0));

  assert(C->have_alias_type(t_oop), "instance should have alias type");

  // Do nothing here if Identity will find a value
  // (to avoid infinite chain of value phis generation).
  if (!phase->eqv(this, this->Identity(phase)))
    return NULL;

  // Select the merge point (region) for the new data Phi, requiring that all
  // non-Phi inputs' controls dominate it.
  Node* region;
  if (!base_is_phi) {
    assert(mem->is_Phi(), "sanity");
    region = mem->in(0);
    if (!MemNode::all_controls_dominate(address, region))
      return NULL;
  } else if (!mem->is_Phi()) {
    assert(base_is_phi, "sanity");
    region = base->in(0);
    if (!MemNode::all_controls_dominate(mem, region))
      return NULL;
  } else if (base->in(0) != mem->in(0)) {
    assert(base_is_phi && mem->is_Phi(), "sanity");
    if (MemNode::all_controls_dominate(mem, base->in(0))) {
      region = base->in(0);
    } else if (MemNode::all_controls_dominate(address, mem->in(0))) {
      region = mem->in(0);
    } else {
      return NULL; // complex graph
    }
  } else {
    assert(base->in(0) == mem->in(0), "sanity");
    region = mem->in(0);
  }

  const Type* this_type = this->bottom_type();
  int this_index  = C->get_alias_index(t_oop);
  int this_offset = t_oop->offset();
  int this_iid    = t_oop->instance_id();
  if (!t_oop->is_known_instance() && load_boxed_values) {
    // Use _idx of the address base for boxed values.
    this_iid = base->_idx;
  }
  PhaseIterGVN* igvn = phase->is_IterGVN();
  Node* phi = new (C) PhiNode(region, this_type, NULL, mem->_idx, this_iid, this_index, this_offset);
  // Clone the load for every region input, rewiring each clone to the
  // pre-Phi control/memory/address, and fill in the Phi.
  for (uint i = 1; i < region->req(); i++) {
    Node* x;
    Node* the_clone = NULL;
    if (region->in(i) == C->top()) {
      x = C->top();      // Dead path?  Use a dead data op
    } else {
      x = this->clone();        // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (this->in(0) == region) {
        x->set_req(0, region->in(i));
      } else {
        x->set_req(0, NULL);
      }
      if (mem->is_Phi() && (mem->in(0) == region)) {
        x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone.
      }
      if (address->is_Phi() && address->in(0) == region) {
        x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone
      }
      if (base_is_phi && (base->in(0) == region)) {
        Node* base_x = base->in(i); // Clone address for loads from boxed objects.
        Node* adr_x = phase->transform(new (C) AddPNode(base_x,base_x,address->in(AddPNode::Offset)));
        x->set_req(Address, adr_x);
      }
    }
    // Check for a 'win' on some paths
    const Type *t = x->Value(igvn);

    bool singleton = t->singleton();

    // A TOP value only counts as a constant on non-entry loop paths
    // (see PhaseIdealLoop::split_thru_phi for the analogous logic).
    if (singleton && t == Type::TOP) {
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      x = igvn->makecon(t);
    } else {
      // Try to simplify the cloned node before committing to it.
      igvn->set_type(x, t);
      x->raise_bottom_type(t);
      Node *y = x->Identity(igvn);
      if (y != x) {
        x = y;
      } else {
        y = igvn->hash_find_insert(x);
        if (y) {
          x = y;
        } else {
          // x is a new node we are keeping; set_type was already called,
          // so just queue it for further optimization.
          igvn->_worklist.push(x);
        }
      }
    }
    if (x != the_clone && the_clone != NULL) {
      igvn->remove_dead_node(the_clone);
    }
    phi->set_req(i, x);
  }
  igvn->register_new_node_with_optimizer(phi);
  return phi;
}
//------------------------------Ideal------------------------------------------
// If the load is from Field memory, it may be possible to skip intervening
// independent stores, split the load through Phis, or eliminate autobox
// cache loads.  Also removes useless SafePoint control and provably
// unnecessary control edges.
Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p) return (p == NodeSentinel) ? NULL : p;

  Node* ctrl    = in(MemNode::Control);
  Node* address = in(MemNode::Address);

  // Loads of the mark word are special-cased below and keep their control.
  bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
                    phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());

  // Skip up past a SafePoint control.  Cannot do this for loads of the mark
  // word or for raw-memory loads.
  if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
      phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw &&
      !addr_mark ) {
    ctrl = ctrl->in(0);
    set_req(MemNode::Control,ctrl);
  }

  intptr_t ignore = 0;
  Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
  if (base != NULL
      && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
    // The control edge is useless when the base is provably non-null and
    // dominated by the method start (method-invariant address).
    if (in(MemNode::Control) != NULL
        && can_remove_control()
        && phase->type(base)->higher_equal(TypePtr::NOTNULL)
        && all_controls_dominate(base, phase->C->start())) {
      set_req(MemNode::Control, NULL);
    }
  }

  Node* mem = in(MemNode::Memory);
  const TypePtr *addr_t = phase->type(address)->isa_ptr();

  if (can_reshape && (addr_t != NULL)) {
    // Try to optimize our memory input.
    Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
    if (opt_mem != mem) {
      set_req(MemNode::Memory, opt_mem);
      if (phase->type( opt_mem ) == Type::TOP) return NULL;
      return this;
    }
    const TypeOopPtr *t_oop = addr_t->isa_oopptr();
    if ((t_oop != NULL) &&
        (t_oop->is_known_instance_field() ||
         t_oop->is_ptr_to_boxed_value())) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      if (igvn != NULL && igvn->_worklist.member(opt_mem)) {
        // Delay this transformation until the memory input is processed.
        phase->is_IterGVN()->_worklist.push(this);
        return NULL;
      }
      // Split instance field load through Phi.
      Node* result = split_through_phi(phase);
      if (result != NULL) return result;

      if (t_oop->is_ptr_to_boxed_value()) {
        Node* result = eliminate_autobox(phase);
        if (result != NULL) return result;
      }
    }
  }

  // Check for a prior independent store; rewire the load past it, but only
  // when the earlier store's value is actually visible to this load.
  Node* prev_mem = find_previous_store(phase);
  if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) {
    if (can_see_stored_value(prev_mem, phase)) {
      set_req(MemNode::Memory, prev_mem);
      return this;
    }
  }

  return NULL; // No further progress
}
// Fold loads of Klass fields that the VM maintains as constants for a
// loaded, exact klass.  Returns the constant type, or NULL when the offset
// does not name a recognized field.
const Type*
LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
                                 ciKlass* klass) const {
  const int field_off = tkls->offset();
  if (field_off == in_bytes(Klass::modifier_flags_offset())) {
    // Klass::_modifier_flags is constant for a given klass.
    assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
    return TypeInt::make(klass->modifier_flags());
  }
  if (field_off == in_bytes(Klass::access_flags_offset())) {
    // Klass::_access_flags is constant for a given klass.
    assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
    return TypeInt::make(klass->access_flags());
  }
  if (field_off == in_bytes(Klass::layout_helper_offset())) {
    // Klass::_layout_helper is constant for a given klass.
    assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
    return TypeInt::make(klass->layout_helper());
  }
  // Not a recognized constant Klass field.
  return NULL;
}
// Try to constant-fold a load from a constant, stable array.  Returns the
// folded constant type, or NULL if the element cannot be folded (stable
// arrays only permit folding of non-default, i.e. non-null/non-zero,
// elements).
static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
  assert(ary->const_oop(), "array should be constant");
  assert(ary->is_stable(), "array should be stable");

  // Fetch the constant element at the given byte offset.
  ciArray* aobj = ary->const_oop()->as_array();
  ciConstant con = aobj->element_value_by_offset(off);

  if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
    const Type* con_type = Type::make_from_constant(con);
    if (con_type != NULL) {
      if (con_type->isa_aryptr()) {
        // A nested array inherits stability with one less dimension.
        int dim = ary->stable_dimension();
        con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
      }
      if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
        // Narrow-oop loads need the compressed form of the constant.
        con_type = con_type->make_narrowoop();
      }
#ifndef PRODUCT
      if (TraceIterativeGVN) {
        tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
        con_type->dump(); tty->cr();
      }
#endif //PRODUCT
      return con_type;
    }
  }
  return NULL;
}
//------------------------------Value-----------------------------------------
// Compute the most precise type for the loaded value, using the address
// type, constant-folding opportunities (stable arrays, constant Strings,
// boxed values, CallSites, Klass metadata fields) and visible prior stores.
const Type *LoadNode::Value( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP.
  Node* mem = in(MemNode::Memory);
  const Type *t1 = phase->type(mem);
  if (t1 == Type::TOP) return Type::TOP;
  Node* adr = in(MemNode::Address);
  const TypePtr* tp = phase->type(adr)->isa_ptr();
  if (tp == NULL || tp->empty()) return Type::TOP;
  int off = tp->offset();
  assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
  Compile* C = phase->C;

  // Try to guess the loaded type from the pointer type.
  if (tp->isa_aryptr()) {
    const TypeAryPtr* ary = tp->is_aryptr();
    const Type* t = ary->elem();

    // Determine whether the offset is beyond the array header.  Different
    // array types start their data at slightly different offsets; T_BYTE is
    // the earliest possible start, so use it as the conservative bound.
    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);

    // Try to constant-fold a stable array element.
    if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
      // Only with a constant offset into the element area.
      if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
        if (con_type != NULL) {
          return con_type;
        }
      }
    }

    // For non-integral, non-vector element types (excluding klass loads),
    // sharpen the declared type by joining with the element type.
    if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
        && (_type->isa_vect() == NULL)
        && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
      if (off_beyond_header) { // is the offset beyond the header?
        const Type* jt = t->join_speculative(_type);
        // Do not allow the join, per se, to empty out the type.
        if (jt->empty() && !t->empty()) {
          jt = _type;
        }
#ifdef ASSERT
        if (phase->C->eliminate_boxing() && adr->is_AddP()) {
          // The pointers in autobox cache arrays are always non-null.
          Node* base = adr->in(AddPNode::Base);
          if ((base != NULL) && base->is_DecodeN()) {
            // Look through the compression to the underlying load.
            base = base->in(1);
          }
          if ((base != NULL) && base->is_Con()) {
            const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
            if ((base_type != NULL) && base_type->is_autobox_cache()) {
              assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
            }
          }
        }
#endif
        return jt;
      }
    }
  } else if (tp->base() == Type::InstPtr) {
    ciEnv* env = C->env();
    const TypeInstPtr* tinst = tp->is_instptr();
    ciKlass* klass = tinst->klass();
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects
            tp->is_oopptr()->klass()->is_java_lang_Object() ||
            // unsafe field access may not have a constant offset
            C->has_unsafe_access(),
            "Field accesses must be precise" );

    // For constant Strings treat final fields as compile-time constants.
    if (klass == env->String_klass() &&
        adr->is_AddP() && off != Type::OffsetBot) {
      Node* base = adr->in(AddPNode::Base);
      const TypeOopPtr* t = phase->type(base)->isa_oopptr();
      if (t != NULL && t->singleton()) {
        ciField* field = env->String_klass()->get_field_by_offset(off, false);
        if (field != NULL && field->is_final()) {
          ciObject* string = t->const_oop();
          ciConstant constant = string->as_instance()->field_value(field);
          if (constant.basic_type() == T_INT) {
            return TypeInt::make(constant.as_int());
          } else if (constant.basic_type() == T_ARRAY) {
            if (adr->bottom_type()->is_ptr_to_narrowoop()) {
              return TypeNarrowOop::make_from_constant(constant.as_object(), true);
            } else {
              return TypeOopPtr::make_from_constant(constant.as_object(), true);
            }
          }
        }
      }
    }
    ciObject* const_oop = tinst->const_oop();
    if (const_oop != NULL) {
      // For a constant boxed value treat the value field as a constant.
      if (tinst->is_ptr_to_boxed_value()) {
        return tinst->get_const_boxed_value();
      } else
      // For constant CallSites treat the target field as a constant.
      if (const_oop->is_call_site()) {
        ciCallSite* call_site = const_oop->as_call_site();
        ciField* field = call_site->klass()->as_instance_klass()->get_field_by_offset(off, /*is_static=*/ false);
        if (field != NULL && field->is_call_site_target()) {
          ciMethodHandle* target = call_site->get_target();
          if (target != NULL) { // just in case
            ciConstant constant(T_OBJECT, target);
            const Type* t;
            if (adr->bottom_type()->is_ptr_to_narrowoop()) {
              t = TypeNarrowOop::make_from_constant(constant.as_object(), true);
            } else {
              t = TypeOopPtr::make_from_constant(constant.as_object(), true);
            }
            // Non-constant call sites need a dependency so the code is
            // invalidated if the target changes.
            if (!call_site->is_constant_call_site()) {
              C->dependencies()->assert_call_site_target_value(call_site, target);
            }
            return t;
          }
        }
      }
    }
  } else if (tp->base() == Type::KlassPtr) {
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects
            tp->is_klassptr()->klass()->is_java_lang_Object() ||
            // LoadKlass may read from the primary-supers array
            Opcode() == Op_LoadKlass,
            "Field accesses must be precise" );
  }

  const TypeKlassPtr *tkls = tp->isa_klassptr();
  if (tkls != NULL && !StressReflectiveCode) {
    ciKlass* klass = tkls->klass();
    if (klass->is_loaded() && tkls->klass_is_exact()) {
      // Loading a field from an exactly-known Klass metaobject: check for
      // fields the VM maintains as constants.
      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
        // Klass::_super_check_offset is constant.
        assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
        return TypeInt::make(klass->super_check_offset());
      }
      // Compute the index into the primary_supers array; the unsigned
      // compare below also rejects offsets before the array.
      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
      if( depth < ciKlass::primary_super_limit() ) {
        // An element of Klass::_primary_supers: constant for this klass.
        assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
        ciKlass *ss = klass->super_of_depth(depth);
        return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
      }
      const Type* aift = load_array_final_field(tkls, klass);
      if (aift != NULL) return aift;
      if (tkls->offset() == in_bytes(ArrayKlass::component_mirror_offset())
          && klass->is_array_klass()) {
        // ArrayKlass::_component_mirror is constant.
        assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror");
        return TypeInstPtr::make(klass->as_array_klass()->component_mirror());
      }
      if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
        // Klass::_java_mirror is constant.
        assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
        return TypeInstPtr::make(klass->java_mirror());
      }
    }

    // Even when the klass is not exact, primary_supers entries at depths no
    // greater than the klass's own super depth are still correct.
    if (klass->is_loaded() ) {
      ciType *inner = klass;
      while( inner->is_obj_array_klass() )
        inner = inner->as_obj_array_klass()->base_element_type();
      if( inner->is_instance_klass() &&
          !inner->as_instance_klass()->flags().is_interface() ) {
        // Compute the index into the primary_supers array (unsigned compare
        // rejects offsets before the array).
        juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
        if( depth < ciKlass::primary_super_limit() &&
            depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
          assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
          ciKlass *ss = klass->super_of_depth(depth);
          return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
        }
      }
    }

    // If the type rules out array-ness, the layout_helper is known positive,
    // which lets downstream tests for array-ness fold away.
    if (tkls->offset() == in_bytes(Klass::layout_helper_offset())
        && !klass->is_array_klass() // not directly typed as an array
        && !klass->is_interface() // specifically not Serializable & Cloneable
        && !klass->is_java_lang_Object() // not the supertype of all T[]
        ) {
      assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
      jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
      return TypeInt::make(min_size, max_jint, Type::WidenMin);
    }
  }

  // For instance fields and boxed values (or with ReduceFieldZeroing), a
  // visible constant prior store determines the value exactly.
  const TypeOopPtr *tinst = tp->isa_oopptr();
  bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
  bool is_boxed_value = (tinst != NULL) && tinst->is_ptr_to_boxed_value();
  if (ReduceFieldZeroing || is_instance || is_boxed_value) {
    Node* value = can_see_stored_value(mem,phase);
    if (value != NULL && value->is_Con()) {
      assert(value->bottom_type()->higher_equal(_type),"sanity");
      return value->bottom_type();
    }
  }

  if (is_instance) {
    // An instance field read from the program's initial memory state has no
    // matching store, so the value is the type's zero.
    Node *mem = in(MemNode::Memory);
    if (mem->is_Parm() && mem->in(0)->is_Start()) {
      assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
      return Type::get_zero_type(_type->basic_type());
    }
  }

  return _type;
}
//------------------------------match_edge-------------------------------------
// Only the address input participates in the matcher's address-mode
// formation; all other edges are excluded.
uint LoadNode::match_edge(uint idx) const {
  if (idx == MemNode::Address) {
    return 1;
  }
  return 0;
}
//--------------------------LoadBNode::Ideal--------------------------------------
// If a prior store's value is visible but wider than a byte, narrow it by
// sign-extending through a left/right shift pair: (value << 24) >> 24.
Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* stored = can_see_stored_value(in(MemNode::Memory), phase);
  if (stored == NULL || phase->type(stored)->higher_equal(_type)) {
    // No visible store, or the value already fits a byte: fall back to the
    // generic load transformations.
    return LoadNode::Ideal(phase, can_reshape);
  }
  Node* shifted = phase->transform(new (phase->C) LShiftINode(stored, phase->intcon(24)));
  return new (phase->C) RShiftINode(shifted, phase->intcon(24));
}
// Constant-fold a visible constant store through the byte sign-extension.
const Type* LoadBNode::Value(PhaseTransform *phase) const {
  Node* stored = can_see_stored_value(in(MemNode::Memory), phase);
  if (stored != NULL && stored->is_Con() &&
      !stored->bottom_type()->higher_equal(_type)) {
    // Sign-extend the low byte of the stored constant.
    const int raw = stored->get_int();
    return TypeInt::make((raw << 24) >> 24);
  }
  return LoadNode::Value(phase);
}
//--------------------------LoadUBNode::Ideal-------------------------------------
// If a prior store's value is visible but wider than an unsigned byte,
// zero-extend it by masking with 0xFF.
Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* stored = can_see_stored_value(in(MemNode::Memory), phase);
  if (stored != NULL && !phase->type(stored)->higher_equal(_type)) {
    return new (phase->C) AndINode(stored, phase->intcon(0xFF));
  }
  // Otherwise fall back to the generic load transformations.
  return LoadNode::Ideal(phase, can_reshape);
}
// Constant-fold a visible constant store through the unsigned-byte mask.
const Type* LoadUBNode::Value(PhaseTransform *phase) const {
  Node* stored = can_see_stored_value(in(MemNode::Memory), phase);
  if (stored != NULL && stored->is_Con() &&
      !stored->bottom_type()->higher_equal(_type)) {
    // Zero-extend the low byte of the stored constant.
    return TypeInt::make(stored->get_int() & 0xFF);
  }
  return LoadNode::Value(phase);
}
//--------------------------LoadUSNode::Ideal-------------------------------------
// If a prior store's value is visible but wider than a char, zero-extend it
// by masking with 0xFFFF.
Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* stored = can_see_stored_value(in(MemNode::Memory), phase);
  if (stored != NULL && !phase->type(stored)->higher_equal(_type)) {
    return new (phase->C) AndINode(stored, phase->intcon(0xFFFF));
  }
  // Otherwise fall back to the generic load transformations.
  return LoadNode::Ideal(phase, can_reshape);
}
// Constant-fold a visible constant store through the char mask.
const Type* LoadUSNode::Value(PhaseTransform *phase) const {
  Node* stored = can_see_stored_value(in(MemNode::Memory), phase);
  if (stored != NULL && stored->is_Con() &&
      !stored->bottom_type()->higher_equal(_type)) {
    // Zero-extend the low 16 bits of the stored constant.
    return TypeInt::make(stored->get_int() & 0xFFFF);
  }
  return LoadNode::Value(phase);
}
//--------------------------LoadSNode::Ideal--------------------------------------
// If a prior store's value is visible but wider than a short, narrow it by
// sign-extending through a left/right shift pair: (value << 16) >> 16.
Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* stored = can_see_stored_value(in(MemNode::Memory), phase);
  if (stored == NULL || phase->type(stored)->higher_equal(_type)) {
    // No visible store, or the value already fits a short: fall back to the
    // generic load transformations.
    return LoadNode::Ideal(phase, can_reshape);
  }
  Node* shifted = phase->transform(new (phase->C) LShiftINode(stored, phase->intcon(16)));
  return new (phase->C) RShiftINode(shifted, phase->intcon(16));
}
// Constant-fold a visible constant store through the short sign-extension.
const Type* LoadSNode::Value(PhaseTransform *phase) const {
  Node* stored = can_see_stored_value(in(MemNode::Memory), phase);
  if (stored != NULL && stored->is_Con() &&
      !stored->bottom_type()->higher_equal(_type)) {
    // Sign-extend the low 16 bits of the stored constant.
    const int raw = stored->get_int();
    return TypeInt::make((raw << 16) >> 16);
  }
  return LoadNode::Value(phase);
}
//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method: produces a LoadNKlass+DecodeNKlass pair for
// narrow-klass addresses (LP64 with compressed class pointers), otherwise a
// plain LoadKlass.
Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk) {
  Compile* C = gvn.C;
  // Sanity-check the address type against the created node type.
  const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
  assert(adr_type != NULL, "expecting TypeKlassPtr");
#ifdef _LP64
  if (adr_type->is_ptr_to_narrowklass()) {
    assert(UseCompressedClassPointers, "no compressed klasses");
    // Load the compressed klass, then decompress it.
    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
    return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
  }
#endif
  assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
  return new (C) LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
}
//------------------------------Value------------------------------------------
// Klass loads share their type computation with LoadNKlassNode.
const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
  const Type* klass_type = klass_value_common(phase);
  return klass_type;
}
// Returning false prevents LoadNode::Ideal from clearing this node's control
// edge, so klass loads always keep their control input.
// NOTE(review): presumably the control pins the klass load below a guarding
// check — confirm against the callers of can_remove_control().
bool LoadKlassNode::can_remove_control() const {
  return false;
}
//------------------------------Value------------------------------------------
// Shared type computation for klass loads: return the most precise klass
// type derivable from the address type (Class mirrors, object headers,
// array klasses, and Klass metadata fields).
const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP.
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if (t1 == Type::TOP) return Type::TOP;
  Node *adr = in(MemNode::Address);
  const Type *t2 = phase->type( adr );
  if (t2 == Type::TOP) return Type::TOP;
  const TypePtr *tp = t2->is_ptr();
  if (TypePtr::above_centerline(tp->ptr()) ||
      tp->ptr() == TypePtr::Null) return Type::TOP;

  // Return a more precise klass, if possible.
  const TypeInstPtr *tinst = tp->isa_instptr();
  if (tinst != NULL) {
    ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
    int offset = tinst->offset();
    if (ik == phase->C->env()->Class_klass()
        && (offset == java_lang_Class::klass_offset_in_bytes() ||
            offset == java_lang_Class::array_klass_offset_in_bytes())) {
      // Loading the hidden klass / array-klass field of a Class mirror.
      ciType* t = tinst->java_mirror_type();
      if (t != NULL) {
        // Constant mirror oop => constant klass.
        if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
          if (t->is_void()) {
            // void arrays cannot exist; users of this result must null-check.
            return TypePtr::NULL_PTR;
          }
          return TypeKlassPtr::make(ciArrayKlass::make(t));
        }
        if (!t->is_klass()) {
          // A primitive Class (e.g. int.class) has NULL in its klass field.
          return TypePtr::NULL_PTR;
        }
        return TypeKlassPtr::make(t->as_klass());
      }
      // Non-constant mirror: fall through.
    }
    if( !ik->is_loaded() )
      return _type; // Bail out if not loaded
    if (offset == oopDesc::klass_offset_in_bytes()) {
      // Loading the klass field of an object header.
      if (tinst->klass_is_exact()) {
        return TypeKlassPtr::make(ik);
      }
      // Become precise if the klass has no subklasses and is no interface;
      // non-final leaf types need a recorded dependency for recompilation
      // should a subclass appear later.
      if (!ik->is_interface() && !ik->has_subklass()) {
        if (!ik->is_final()) {
          phase->C->dependencies()->assert_leaf_type(ik);
        }
        return TypeKlassPtr::make(ik);
      }
      // Otherwise, return the root of the possible klasses.
      return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/);
    }
  }

  // Check for loading the klass from an array's object header.
  const TypeAryPtr *tary = tp->isa_aryptr();
  if( tary != NULL ) {
    ciKlass *tary_klass = tary->klass();
    if (tary_klass != NULL // can be NULL when at BOTTOM or TOP
        && tary->offset() == oopDesc::klass_offset_in_bytes()) {
      if (tary->klass_is_exact()) {
        return TypeKlassPtr::make(tary_klass);
      }
      ciArrayKlass *ak = tary->klass()->as_array_klass();
      if( ak->is_obj_array_klass() ) {
        // Object arrays: precision depends on the base element klass.
        assert( ak->is_loaded(), "" );
        ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
        if( base_k->is_loaded() && base_k->is_instance_klass() ) {
          ciInstanceKlass* ik = base_k->as_instance_klass();
          // Precise if the element klass has no subklasses and is no
          // interface (with a leaf-type dependency for non-final klasses).
          if (!ik->is_interface() && !ik->has_subklass()) {
            if (!ik->is_final()) {
              phase->C->dependencies()->assert_leaf_type(ik);
            }
            return TypeKlassPtr::make(ak);
          }
        }
        return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
      } else { // Found a type-array?
        assert( ak->is_type_array_klass(), "" );
        return TypeKlassPtr::make(ak); // These are always precise
      }
    }
  }

  // Check for loading a klass field from a Klass metaobject.
  const TypeKlassPtr *tkls = tp->isa_klassptr();
  if (tkls != NULL && !StressReflectiveCode) {
    ciKlass* klass = tkls->klass();
    if( !klass->is_loaded() )
      return _type; // Bail out if not loaded
    if( klass->is_obj_array_klass() &&
        tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
      ciKlass* elem = klass->as_obj_array_klass()->element_klass();
      // Propagate the array's precision (ptr kind) to the element klass —
      // the element may itself have subclasses.
      return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
    }
    if( klass->is_instance_klass() && tkls->klass_is_exact() &&
        tkls->offset() == in_bytes(Klass::super_offset())) {
      // Klass::_super is constant for an exact klass.
      ciKlass* sup = klass->as_instance_klass()->super();
      return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
    }
  }

  // Bailout case: fall back to the generic load type computation.
  return LoadNode::Value(phase);
}
//------------------------------Identity---------------------------------------
// Delegates to the shared klass identity rules (cleans up reflective code).
Node* LoadKlassNode::Identity( PhaseTransform *phase ) {
  return klass_identity_common(phase);
}
//------------------------------klass_identity_common--------------------------
// Shared identity rules for klass loads: fold loads of an allocation's klass
// and collapse mirror->klass->mirror round trips in reflective code.
Node* LoadNode::klass_identity_common(PhaseTransform *phase ) {
  // First try the generic load identity.
  Node* x = LoadNode::Identity(phase);
  if (x != this) return x;

  // Take apart the address into a base oop and an offset; bail if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL) return this;
  const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
  if (toop == NULL) return this;

  // A load of an object-header klass field from a fresh allocation can be
  // replaced by the allocation's klass node directly.
  if (offset == oopDesc::klass_offset_in_bytes()) {
    Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
    if (allocated_klass != NULL) {
      return allocated_klass;
    }
  }

  // Loading the hidden klass (or array-klass) field of a Class mirror: if
  // the mirror itself was loaded from Klass::_java_mirror (respectively
  // ArrayKlass::_component_mirror), the round trip collapses to the base
  // klass pointer of the inner load's address.
  if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
      && (offset == java_lang_Class::klass_offset_in_bytes() ||
          offset == java_lang_Class::array_klass_offset_in_bytes())) {
    if (base->is_Load()) {
      Node* adr2 = base->in(MemNode::Address);
      const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
      if (tkls != NULL && !tkls->empty()
          && (tkls->klass()->is_instance_klass() ||
              tkls->klass()->is_array_klass())
          && adr2->is_AddP()
          ) {
        // Select which mirror field must match the inner load's offset.
        int mirror_field = in_bytes(Klass::java_mirror_offset());
        if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
          mirror_field = in_bytes(ArrayKlass::component_mirror_offset());
        }
        if (tkls->offset() == mirror_field) {
          return adr2->in(AddPNode::Base);
        }
      }
    }
  }

  return this;
}
//------------------------------Value------------------------------------------
// Narrow-klass loads use the shared klass type computation, then compress
// the result; TOP is propagated unchanged.
const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const {
  const Type* klass_type = klass_value_common(phase);
  return (klass_type == Type::TOP) ? klass_type
                                   : klass_type->make_narrowklass();
}
//------------------------------Identity---------------------------------------
// Apply the shared klass identity rules; if the replacement node is not
// already narrow, re-compress it with an EncodePKlass.
Node* LoadNKlassNode::Identity( PhaseTransform *phase ) {
  Node *x = klass_identity_common(phase);

  const Type *t = phase->type( x );
  if( t == Type::TOP ) return x;
  if( t->isa_narrowklass()) return x;
  assert (!t->isa_narrowoop(), "no narrow oop here");

  return phase->transform(new (phase->C) EncodePKlassNode(x, t->make_narrowklass()));
}
//------------------------------Value------------------------------------------
// The type of an array-length load is the array type's size; non-array
// addresses fall back to the node's declared type.
const Type *LoadRangeNode::Value( PhaseTransform *phase ) const {
  // Either input being TOP makes the result TOP.
  const Type* mem_t = phase->type(in(MemNode::Memory));
  if (mem_t == Type::TOP) return Type::TOP;
  Node* adr = in(MemNode::Address);
  const Type* adr_t = phase->type(adr);
  if (adr_t == Type::TOP) return Type::TOP;
  const TypePtr* ptr_t = adr_t->is_ptr();
  if (TypePtr::above_centerline(ptr_t->ptr())) return Type::TOP;
  const TypeAryPtr* ary_t = ptr_t->isa_aryptr();
  if (ary_t == NULL) return _type;
  return ary_t->size();
}
//------------------------------Ideal------------------------------------------
// Feed through the length from an AllocateArray when an improved (ideal)
// length node is available.
Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p) return (p == NodeSentinel) ? NULL : p;

  // Take apart the address into a base oop and an offset; bail if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL) return NULL;
  const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
  if (tary == NULL) return NULL;

  // We can fetch the length directly through an AllocateArrayNode.
  // This works even if the length is not constant.
  if (offset == arrayOopDesc::length_offset_in_bytes()) {
    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
    if (alloc != NULL) {
      Node* allocated_length = alloc->Ideal_length();
      Node* len = alloc->make_ideal_length(tary, phase);
      if (allocated_length != len) {
        // The ideal length differs from the raw allocated length: use it.
        return len;
      }
    }
  }

  return NULL;
}
//------------------------------Identity---------------------------------------
// Feed through the length from an AllocateArray when the existing length
// node is already the ideal one.
Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
  // First try the generic integer-load identity.
  Node* x = LoadINode::Identity(phase);
  if (x != this) return x;

  // Take apart the address into a base oop and an offset; bail if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL) return this;
  const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
  if (tary == NULL) return this;

  // We can fetch the length directly through an AllocateArrayNode.
  // This works even if the length is not constant.
  if (offset == arrayOopDesc::length_offset_in_bytes()) {
    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
    if (alloc != NULL) {
      Node* allocated_length = alloc->Ideal_length();
      // The third argument (false) keeps make_ideal_length from creating a
      // new node; Identity may only return existing nodes.
      Node* len = alloc->make_ideal_length(tary, phase, false);
      if (allocated_length == len) {
        // Return the allocated length only when it is already ideal.
        return allocated_length;
      }
    }
  }

  return this;
}
//=============================================================================
//----------------------------StoreNode::make----------------------------------
// Polymorphic factory method: create the Store node variant matching the
// basic type being stored; oop/klass stores are compressed on LP64 when the
// address calls for a narrow form.
StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
  assert((mo == unordered || mo == release), "unexpected");
  Compile* C = gvn.C;
  assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
         ctl != NULL, "raw memory operations should have control edge");

  switch (bt) {
  // Booleans are normalized to 0/1 before being stored as a byte.
  case T_BOOLEAN: val = gvn.transform(new (C) AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
  case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val, mo);
  case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val, mo);
  case T_CHAR:
  case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val, mo);
  case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo);
  case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val, mo);
  case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo);
  case T_METADATA:
  case T_ADDRESS:
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      // Compressed oop store: encode the value, then store the narrow oop.
      val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
      return new (C) StoreNNode(ctl, mem, adr, adr_type, val, mo);
    } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                adr->bottom_type()->isa_rawptr())) {
      // Compressed klass store: encode, then store the narrow klass.
      val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
    }
#endif
    {
      // Full-width pointer store.
      return new (C) StorePNode(ctl, mem, adr, adr_type, val, mo);
    }
  }
  ShouldNotReachHere();
  return (StoreNode*)NULL;
}
// Factory for a long store that must be performed as a single indivisible
// access (e.g. volatile jlong on 32-bit platforms).
StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo,
                            /*require_atomic_access=*/ true);
}
// Factory for a double store that must be performed as a single indivisible
// access (e.g. volatile jdouble on 32-bit platforms).
StoreDNode* StoreDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
  return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo,
                            /*require_atomic_access=*/ true);
}
// A store produces a memory state, not a value.
const Type *StoreNode::bottom_type() const {
return Type::MEMORY;
}
// Stores are never value-numbered together: NO_HASH keeps them out of the
// GVN hash table (each store is its own unique memory effect).
uint StoreNode::hash() const {
return NO_HASH;
}
// Ideal transform for stores:
// 1. common Mem idealization;
// 2. a back-to-back store to the same address makes the earlier (smaller or
//    equal sized) store dead — bypass it;
// 3. under ReduceFieldZeroing, let an InitializeNode capture this store so
//    redundant zeroing of the fresh object can be elided.
Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* p = MemNode::Ideal_common(phase, can_reshape);
if (p) return (p == NodeSentinel) ? NULL : p;
Node* mem = in(MemNode::Memory);
Node* address = in(MemNode::Address);
// Back-to-back stores to the same address?  (StoreCM card marks are kept.)
if (mem->is_Store() && mem->in(MemNode::Address)->eqv_uncast(address) &&
mem->Opcode() != Op_StoreCM) {
assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
assert(Opcode() == mem->Opcode() ||
phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
(is_mismatched_access() || mem->as_Store()->is_mismatched_access()),
"no mismatched stores, except on raw memory");
if (mem->outcnt() == 1 && // check for intervening uses
mem->as_Store()->memory_size() <= this->memory_size()) {
// The earlier store is fully overwritten: skip over it.
if (can_reshape) { // (%%% is this an anachronism?)
set_req_X(MemNode::Memory, mem->in(MemNode::Memory),
phase->is_IterGVN());
} else {
set_req(MemNode::Memory, mem->in(MemNode::Memory));
}
return this;
}
}
// Try to push this store down into the object's InitializeNode.
if (ReduceFieldZeroing && /*can_reshape &&*/
mem->is_Proj() && mem->in(0)->is_Initialize()) {
InitializeNode* init = mem->in(0)->as_Initialize();
intptr_t offset = init->can_capture_store(this, phase, can_reshape);
if (offset > 0) {
Node* moved = init->capture_store(this, offset, phase, can_reshape);
// If the Initialize captured me it made a raw copy; this node must go away.
if (moved != NULL) {
// Wrap memory in a merge so Ideal returns a new node (folds this one away).
mem = MergeMemNode::make(phase->C, mem);
return mem; // fold me away
}
}
}
return NULL; // No further progress
}
// Type of a store: TOP if any of its memory/address/value inputs is TOP
// (i.e. the store is unreachable), otherwise the MEMORY type.
const Type *StoreNode::Value( PhaseTransform *phase ) const {
  const int checked_edges[] = { MemNode::Memory, MemNode::Address, MemNode::ValueIn };
  for (int k = 0; k < 3; k++) {
    if (phase->type(in(checked_edges[k])) == Type::TOP) {
      return Type::TOP;
    }
  }
  return Type::MEMORY;
}
// Identity for stores: detect useless stores and return the incoming memory
// state instead.  A store is useless when it stores back a value just loaded
// from the same location, repeats an identical store, or re-zeroes memory
// that is already zero (fresh allocation / equal previous store).
Node *StoreNode::Identity( PhaseTransform *phase ) {
Node* mem = in(MemNode::Memory);
Node* adr = in(MemNode::Address);
Node* val = in(MemNode::ValueIn);
Node* result = this;
// Load-then-store of the same value back to the same location.
if (val->is_Load() &&
val->in(MemNode::Address)->eqv_uncast(adr) &&
val->in(MemNode::Memory )->eqv_uncast(mem) &&
val->as_Load()->store_Opcode() == Opcode()) {
result = mem;
}
// Two consecutive identical stores (same kind, address and value).
if (mem->is_Store() &&
mem->in(MemNode::Address)->eqv_uncast(adr) &&
mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
mem->Opcode() == Opcode()) {
result = mem;
}
// Storing a zero over memory known to be zero already.
if (result == this &&
ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
// Memory straight out of an Allocate is pre-zeroed.
if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
result = mem;
}
if (result == this) {
// Otherwise look back through the memory chain for an equal prior store.
Node* prev_mem = find_previous_store(phase);
if (prev_mem != NULL) {
Node* prev_val = can_see_stored_value(prev_mem, phase);
if (prev_val != NULL && phase->eqv(prev_val, val)) {
result = mem;
}
}
}
}
// When the store goes away during IGVN, its trailing membar (if any) is
// no longer needed either.
if (result != this && phase->is_IterGVN() != NULL) {
MemBarNode* trailing = trailing_membar();
if (trailing != NULL) {
#ifdef ASSERT
const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
assert(t_oop == NULL || t_oop->is_known_instance_field(), "only for non escaping objects");
#endif
PhaseIterGVN* igvn = phase->is_IterGVN();
trailing->remove(igvn);
}
}
return result;
}
// During matching, only the address and stored-value edges participate in
// instruction selection (control and memory are chain edges).
uint StoreNode::match_edge(uint idx) const {
return idx == MemNode::Address || idx == MemNode::ValueIn;
}
// Stores compare equal only to themselves, so GVN never merges two stores.
uint StoreNode::cmp( const Node &n ) const {
return (&n == this); // Always fail except on self
}
// If the stored value is (x & mask) and the mask covers at least all the
// bits this store actually writes, the AndI is redundant: store x directly.
// Returns 'this' on progress, NULL otherwise.
Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) {
  Node* value = in(MemNode::ValueIn);
  if (value->Opcode() != Op_AndI) {
    return NULL;
  }
  const TypeInt* mask_type = phase->type(value->in(2))->isa_int();
  if (mask_type == NULL || !mask_type->is_con()) {
    return NULL; // mask is not a known constant
  }
  if ((mask_type->get_con() & mask) != mask) {
    return NULL; // mask clears bits the store would keep
  }
  set_req(MemNode::ValueIn, value->in(1));
  return this;
}
// If the stored value is ((x << N) >> N) — a sign-extension that discards no
// more than 'num_bits' high bits — the shifts are invisible to a narrow
// store, so store x directly.  Returns 'this' on progress, NULL otherwise.
Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) {
  Node* value = in(MemNode::ValueIn);
  if (value->Opcode() != Op_RShiftI) {
    return NULL;
  }
  const TypeInt* rshift_count = phase->type(value->in(2))->isa_int();
  if (rshift_count == NULL || !rshift_count->is_con() ||
      rshift_count->get_con() > num_bits) {
    return NULL; // not a constant shift, or shifts out stored bits
  }
  Node* shl = value->in(1);
  if (shl->Opcode() != Op_LShiftI) {
    return NULL;
  }
  const TypeInt* lshift_count = phase->type(shl->in(2))->isa_int();
  if (lshift_count == NULL || !lshift_count->is_con() ||
      lshift_count->get_con() != rshift_count->get_con()) {
    return NULL; // shift counts must match to form a sign extension
  }
  set_req(MemNode::ValueIn, shl->in(1));
  return this;
}
// Returns true if no use of this store's address can observe the stored
// value, i.e. the address is a known (non-aliased) instance field and no
// Load or LoadStore node reads through it.  Conservative: any load-like use
// of the address makes us answer false.
bool StoreNode::value_never_loaded( PhaseTransform *phase) const {
  Node *adr = in(Address);
  const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr();
  if (adr_oop == NULL)
    return false;
  if (!adr_oop->is_known_instance_field())
    return false; // if not a distinct instance, there may be aliases of the address
  // Scan every user of the address for a read through it.
  for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
    Node *use = adr->fast_out(i);
    if (use->is_Load() || use->is_LoadStore()) {
      return false;
    }
  }
  return true;
}
// For a release store, find the trailing MemBarVolatile paired with it among
// this node's direct users.  Returns NULL for non-release stores or when no
// trailing membar exists.
MemBarNode* StoreNode::trailing_membar() const {
if (is_release()) {
MemBarNode* trailing_mb = NULL;
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
Node* u = fast_out(i);
if (u->is_MemBar()) {
if (u->as_MemBar()->trailing_store()) {
assert(u->Opcode() == Op_MemBarVolatile, "");
assert(trailing_mb == NULL, "only one");
trailing_mb = u->as_MemBar();
#ifdef ASSERT
// Sanity: trailing membar must point back to a matching leading one.
Node* leading = u->as_MemBar()->leading_membar();
assert(leading->Opcode() == Op_MemBarRelease, "incorrect membar");
assert(leading->as_MemBar()->leading_store(), "incorrect membar pair");
assert(leading->as_MemBar()->trailing_membar() == u, "incorrect membar pair");
#endif
} else {
// Any other membar user must not be part of a leading/trailing pair.
assert(u->as_MemBar()->standalone(), "");
}
}
}
return trailing_mb;
}
return NULL;
}
// A byte store keeps only the low 8 bits of the value: strip a redundant
// mask (& 0xFF-superset) or a 24-bit sign-extension before the generic
// store idealizations.
Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){
  Node* progress = StoreNode::Ideal_masked_input(phase, 0xFF);
  if (progress == NULL) {
    progress = StoreNode::Ideal_sign_extended_input(phase, 24);
  }
  return (progress != NULL) ? progress : StoreNode::Ideal(phase, can_reshape);
}
// A char/short store keeps only the low 16 bits of the value: strip a
// redundant mask (& 0xFFFF-superset) or a 16-bit sign-extension before the
// generic store idealizations.
Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){
  Node* progress = StoreNode::Ideal_masked_input(phase, 0xFFFF);
  if (progress == NULL) {
    progress = StoreNode::Ideal_sign_extended_input(phase, 16);
  }
  return (progress != NULL) ? progress : StoreNode::Ideal(phase, can_reshape);
}
// A card-mark store is redundant when the oop store it covers is known to
// write NULL (no old-to-young pointer created); fold to the incoming memory.
Node *StoreCMNode::Identity( PhaseTransform *phase ) {
  Node* covered_store = in(MemNode::OopStore);
  if (!covered_store->is_Store()) {
    return this;
  }
  if (phase->type(covered_store->in(MemNode::ValueIn)) != TypePtr::NULL_PTR) {
    return this;
  }
  return in(MemNode::Memory);
}
// Ideal for card-mark stores: run the generic store idealizations, then
// narrow the OopStore edge through a MergeMem to the covered oop slice.
Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
Node* progress = StoreNode::Ideal(phase, can_reshape);
if (progress != NULL) return progress;
Node* my_store = in(MemNode::OopStore);
if (my_store->is_MergeMem()) {
// Bypass the merge: track only the slice of the oop being card-marked.
Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
set_req(MemNode::OopStore, mem);
return this;
}
return NULL;
}
// Type of a card-mark store: TOP if any input (including the extra OopStore
// edge) is TOP, otherwise defer to the generic StoreNode::Value.
const Type *StoreCMNode::Value( PhaseTransform *phase ) const {
  const int checked_edges[] = { MemNode::Memory, MemNode::Address,
                                MemNode::ValueIn, MemNode::OopStore };
  for (int k = 0; k < 4; k++) {
    if (phase->type(in(checked_edges[k])) == Type::TOP) {
      return Type::TOP;
    }
  }
  return StoreNode::Value( phase );
}
// The memory projection of a load/store conditional has a fixed type.
const Type * SCMemProjNode::Value( PhaseTransform *phase ) const
{
return bottom_type();
}
// Base constructor for atomic read-modify-write nodes (CAS, get-and-set,
// get-and-add): wires control/memory/address/value edges and records the
// result type and address type.
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
: Node(required),
_type(rt),
_adr_type(at)
{
init_req(MemNode::Control, c );
init_req(MemNode::Memory , mem);
init_req(MemNode::Address, adr);
init_req(MemNode::ValueIn, val);
init_class_id(Class_LoadStore);
}
// Register class of the result is determined by the node's value type.
uint LoadStoreNode::ideal_reg() const {
return _type->ideal_reg();
}
bool LoadStoreNode::result_not_used() const {
for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
Node *x = fast_out(i);
if (x->Opcode() == Op_SCMemProj) continue;
return false;
}
return true;
}
// Find the trailing MemBarAcquire paired with this atomic load/store among
// its direct users, or NULL if none.
MemBarNode* LoadStoreNode::trailing_membar() const {
MemBarNode* trailing = NULL;
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
Node* u = fast_out(i);
if (u->is_MemBar()) {
if (u->as_MemBar()->trailing_load_store()) {
assert(u->Opcode() == Op_MemBarAcquire, "");
assert(trailing == NULL, "only one");
trailing = u->as_MemBar();
#ifdef ASSERT
// Sanity: leading/trailing membars must reference each other.
Node* leading = trailing->leading_membar();
assert(support_IRIW_for_not_multiple_copy_atomic_cpu || leading->Opcode() == Op_MemBarRelease, "incorrect membar");
assert(leading->as_MemBar()->leading_load_store(), "incorrect membar pair");
assert(leading->as_MemBar()->trailing_membar() == trailing, "incorrect membar pair");
#endif
} else {
assert(u->as_MemBar()->standalone(), "wrong barrier kind");
}
}
}
return trailing;
}
// Node size for cloning; required because this class adds fields to Node.
uint LoadStoreNode::size_of() const { return sizeof(*this); }
// Conditional variant (e.g. compare-and-swap): adds the expected-value edge
// and produces a boolean success result.
LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
init_req(ExpectedIn, ex );
}
// The memory slice cleared is derived from the destination address (input 3).
const TypePtr* ClearArrayNode::adr_type() const {
Node *adr = in(3);
return MemNode::calculate_adr_type(adr->bottom_type());
}
// Inputs 2 (count) and 3 (destination) participate in matching; control and
// memory do not.
uint ClearArrayNode::match_edge(uint idx) const {
return idx > 1;
}
// Clearing zero words is a no-op: fold to the incoming memory state.
Node *ClearArrayNode::Identity( PhaseTransform *phase ) {
return phase->type(in(2))->higher_equal(TypeX::ZERO) ? in(1) : this;
}
// Ideal for ClearArray: when the count is a small known constant, expand the
// bulk clear into an explicit chain of zeroing StoreL nodes, which later
// phases can optimize or elide individually.
Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
const int unit = BytesPerLong;
const TypeX* t = phase->type(in(2))->isa_intptr_t();
if (!t) return NULL;
if (!t->is_con()) return NULL;
intptr_t raw_count = t->get_con();
intptr_t size = raw_count;
// Normalize the count to bytes if the platform counts in words.
if (!Matcher::init_array_count_is_in_bytes) size *= unit;
// Only whole positive multiples of 8 bytes are expanded here.
if (size <= 0 || size % unit != 0) return NULL;
intptr_t count = size / unit;
// Large clears stay as a single ClearArray (bulk instruction / call).
if (size > Matcher::init_array_short_size) return NULL;
Node *mem = in(1);
if( phase->type(mem)==Type::TOP ) return NULL;
Node *adr = in(3);
const Type* at = phase->type(adr);
if( at==Type::TOP ) return NULL;
const TypePtr* atp = at->isa_ptr();
// The stores below walk the whole span, so widen the slice to OffsetBot.
if (atp == NULL) atp = TypePtr::BOTTOM;
else atp = atp->add_offset(Type::OffsetBot);
if( adr->Opcode() != Op_AddP ) Unimplemented();
Node *base = adr->in(1);
Node *zero = phase->makecon(TypeLong::ZERO);
Node *off = phase->MakeConX(BytesPerLong);
// First zeroing store at the start address...
mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
count--;
// ...then one per remaining 8-byte word, each chained on the previous.
while( count-- ) {
mem = phase->transform(mem);
adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
}
return mem;
}
// Step the memory walk '*np' past a ClearArray node.  Returns false when the
// ClearArray belongs to the very instance we are tracking (instance_id) and
// therefore cannot be bypassed; otherwise advances *np to the memory edge
// feeding the clear's allocation (or its Initialize) and returns true.
bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
  Node* n = *np;
  assert(n->is_ClearArray(), "sanity");
  intptr_t offset;
  AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
  // ClearArray nodes are generated only following allocations, so the
  // allocation must still be findable from the destination address.
  assert(alloc != NULL, "should have allocation");
  if (alloc->_idx == instance_id) {
    // Cannot bypass initialization of the instance we are looking for.
    return false;
  }
  // Otherwise skip it: advance *np to the memory state before this
  // allocation's initialization.
  InitializeNode* init = alloc->initialization();
  if (init != NULL)
    *np = init->in(TypeFunc::Memory);
  else
    *np = alloc->in(TypeFunc::Memory);
  return true;
}
// Clear memory from a constant start offset up to a variable end offset.
// If the start is not 8-byte aligned, emit one 4-byte zero store to align
// it, then delegate to the variable-offset overload.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
intptr_t start_offset,
Node* end_offset,
PhaseGVN* phase) {
Compile* C = phase->C;
intptr_t offset = start_offset;
int unit = BytesPerLong;
if ((offset % unit) != 0) {
// Align the start with a single int-sized zero store.
Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
adr = phase->transform(adr);
const TypePtr* atp = TypeRawPtr::BOTTOM;
mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
mem = phase->transform(mem);
offset += BytesPerInt;
}
assert((offset % unit) == 0, "");
// Remainder is 8-byte aligned; clear it with the Node-offset overload.
return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
}
// Clear memory between two variable byte offsets by emitting a ClearArray
// node over the [start, end) span of 'dest'.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
Node* start_offset,
Node* end_offset,
PhaseGVN* phase) {
// Identical offset nodes mean an empty span: nothing to clear.
if (start_offset == end_offset) {
return mem;
}
Compile* C = phase->C;
int unit = BytesPerLong;
Node* zbase = start_offset;
Node* zend = end_offset;
// Convert byte offsets to a word count when the platform expects words.
if (!Matcher::init_array_count_is_in_bytes) {
Node* shift = phase->intcon(exact_log2(unit));
zbase = phase->transform( new(C) URShiftXNode(zbase, shift) );
zend = phase->transform( new(C) URShiftXNode(zend, shift) );
}
Node* zsize = phase->transform( new(C) SubXNode(zend, zbase) );
Node* adr = phase->transform( new(C) AddPNode(dest, dest, start_offset) );
mem = new (C) ClearArrayNode(ctl, mem, zsize, adr);
return phase->transform(mem);
}
// Clear memory between two constant byte offsets.  The bulk is cleared in
// 8-byte units; a trailing 4-byte remainder (end must be int-aligned) gets
// one final 32-bit zero store.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
intptr_t start_offset,
intptr_t end_offset,
PhaseGVN* phase) {
if (start_offset == end_offset) {
// nothing to do
return mem;
}
Compile* C = phase->C;
assert((end_offset % BytesPerInt) == 0, "odd end offset");
// Round the end down to an 8-byte boundary for the bulk clear.
intptr_t done_offset = end_offset;
if ((done_offset % BytesPerLong) != 0) {
done_offset -= BytesPerInt;
}
if (done_offset > start_offset) {
mem = clear_memory(ctl, mem, dest,
start_offset, phase->MakeConX(done_offset), phase);
}
if (done_offset < end_offset) { // emit the final 32-bit store
Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
adr = phase->transform(adr);
const TypePtr* atp = TypeRawPtr::BOTTOM;
mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
mem = phase->transform(mem);
done_offset += BytesPerInt;
}
assert(done_offset == end_offset, "");
return mem;
}
// Only the two argument edges (2 and 3) participate in matching.
uint StrIntrinsicNode::match_edge(uint idx) const {
return idx == 2 || idx == 3;
}
// Ideal for string intrinsics: clean up a dead region, then (during IGVN)
// narrow the memory input through a MergeMem to this node's own alias slice.
Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Dead control path: nothing more to do.
if (in(0) && in(0)->is_top()) return NULL;
if (can_reshape) {
Node* mem = phase->transform(in(MemNode::Memory));
uint alias_idx = phase->C->get_alias_index(adr_type());
// If memory comes through a merge, use only our slice of it.
mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
if (mem != in(MemNode::Memory)) {
set_req(MemNode::Memory, mem);
return this;
}
}
return NULL;
}
// TOP control means the intrinsic is unreachable; otherwise its fixed type.
const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
return bottom_type();
}
// Only the argument edges participate in matching.
uint EncodeISOArrayNode::match_edge(uint idx) const {
return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
}
// The only idealization is removing a dead enclosing region.
Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return remove_dead_region(phase, can_reshape) ? this : NULL;
}
// TOP control means the node is unreachable; otherwise its fixed type.
const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
return bottom_type();
}
// Memory-barrier constructor: a MultiNode with the usual control/memory
// tuple inputs stubbed to top, plus an optional precedent edge that pins
// the barrier after a particular node.
MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
: MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
_adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
#ifdef ASSERT
, _pair_idx(0)
#endif
{
init_class_id(Class_MemBar);
Node* top = C->top();
// I/O, frame and return-address slots are unused by barriers.
init_req(TypeFunc::I_O,top);
init_req(TypeFunc::FramePtr,top);
init_req(TypeFunc::ReturnAdr,top);
if (precedent != NULL)
init_req(TypeFunc::Parms, precedent);
}
// Barriers are never value-numbered together (NO_HASH keeps them unique).
uint MemBarNode::hash() const { return NO_HASH; }
// Barriers compare equal only to themselves, so GVN never merges two.
uint MemBarNode::cmp( const Node &n ) const {
return (&n == this); // Always fail except on self
}
// Factory: construct the concrete barrier subclass for the given opcode,
// over alias slice 'atp', with optional precedent node 'pn'.
MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
switch (opcode) {
case Op_MemBarAcquire: return new(C) MemBarAcquireNode(C, atp, pn);
case Op_LoadFence: return new(C) LoadFenceNode(C, atp, pn);
case Op_MemBarRelease: return new(C) MemBarReleaseNode(C, atp, pn);
case Op_StoreFence: return new(C) StoreFenceNode(C, atp, pn);
case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
case Op_MemBarVolatile: return new(C) MemBarVolatileNode(C, atp, pn);
case Op_MemBarCPUOrder: return new(C) MemBarCPUOrderNode(C, atp, pn);
case Op_Initialize: return new(C) InitializeNode(C, atp, pn);
case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C, atp, pn);
default: ShouldNotReachHere(); return NULL;
}
}
// Remove this barrier from the graph by wiring its memory and control
// projections straight through to its inputs.  A trailing barrier also
// removes its paired leading barrier.  Requires exactly the two projections
// as users; otherwise bail out untouched.
void MemBarNode::remove(PhaseIterGVN *igvn) {
if (outcnt() != 2) {
return;
}
if (trailing_store() || trailing_load_store()) {
MemBarNode* leading = leading_membar();
if (leading != NULL) {
assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars");
leading->remove(igvn);
}
}
igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
}
// Ideal for barriers: clean dead regions, then try to eliminate barriers
// that provably guard only non-escaping memory — an acquire/volatile whose
// precedent is a known-instance field access, or a release fencing a
// non-escaping allocation.
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Dead control: leave for other passes.
if (in(0) && in(0)->is_top()) {
return NULL;
}
// Only consider barriers that carry a precedent edge.
if (can_reshape && req() == (Precedent+1)) {
bool eliminate = false;
int opc = Opcode();
if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
Node* my_mem = in(MemBarNode::Precedent);
if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
// Look through a DecodeN whose input load is still otherwise used.
if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
Node* load_node = my_mem->in(1);
set_req(MemBarNode::Precedent, load_node);
phase->is_IterGVN()->_worklist.push(my_mem);
my_mem = load_node;
} else {
// Precedent is used only by this barrier: drop the dead edge.
assert(my_mem->unique_out() == this, "sanity");
del_req(Precedent);
phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
my_mem = NULL;
}
}
if (my_mem != NULL && my_mem->is_Mem()) {
const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
// A known (non-aliased) instance field at a fixed offset cannot be
// observed by other threads; the barrier is unnecessary.
if( t_oop != NULL && t_oop->is_known_instance_field() &&
t_oop->offset() != Type::OffsetBot &&
t_oop->offset() != Type::OffsetTop) {
eliminate = true;
}
}
} else if (opc == Op_MemBarRelease) {
// Final-field release for an allocation that does not escape.
Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
if ((alloc != NULL) && alloc->is_Allocate() &&
AARCH64_ONLY ( alloc->as_Allocate()->does_not_escape_thread() )
NOT_AARCH64 ( alloc->as_Allocate()->_is_non_escaping )
) {
eliminate = true;
}
}
if (eliminate) {
// Replace the barrier with a trivial constant; remove() rewires its
// control and memory projections.
PhaseIterGVN* igvn = phase->is_IterGVN();
remove(igvn);
return new (phase->C) ConINode(TypeInt::ZERO);
}
}
return NULL;
}
// A barrier with dead (or missing) control is TOP; otherwise it produces
// the standard membar tuple type.
const Type *MemBarNode::Value( PhaseTransform *phase ) const {
if( !in(0) ) return Type::TOP;
if( phase->type(in(0)) == Type::TOP )
return Type::TOP;
return TypeTuple::MEMBAR;
}
// During matching, the control and memory projections of a barrier become
// unmatched machine projections; no other projections are legal.
Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
switch (proj->_con) {
case TypeFunc::Control:
case TypeFunc::Memory:
return new (m->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
}
ShouldNotReachHere();
return NULL;
}
// Mark two barriers as a leading/trailing pair around a store; in debug
// builds both record the leading barrier's idx so the pairing can be checked.
void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
trailing->_kind = TrailingStore;
leading->_kind = LeadingStore;
#ifdef ASSERT
trailing->_pair_idx = leading->_idx;
leading->_pair_idx = leading->_idx;
#endif
}
// Mark two barriers as a leading/trailing pair around an atomic load/store;
// in debug builds both record the leading barrier's idx for pair checking.
void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
trailing->_kind = TrailingLoadStore;
leading->_kind = LeadingLoadStore;
#ifdef ASSERT
trailing->_pair_idx = leading->_idx;
leading->_pair_idx = leading->_idx;
#endif
}
// From a leading barrier, walk forward along CFG out-edges (depth-first,
// backtracking through multi-branch nodes via the 'multis' stack, with a
// visited set to survive cycles) until the paired trailing barrier is found.
MemBarNode* MemBarNode::trailing_membar() const {
ResourceMark rm;
Node* trailing = (Node*)this;
VectorSet seen(Thread::current()->resource_area());
Node_Stack multis(0);
do {
Node* c = trailing;
uint i = 0;
do {
trailing = NULL;
// Advance to the next CFG successor of c, starting at out-index i.
for (; i < c->outcnt(); i++) {
Node* next = c->raw_out(i);
if (next != c && next->is_CFG()) {
if (c->is_MultiBranch()) {
// Remember where to resume if this path dead-ends.
if (multis.node() == c) {
multis.set_index(i+1);
} else {
multis.push(c, i+1);
}
}
trailing = next;
break;
}
}
// Unvisited successor found: descend into it.
if (trailing != NULL && !seen.test_set(trailing->_idx)) {
break;
}
// Dead end or already seen: backtrack to the most recent branch point
// that still has untried successors.
while (multis.size() > 0) {
c = multis.node();
i = multis.index();
if (i < c->req()) {
break;
}
multis.pop();
}
} while (multis.size() > 0);
} while (!trailing->is_MemBar() || !trailing->as_MemBar()->trailing());
MemBarNode* mb = trailing->as_MemBar();
// Verify the found barrier really is our pair.
assert((mb->_kind == TrailingStore && _kind == LeadingStore) ||
(mb->_kind == TrailingLoadStore && _kind == LeadingLoadStore), "bad trailing membar");
assert(mb->_pair_idx == _pair_idx, "bad trailing membar");
return mb;
}
// From a trailing barrier, walk backward along control inputs (exploring
// all Region predecessors via the 'regions' stack, with a visited set)
// until the paired leading barrier is found, or NULL when every path has
// been exhausted without one.
MemBarNode* MemBarNode::leading_membar() const {
ResourceMark rm;
VectorSet seen(Thread::current()->resource_area());
Node_Stack regions(0);
Node* leading = in(0);
while (leading != NULL && (!leading->is_MemBar() || !leading->as_MemBar()->leading())) {
// Current node is dead, missing or already visited: backtrack to the
// next unexplored Region input.
while (leading == NULL || leading->is_top() || seen.test_set(leading->_idx)) {
leading = NULL;
while (regions.size() > 0 && leading == NULL) {
Node* r = regions.node();
uint i = regions.index();
if (i < r->req()) {
leading = r->in(i);
regions.set_index(i+1);
} else {
regions.pop();
}
}
if (leading == NULL) {
assert(regions.size() == 0, "all paths should have been tried");
return NULL;
}
}
if (leading->is_Region()) {
// Explore the first input now; remember to try the rest later.
regions.push(leading, 2);
leading = leading->in(1);
} else {
leading = leading->in(0);
}
}
#ifdef ASSERT
// Debug check: exactly one leading barrier should be reachable backwards,
// and it must be the one found above.
Unique_Node_List wq;
wq.push((Node*)this);
uint found = 0;
for (uint i = 0; i < wq.size(); i++) {
Node* n = wq.at(i);
if (n->is_Region()) {
for (uint j = 1; j < n->req(); j++) {
Node* in = n->in(j);
if (in != NULL && !in->is_top()) {
wq.push(in);
}
}
} else {
if (n->is_MemBar() && n->as_MemBar()->leading()) {
assert(n == leading, "consistency check failed");
found++;
} else {
Node* in = n->in(0);
if (in != NULL && !in->is_top()) {
wq.push(in);
}
}
}
}
assert(found == 1 || (found == 0 && leading == NULL), "consistency check failed");
#endif
if (leading == NULL) {
return NULL;
}
MemBarNode* mb = leading->as_MemBar();
// Verify the found barrier really is our pair.
assert((mb->_kind == LeadingStore && _kind == TrailingStore) ||
(mb->_kind == LeadingLoadStore && _kind == TrailingLoadStore), "bad leading membar");
assert(mb->_pair_idx == _pair_idx, "bad leading membar");
return mb;
}
// An InitializeNode is a raw-memory barrier attached to a fresh allocation;
// 'rawoop' is the raw pointer to the new object (the RawAddress input).
// Note: the base class is listed first in the initializer list — members are
// always initialized after the base regardless of listing order, and the
// previous member-first ordering drew -Wreorder warnings.
InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop)
  : MemBarNode(C, adr_type, rawoop),
    _is_complete(Incomplete), _does_not_escape(false)
{
  init_class_id(Class_Initialize);
  // Initializes always operate on the raw memory slice.
  assert(adr_type == Compile::AliasIdxRaw, "only valid atp");
  assert(in(RawAddress) == rawoop, "proper init");
}
// Only the RawAddress input needs a register (spillable); all other inputs
// take the empty mask.
const RegMask &InitializeNode::in_RegMask(uint idx) const {
if (idx == InitializeNode::RawAddress)
return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
return RegMask::Empty;
}
// Incoming memory state for a given alias slice: look through a MergeMem
// input to the requested slice, otherwise return the memory input as-is.
Node* InitializeNode::memory(uint alias_idx) {
  Node* mem = in(Memory);
  return mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
}
// True if this (still-incomplete) initialization carries any captured raw
// stores, i.e. some part of the object is known to be non-zero.
bool InitializeNode::is_non_zero() {
if (is_complete()) return false;
remove_extra_zeroes();
return (req() > RawStores);
}
// Mark the initialization finished; during IGVN, re-enqueue users so they
// can take advantage of the new completeness information.
void InitializeNode::set_complete(PhaseGVN* phase) {
assert(!is_complete(), "caller responsibility");
_is_complete = Complete;
PhaseIterGVN* igvn = phase->is_IterGVN();
if (igvn) igvn->add_users_to_worklist(this);
}
// If this allocation's initialization holds no non-zero stores, mark it
// complete (the object is fully zeroed).  Returns true on state change.
bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
InitializeNode* init = initialization();
if (init == NULL || init->is_complete()) return false;
init->remove_extra_zeroes();
if (init->is_non_zero()) return false;
init->set_complete(phase);
return true;
}
// Drop captured-store edges that are dead (top) or merely the primordial
// zero memory, compacting the remaining edges into place.
void InitializeNode::remove_extra_zeroes() {
if (req() == RawStores) return;
Node* zmem = zero_memory();
uint fill = RawStores;
for (uint i = fill; i < req(); i++) {
Node* n = in(i);
if (n->is_top() || n == zmem) continue; // skip
if (fill < i) set_req(fill, n); // compact
++fill;
}
// Trim the now-unused trailing edges (del_req shrinks req()).
while (fill < req()) {
del_req(fill);
}
}
// Constant byte offset of a captured store's address, or -1 when the node
// is not a store or its address cannot be reduced to base+constant.
intptr_t InitializeNode::get_store_offset(Node* st, PhaseTransform* phase) {
if (!st->is_Store()) return -1; // can happen to dead code via subsume_node
intptr_t offset = -1;
Node* base = AddPNode::Ideal_base_and_offset(st->in(MemNode::Address),
phase, offset);
if (base == NULL) return -1; // something is dead,
if (offset < 0) return -1; // dead, dead
return offset;
}
// Recursively check that value 'n' does not depend on this InitializeNode
// (no cycle) and that its controlling definitions dominate us, within a
// bounded complexity budget.  Returns true when the value is safe to hoist
// into the initialization.
bool InitializeNode::detect_init_independence(Node* n, int& count) {
if (n == NULL) return true; // (can this really happen?)
if (n->is_Proj()) n = n->in(0);
if (n == this) return false; // found a cycle
if (n->is_Con()) return true;
if (n->is_Start()) return true; // params, etc., are OK
if (n->is_Root()) return true; // even better
// Check the node's control: it must dominate this Initialize.
Node* ctl = n->in(0);
if (ctl != NULL && !ctl->is_top()) {
if (ctl->is_Proj()) ctl = ctl->in(0);
if (ctl == this) return false;
if (!MemNode::all_controls_dominate(n, this))
return false; // failed to prove a good control
}
// Bound the recursion so pathological graphs stay cheap.
if ((count += 1) > 20) return false; // complexity limit
for (uint i = 1; i < n->req(); i++) {
Node* m = n->in(i);
if (m == NULL || m == n || m->is_top()) continue;
uint first_i = n->find_edge(m);
if (i != first_i) continue; // process duplicate edge just once
if (!detect_init_independence(m, count)) {
return false;
}
}
return true;
}
// Decide whether store 'st' can be captured into this InitializeNode.
// Returns the store's positive constant offset into the new object on
// success, or 0 (FAIL) when the store must stay where it is.  The checks:
// shape of the store, control/memory directly off this Initialize, address
// resolving to our allocation, aligned access, an independent value, and —
// unless the init is completed by an arraycopy — no conflicting memory user
// on the same slice between the Initialize and the store.
intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) {
const int FAIL = 0;
if (st->req() != MemNode::ValueIn + 1)
return FAIL; // an inscrutable StoreNode (card mark?)
Node* ctl = st->in(MemNode::Control);
if (!(ctl != NULL && ctl->is_Proj() && ctl->in(0) == this))
return FAIL; // must be unconditional after the initialization
Node* mem = st->in(MemNode::Memory);
if (!(mem->is_Proj() && mem->in(0) == this))
return FAIL; // must not be preceded by other stores
Node* adr = st->in(MemNode::Address);
intptr_t offset;
AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset);
if (alloc == NULL)
return FAIL; // inscrutable address
if (alloc != allocation())
return FAIL; // wrong allocation! (store needs to float up)
int size_in_bytes = st->memory_size();
if ((size_in_bytes != 0) && (offset % size_in_bytes) != 0) {
return FAIL; // mismatched access
}
Node* val = st->in(MemNode::ValueIn);
int complexity_count = 0;
if (!detect_init_independence(val, complexity_count))
return FAIL; // stored value must be 'simple enough'
bool failed = false;
if (!is_complete_with_arraycopy()) {
const TypePtr* t_adr = phase->type(adr)->isa_ptr();
int alias_idx = phase->C->get_alias_index(t_adr);
ResourceMark rm;
Unique_Node_List mems;
// Walk the memory uses reachable from our raw-memory projection, looking
// for anything on the same slice that would observe or clobber the spot.
mems.push(mem);
Node* unique_merge = NULL;
for (uint next = 0; next < mems.size(); ++next) {
Node *m = mems.at(next);
for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
Node *n = m->fast_out(j);
if (n->outcnt() == 0) {
continue;
}
if (n == st) {
continue;
} else if (n->in(0) != NULL && n->in(0) != ctl) {
// Under other control: cannot interfere on this path.
continue;
} else if (n->is_MergeMem()) {
// Follow merges that route our slice through.
if (n->as_MergeMem()->memory_at(alias_idx) == m) {
mems.push(n);
}
} else if (n->is_Mem()) {
Node* other_adr = n->in(MemNode::Address);
if (other_adr == adr) {
// Same address: definite conflict.
failed = true;
break;
} else {
const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
if (other_t_adr != NULL) {
int other_alias_idx = phase->C->get_alias_index(other_t_adr);
if (other_alias_idx == alias_idx) {
assert(!n->is_Store(), "2 stores to same slice on same control?");
// A load on the same slice conflicts if it reads from our
// freshly allocated object.
Node* base = other_adr;
assert(base->is_AddP(), err_msg_res("should be addp but is %s", base->Name()));
base = base->in(AddPNode::Base);
if (base != NULL) {
base = base->uncast();
if (base->is_Proj() && base->in(0) == alloc) {
failed = true;
break;
}
}
}
}
}
} else {
// Unknown memory user: be conservative.
failed = true;
break;
}
}
}
}
if (failed) {
// Retry once more during IGVN, when dead users may have gone away.
if (!can_reshape) {
phase->C->record_for_igvn(st);
}
return FAIL;
}
return offset; // success
}
// Locate where a store covering [start, start+size) belongs among the
// captured-store edges (kept sorted by offset).  Returns:
//   0 (FAIL)  — capture impossible (overlap, garbage, size mismatch, ...);
//   -i        — no existing store; insert a new edge at position i;
//   i > 0     — edge i already holds a store at exactly this offset/size.
int InitializeNode::captured_store_insertion_point(intptr_t start,
int size_in_bytes,
PhaseTransform* phase) {
const int FAIL = 0, MAX_STORE = BytesPerLong;
if (is_complete())
return FAIL; // arraycopy got here first; punt
assert(allocation() != NULL, "must be present");
// Never capture into the object header.
if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL;
// Beyond the tracked-initialization window nothing is captured.
intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
if (start >= ti_limit) return FAIL;
for (uint i = InitializeNode::RawStores, limit = req(); ; ) {
if (i >= limit) return -(int)i; // not found; here is where to put it
Node* st = in(i);
intptr_t st_off = get_store_offset(st, phase);
if (st_off < 0) {
if (st != zero_memory()) {
return FAIL; // bail out if there is dead garbage
}
} else if (st_off > start) {
if (st_off < start + size_in_bytes) {
return FAIL; // the next store overlaps
}
return -(int)i; // not found; here is where to put it
} else if (st_off < start) {
if (size_in_bytes != 0 &&
start < st_off + MAX_STORE &&
start < st_off + st->as_Store()->memory_size()) {
return FAIL; // the previous store overlaps
}
} else {
if (size_in_bytes != 0 &&
st->as_Store()->memory_size() != size_in_bytes) {
return FAIL; // mismatched store size
}
return i;
}
++i;
}
}
// Look up the captured store covering [start, start+size): NULL when
// capture is impossible, the primordial zero memory when nothing was
// stored there, or the captured store node itself.
Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes,
PhaseTransform* phase) {
assert(stores_are_sane(phase), "");
int i = captured_store_insertion_point(start, size_in_bytes, phase);
if (i == 0) {
return NULL; // something is dead
} else if (i < 0) {
return zero_memory(); // just primordial zero bits here
} else {
Node* st = in(i); // here is the store at this position
assert(get_store_offset(st->as_Store(), phase) == start, "sanity");
return st;
}
}
// Build a raw address at a constant offset from the new object's base
// (the RawAddress input); offset 0 reuses the base directly.
Node* InitializeNode::make_raw_address(intptr_t offset,
PhaseTransform* phase) {
Node* addr = in(RawAddress);
if (offset != 0) {
Compile* C = phase->C;
addr = phase->transform( new (C) AddPNode(C->top(), addr,
phase->MakeConX(offset)) );
}
return addr;
}
// Capture store 'st' into this InitializeNode at constant offset 'start':
// clone it onto the raw-memory slice, chain it after any pre-existing
// captured store at that spot, and hang it on the proper edge.  Returns the
// raw clone, or NULL when capture is impossible.
Node* InitializeNode::capture_store(StoreNode* st, intptr_t start,
PhaseTransform* phase, bool can_reshape) {
assert(stores_are_sane(phase), "");
if (start < 0) return NULL;
assert(can_capture_store(st, phase, can_reshape) == start, "sanity");
Compile* C = phase->C;
int size_in_bytes = st->memory_size();
int i = captured_store_insertion_point(start, size_in_bytes, phase);
if (i == 0) return NULL; // bail out
Node* prev_mem = NULL; // raw memory for the captured store
if (i > 0) {
prev_mem = in(i); // there is a pre-existing store under this one
set_req(i, C->top()); // temporarily disconnect it
} else {
i = -i; // no pre-existing store
prev_mem = zero_memory(); // a slice of the newly allocated object
if (i > InitializeNode::RawStores && in(i-1) == prev_mem)
set_req(--i, C->top()); // reuse this edge; it has been folded away
else
ins_req(i, C->top()); // build a new edge
}
// Clone the store onto the raw slice: our control, the prior raw memory,
// and a raw address at the same offset.
Node* new_st = st->clone();
new_st->set_req(MemNode::Control, in(Control));
new_st->set_req(MemNode::Memory, prev_mem);
new_st->set_req(MemNode::Address, make_raw_address(start, phase));
new_st = phase->transform(new_st);
set_req(i, new_st);
// The caller must now kill the old store, since it is subsumed.
DEBUG_ONLY(Node* check_st = find_captured_store(start, size_in_bytes, phase));
assert(check_st == new_st || check_st == NULL, "must be findable");
assert(!is_complete(), "");
return new_st;
}
// Record a constant store of 'st_size' bytes at byte offset 'st_off' into
// the in-memory tile array (little-endian via direct byte addressing).
// Returns false for odd sizes/offsets (non power-of-two or misaligned).
static bool store_constant(jlong* tiles, int num_tiles,
intptr_t st_off, int st_size,
jlong con) {
if ((st_off & (st_size-1)) != 0)
return false; // strange store offset (assume size==2**N)
address addr = (address)tiles + st_off;
assert(st_off >= 0 && addr+st_size <= (address)&tiles[num_tiles], "oob");
// Punch the constant into the tiles at its natural width.
switch (st_size) {
case sizeof(jbyte): *(jbyte*) addr = (jbyte) con; break;
case sizeof(jchar): *(jchar*) addr = (jchar) con; break;
case sizeof(jint): *(jint*) addr = (jint) con; break;
case sizeof(jlong): *(jlong*) addr = (jlong) con; break;
default: return false; // strange store size (detect size!=2**N here)
}
return true; // return success to caller
}
//---------------------------coalesce_subword_stores---------------------------
// Gather captured constant stores into 64-bit "tiles" that image the start
// of the new object, then replay the tiles as fewer, wider int/long constant
// stores.  Stores that are already optimal (simple StoreL/StoreI constants)
// are left in place.
void
InitializeNode::coalesce_subword_stores(intptr_t header_size,
                                        Node* size_in_bytes,
                                        PhaseGVN* phase) {
  Compile* C = phase->C;
  assert(stores_are_sane(phase), "");
  // Counters for the diagnostic report at the end.
  int old_subword = 0, old_long = 0, new_int = 0, new_long = 0;
  // Only the first TrackedInitializationLimit heap words are modeled.
  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
  intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit);
  size_limit = MIN2(size_limit, ti_limit);
  size_limit = align_size_up(size_limit, BytesPerLong);
  int num_tiles = size_limit / BytesPerLong;
  // Use on-stack buffers for small objects, resource-arena arrays otherwise.
  // (Debug builds keep the stack buffers tiny so the arena path is exercised.)
  const int small_len = DEBUG_ONLY(true ? 3 :) 30; // keep stack frames small
  jlong  tiles_buf[small_len];
  Node*  nodes_buf[small_len];
  jlong  inits_buf[small_len];
  // tiles: the constant image; nodes: the captured store feeding each tile;
  // inits: a byte map (bytes set to 0xFF) of which bytes were initialized.
  jlong* tiles = ((num_tiles <= small_len) ? &tiles_buf[0]
                 : NEW_RESOURCE_ARRAY(jlong, num_tiles));
  Node** nodes = ((num_tiles <= small_len) ? &nodes_buf[0]
                 : NEW_RESOURCE_ARRAY(Node*, num_tiles));
  jlong* inits = ((num_tiles <= small_len) ? &inits_buf[0]
                 : NEW_RESOURCE_ARRAY(jlong, num_tiles));
  Copy::zero_to_bytes(tiles, sizeof(tiles[0]) * num_tiles);
  Copy::zero_to_bytes(nodes, sizeof(nodes[0]) * num_tiles);
  Copy::zero_to_bytes(inits, sizeof(inits[0]) * num_tiles);
  Node* zmem = zero_memory(); // initially zero memory state
  // Pass 1: absorb eligible constant stores into the tile image,
  // unhooking each absorbed store from this InitializeNode as we go.
  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < header_size)             continue; //skip (ignore header)
    if (st->in(MemNode::Memory) != zmem)  continue; //skip (odd store chain)
    int st_size = st->as_Store()->memory_size();
    if (st_off + st_size > size_limit)    break;    // beyond the modeled region
    // Mark the bytes this store initializes; bail on odd sizes/offsets.
    if (!store_constant(inits, num_tiles, st_off, st_size, (jlong) -1))
      continue;                 // skip (strange store size)
    const Type* val = phase->type(st->in(MemNode::ValueIn));
    if (!val->singleton())                continue; //skip (non-con store)
    BasicType type = val->basic_type();
    jlong con = 0;
    switch (type) {
    case T_INT:    con = val->is_int()->get_con();  break;
    case T_LONG:   con = val->is_long()->get_con(); break;
    case T_FLOAT:  con = jint_cast(val->getf());    break;
    case T_DOUBLE: con = jlong_cast(val->getd());   break;
    default:                              continue; //skip (odd store type)
    }
    if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
        st->Opcode() == Op_StoreL) {
      continue;                 // This StoreL is already optimal.
    }
    // Fold the constant into the tile image.
    store_constant(tiles, num_tiles, st_off, st_size, con);
    intptr_t j = st_off >> LogBytesPerLong;
    if (type == T_INT && st_size == BytesPerInt
        && (st_off & BytesPerInt) == BytesPerInt) {
      // This StoreI lands in the high half of a tile.  If combining both
      // halves would force a non-simple 64-bit constant, undo the capture
      // of the low-half StoreI too and keep the original StoreI nodes.
      jlong lcon = tiles[j];
      if (!Matcher::isSimpleConstant64(lcon) &&
          st->Opcode() == Op_StoreI) {
        jint* intcon = (jint*) &tiles[j];
        intcon[1] = 0;          // undo the store_constant()
        st = nodes[j];
        st_off -= BytesPerInt;
        con = intcon[0];
        if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) {
          assert(st_off >= header_size, "still ignoring header");
          assert(get_store_offset(st, phase) == st_off, "must be");
          assert(in(i-1) == zmem, "must be");
          DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn)));
          assert(con == tcon->is_int()->get_con(), "must be");
          // Roll back the effects of the earlier trip that swallowed st:
          intcon[0] = 0;        // undo store_constant()
          set_req(i-1, st);     // undo set_req(i, zmem)
          nodes[j] = NULL;      // undo nodes[j] = st
          --old_subword;        // undo ++old_subword
        }
        continue;               // This StoreI is already optimal.
      }
    }
    // Detach the captured store; its value now lives in the tile image.
    set_req(i, zmem);
    nodes[j] = st;              // record for the moment
    if (st_size < BytesPerLong) // something has changed
      ++old_subword;            // includes int/float, but who's counting...
    else  ++old_long;
  }
  if ((old_subword + old_long) == 0)
    return;                     // nothing more to do
  // Pass 2: replay the tile image as a minimal set of int/long stores.
  for (int j = 0; j < num_tiles; j++) {
    jlong con  = tiles[j];
    jlong init = inits[j];
    if (con == 0)  continue;
    jint con0,  con1;           // split the constant, address-wise
    jint init0, init1;          // split the init map, address-wise
    { union { jlong con; jint intcon[2]; } u;
      u.con = con;
      con0  = u.intcon[0];
      con1  = u.intcon[1];
      u.con = init;
      init0 = u.intcon[0];
      init1 = u.intcon[1];
    }
    Node* old = nodes[j];
    assert(old != NULL, "need the prior store");
    intptr_t offset = (j * BytesPerLong);
    // Prefer a single StoreL, but split into StoreI halves when the 64-bit
    // constant is awkward for the matcher, the tile straddles the header,
    // or one half is zero and already covered by full-word inits.
    bool split = !Matcher::isSimpleConstant64(con);
    if (offset < header_size) {
      assert(offset + BytesPerInt >= header_size, "second int counts");
      assert(*(jint*)&tiles[j] == 0, "junk in header");
      split = true;             // only the second word counts
    } else if (con0 == 0 && init0 == -1) {
      split = true;             // first word is covered by full inits
    } else if (con1 == 0 && init1 == -1) {
      split = true;             // second word is covered by full inits
    }
    Node* ctl = old->in(MemNode::Control);
    Node* adr = make_raw_address(offset, phase);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
    // Build one StoreL or up to two StoreI nodes for this tile.
    Node* st[2];
    intptr_t off[2];
    int nst = 0;
    if (!split) {
      ++new_long;
      off[nst]  = offset;
      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                  phase->longcon(con), T_LONG, MemNode::unordered);
    } else {
      // Omit either half if it is zero.
      if (con0 != 0) {
        ++new_int;
        off[nst]  = offset;
        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                    phase->intcon(con0), T_INT, MemNode::unordered);
      }
      if (con1 != 0) {
        ++new_int;
        offset += BytesPerInt;
        adr = make_raw_address(offset, phase);
        off[nst]  = offset;
        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                    phase->intcon(con1), T_INT, MemNode::unordered);
      }
    }
    // Re-insert the new stores into the capture list, in offset order.
    while (nst > 0) {
      Node* st1 = st[--nst];
      C->copy_node_notes_to(st1, old);
      st1 = phase->transform(st1);
      offset = off[nst];
      assert(offset >= header_size, "do not smash header");
      int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase);
      guarantee(ins_idx != 0, "must re-insert constant store");
      if (ins_idx < 0)  ins_idx = -ins_idx; // never overlap
      if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem)
        set_req(--ins_idx, st1);  // reuse an edge freed in pass 1
      else
        ins_req(ins_idx, st1);    // build a new edge
    }
  }
  // Report the transformation for diagnostic builds and the compile log.
  if (PrintCompilation && WizardMode)
    tty->print_cr("Changed %d/%d subword/long constants into %d/%d int/long",
                  old_subword, old_long, new_int, new_long);
  if (C->log() != NULL)
    C->log()->elem("comment that='%d/%d subword/long to %d/%d int/long'",
                   old_subword, old_long, new_int, new_long);
  // Clean out any temporary zmem edges left behind.
  remove_extra_zeroes();
}
//---------------------------find_next_fullword_store--------------------------
// Starting at edge index 'start', scan the captured stores for evidence
// that a full 32-bit word will be completely initialized.  Returns the
// offset of the first provably complete word, or -1 when no such word is
// found (the conservative answer).
intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) {
  int       int_map = 0;        // bitmap of initialized bytes in current word
  intptr_t  int_map_off = 0;    // offset of the word described by int_map
  const int FULL_MAP = right_n_bits(BytesPerInt); // the int_map we hope for
  for (uint i = start, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)  break;     // return conservative answer
    int st_size = st->as_Store()->memory_size();
    if (st_size >= BytesPerInt && (st_off % BytesPerInt) == 0) {
      return st_off;            // we found a complete word init
    }
    // Update the byte bitmap for the word containing this subword store,
    // resetting it if we have moved to a different word.
    intptr_t this_int_off = align_size_down(st_off, BytesPerInt);
    if (this_int_off != int_map_off) {
      int_map = 0;
      int_map_off = this_int_off;
    }
    int subword_off = st_off - this_int_off;
    int_map |= right_n_bits(st_size) << subword_off;
    if ((int_map & FULL_MAP) == FULL_MAP) {
      return this_int_off;      // we found a complete word init
    }
    // Did this store reach or cross the next word boundary?
    intptr_t next_int_off = align_size_down(st_off + st_size, BytesPerInt);
    if (next_int_off == this_int_off + BytesPerInt) {
      // Exactly reached the boundary: shift the map to describe the next word.
      int_map_off = next_int_off;
      int_map >>= BytesPerInt;
    } else if (next_int_off > this_int_off + BytesPerInt) {
      // The store runs past the next word boundary, so the word at
      // this_int_off + BytesPerInt is fully covered by this store alone.
      return this_int_off + BytesPerInt;
    }
  }
  return -1;                    // no such store was found
}
//---------------------------complete_stores-----------------------------------
// Called when the associated allocation is expanded.  Rewires the captured
// stores into a linear chain (rooted in the initial zero memory) and emits
// ClearArray nodes on rawmem for any byte ranges the stores leave
// uninitialized.  Returns the resulting raw memory state.
Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                                      intptr_t header_size,
                                      Node* size_in_bytes,
                                      PhaseGVN* phase) {
  assert(!is_complete(), "not already complete");
  assert(stores_are_sane(phase), "");
  assert(allocation() != NULL, "must be present");
  remove_extra_zeroes();
  if (ReduceFieldZeroing || ReduceBulkZeroing)
    // First squeeze scattered subword constant stores into wider stores.
    coalesce_subword_stores(header_size, size_in_bytes, phase);
  Node* zmem = zero_memory();   // initially zero memory state
  Node* inits = zmem;           // accumulating a linearized chain of inits
  #ifdef ASSERT
  // Bookkeeping for the ordering/overlap asserts in the loop below.
  intptr_t first_offset = allocation()->minimum_header_size();
  intptr_t last_init_off = first_offset;  // previous init offset
  intptr_t last_init_end = first_offset;  // previous init offset+size
  intptr_t last_tile_end = first_offset;  // previous tile offset+size
  #endif
  intptr_t zeroes_done = header_size;
  bool do_zeroing = true;       // we might give up if inits are very sparse
  int  big_init_gaps = 0;       // how many large gaps have we seen?
  if (ZeroTLAB)  do_zeroing = false;
  if (!ReduceFieldZeroing && !ReduceBulkZeroing)  do_zeroing = false;
  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)
      break;                    // unknown junk in the inits
    if (st->in(MemNode::Memory) != zmem)
      break;                    // complicated store chains somehow in list
    int st_size = st->as_Store()->memory_size();
    intptr_t next_init_off = st_off + st_size;
    if (do_zeroing && zeroes_done < next_init_off) {
      // This store leaves a gap after the bytes zeroed so far.
      intptr_t zeroes_needed = st_off;
      if (st_size < BytesPerInt) {
        // A subword store: see whether later stores finish out its word;
        // if not, round the zeroing up to the word boundary.
        intptr_t next_full_store = find_next_fullword_store(i, phase);
        if (next_full_store < 0) {
          zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
        } else {
          assert(next_full_store >= zeroes_needed, "must go forward");
          assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
          zeroes_needed = next_full_store;
        }
      }
      if (zeroes_needed > zeroes_done) {
        // Emit incremental zeroing on rawmem to fill the gap.
        intptr_t zsize = zeroes_needed - zeroes_done;
        zeroes_done = align_size_down(zeroes_done, BytesPerInt);
        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                              zeroes_done, zeroes_needed,
                                              phase);
        zeroes_done = zeroes_needed;
        // After several large gaps, stop zeroing incrementally; the final
        // bulk clear below will cover whatever remains.
        if (zsize > Matcher::init_array_short_size && ++big_init_gaps > 2)
          do_zeroing = false;   // leave the hole, next time
      }
    }
    // Collect the store onto the linearized chain and unhook it from here.
    st->set_req(MemNode::Memory, inits);
    inits = st;                 // put it on the linearized chain
    set_req(i, zmem);           // unhook from previous position
    if (zeroes_done == st_off)
      zeroes_done = next_init_off;
    assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
    #ifdef ASSERT
    // Ordering/overlap invariants on the captured stores.
    assert(st_off >= last_init_off, "inits do not reverse");
    last_init_off = st_off;
    const Type* val = NULL;
    if (st_size >= BytesPerInt &&
        (val = phase->type(st->in(MemNode::ValueIn)))->singleton() &&
        (int)val->basic_type() < (int)T_OBJECT) {
      // A wide constant "tile" (as built by coalesce_subword_stores).
      assert(st_off >= last_tile_end, "tiles do not overlap");
      assert(st_off >= last_init_end, "tiles do not overwrite inits");
      last_tile_end = MAX2(last_tile_end, next_init_off);
    } else {
      intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong);
      assert(st_tile_end >= last_tile_end, "inits stay with tiles");
      assert(st_off >= last_init_end, "inits do not overlap");
      last_init_end = next_init_off;  // it's a non-tile
    }
    #endif //ASSERT
  }
  remove_extra_zeroes();        // clear out all the zmems left over
  add_req(inits);               // append the linearized init chain
  if (!ZeroTLAB) {
    // Zero whatever tail of the object remains uninitialized.
    zeroes_done = align_size_down(zeroes_done, BytesPerInt);
    intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
    if (zeroes_done + BytesPerLong >= size_limit) {
      AllocateNode* alloc = allocation();
      assert(alloc != NULL, "must be present");
      if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
        // For a plain instance allocation, a zeroes_done that matches the
        // klass layout_helper means the whole object is covered already.
        Node* klass_node = alloc->in(AllocateNode::KlassNode);
        ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
        if (zeroes_done == k->layout_helper())
          zeroes_done = size_limit;
      }
    }
    if (zeroes_done < size_limit) {
      rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                            zeroes_done, size_in_bytes, phase);
    }
  }
  set_complete(phase);
  return rawmem;
}
#ifdef ASSERT
//---------------------------stores_are_sane-----------------------------------
// Debug-only invariant: captured stores must appear at non-decreasing,
// non-overlapping offsets.  Trivially true once the node is complete.
bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
  if (is_complete())
    return true;                // stores could be anything at this point
  assert(allocation() != NULL, "must be present");
  // Track the end of the previous store; the next may not start before it.
  intptr_t prev_end = allocation()->minimum_header_size();
  for (uint idx = InitializeNode::RawStores; idx < req(); idx++) {
    Node* captured = in(idx);
    intptr_t off = get_store_offset(captured, phase);
    if (off < 0)  continue;     // ignore dead garbage
    bool overlaps = (prev_end > off);
    if (overlaps) {
      tty->print_cr("*** bad store offset at %d: " INTX_FORMAT " > " INTX_FORMAT, idx, prev_end, off);
      this->dump(2);
      assert(false, "ascending store offsets");
      return false;
    }
    prev_end = off + captured->as_Store()->memory_size();
  }
  return true;
}
#endif //ASSERT
//------------------------------make_empty_memory------------------------------
// The shared TOP node doubles as the "no memory on this slice" sentinel.
Node* MergeMemNode::make_empty_memory() {
  Compile* C = Compile::current();
  Node* sentinel = (Node*) C->top();
  assert(sentinel->is_top(), "correct sentinel identity");
  return sentinel;
}
//------------------------------MergeMemNode-----------------------------------
// Build a MergeMem over the given base memory.  All narrow slices start as
// the empty sentinel; if new_base is itself a MergeMem, its non-empty
// slices are copied over instead of sharing the nested merge.
MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) {
  init_class_id(Class_MergeMem);
  Node* empty_mem = make_empty_memory();
  // Fill every initial slot (including the base slot) with the sentinel.
  for (uint i = Compile::AliasIdxTop; i < req(); i++) {
    init_req(i,empty_mem);
  }
  assert(empty_memory() == empty_mem, "");
  if( new_base != NULL && new_base->is_MergeMem() ) {
    // Flatten: copy each non-empty slice of the other merge into this one.
    MergeMemNode* mdef = new_base->as_MergeMem();
    assert(mdef->empty_memory() == empty_mem, "consistent sentinels");
    for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) {
      mms.set_memory(mms.memory2());
    }
    assert(base_memory() == mdef->base_memory(), "");
  } else {
    set_base_memory(new_base);
  }
}
//------------------------------make-------------------------------------------
// Factory: allocate a MergeMem over 'mem' in the compile's arena.
MergeMemNode* MergeMemNode::make(Compile* C, Node* mem) {
  MergeMemNode* merge = new(C) MergeMemNode(mem);
  return merge;
}
uint MergeMemNode::hash() const { return NO_HASH; }
// Value-equality for a MergeMem holds only against the very same node.
uint MergeMemNode::cmp( const Node &n ) const {
  return (this == &n);          // Always fail except on self
}
//------------------------------Identity---------------------------------------
// Collapse to the base memory when no narrow slice diverges from it.
Node* MergeMemNode::Identity(PhaseTransform *phase) {
  Node* common   = base_memory();
  Node* sentinel = empty_memory();
  if (common == sentinel) {
    return common;              // dead memory path; identity is the sentinel
  }
  for (uint idx = Compile::AliasIdxRaw; idx < req(); idx++) {
    Node* slice = in(idx);
    bool diverges = (slice != sentinel) && (slice != common);
    if (diverges) {
      return this;              // Many memory splits; no change
    }
  }
  return common;                // No memory splits; ID on the one true input
}
//------------------------------Ideal------------------------------------------
// Simplify this memory merge: flatten a MergeMem base, collapse slices
// that merely duplicate the (possibly new) base memory, and untangle
// self-referential bases.
Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node *progress = NULL;
  Node* old_base = base_memory();
  Node* empty_mem = empty_memory();
  if (old_base == empty_mem)
    return NULL; // Dead memory path.
  MergeMemNode* old_mbase;
  if (old_base != NULL && old_base->is_MergeMem())
    old_mbase = old_base->as_MergeMem();
  else
    old_mbase = NULL;
  Node* new_base = old_base;
  // Simplify stacked MergeMems: adopt the inner merge's base memory,
  // after growing to cover any slices the inner merge defines.
  if (old_mbase) new_base = old_mbase->base_memory();
  if (old_mbase) grow_to_match(old_mbase);
  // If the base is a complete Phi, remember its region and width so slice
  // Phis with identical inputs can be recognized as duplicates of it.
  PhiNode* phi_base;
  if (new_base != NULL && new_base->is_Phi())
    phi_base = new_base->as_Phi();
  else
    phi_base = NULL;
  Node* phi_reg = NULL;
  uint  phi_len = (uint)-1;
  if (phi_base != NULL && !phi_base->is_copy()) {
    phi_reg = phi_base->region();
    phi_len = phi_base->req();
    // An unfinished phi (NULL input) is not examined.
    for (uint i = 1; i < phi_len; i++) {
      if (phi_base->in(i) == NULL) {
        phi_reg = NULL;
        phi_len = (uint)-1;
        break;
      }
    }
  }
  assert(!old_mbase || old_mbase->is_empty_memory(empty_mem), "consistent sentinels");
  // Canonicalize every narrow slice against the new base.
  for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
    Node* old_in = in(i);
    // The sentinel stands for the base memory at this slice.
    Node* old_mem = old_in;
    if (old_mem == empty_mem) old_mem = old_base;
    assert(old_mem == memory_at(i), "");
    Node* new_mem = old_mem;
    MergeMemNode* old_mmem;
    if (old_mem != NULL && old_mem->is_MergeMem())
      old_mmem = old_mem->as_MergeMem();
    else
      old_mmem = NULL;
    if (old_mmem == this) {
      // A slice looping back to this merge: substitute the new base,
      // or the sentinel if that would still self-loop.
      new_mem = (new_base == this || new_base == empty_mem)? empty_mem : new_base;
    }
    else if (old_mmem != NULL) {
      // Flatten a nested MergeMem by pulling out its slice directly.
      new_mem = old_mmem->memory_at(i);
    }
    // A slice Phi over the same region with inputs identical to the base
    // Phi duplicates the base and can fold into it.
    if (new_mem != NULL && new_mem != new_base &&
        new_mem->req() == phi_len && new_mem->in(0) == phi_reg) {
      if (new_mem->is_Phi()) {
        PhiNode* phi_mem = new_mem->as_Phi();
        for (uint i = 1; i < phi_len; i++) {
          if (phi_base->in(i) != phi_mem->in(i)) {
            phi_mem = NULL;
            break;
          }
        }
        if (phi_mem != NULL) {
          new_mem = new_base;   // all inputs matched the base phi
        }
      }
    }
    // Slices equal to the base are stored as the empty sentinel.
    Node* new_in = new_mem;
    if (new_in == new_base) new_in = empty_mem;
    if (new_in != old_in) {
      set_req(i, new_in);
      progress = this; // Report progress
    }
  }
  if (new_base != old_base) {
    set_req(Compile::AliasIdxBot, new_base);
    assert(base_memory() == new_base, "");
    progress = this;
  }
  // A base that points back at this merge is a degenerate loop; cut it.
  if( base_memory() == this ) {
    set_req(Compile::AliasIdxBot, empty_mem);
  }
  // If the base is still a MergeMem, transform it; if that kills it
  // (top, or an empty-based merge), this base is dead too.
  if( base_memory()->is_MergeMem() ) {
    MergeMemNode *new_mbase = base_memory()->as_MergeMem();
    Node *m = phase->transform(new_mbase); // Rollup any cycles
    if( m != NULL && (m->is_top() ||
        m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem) ) {
      set_req(Compile::AliasIdxBot, empty_mem);
    }
  }
  if( base_memory() == empty_mem ) {
    progress = this;
    // Outside IterGVN (can_reshape false), clear the slices eagerly.
    if( !can_reshape ) {
      for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
        if( in(i) != empty_mem ) { set_req(i, empty_mem); }
      }
    }
  }
  if( !progress && base_memory()->is_Phi() && can_reshape ) {
    // No direct progress, but a base Phi that still references this merge
    // should be revisited: push it back on the IterGVN worklist.
    uint merge_width = req();
    if (merge_width > Compile::AliasIdxRaw) {
      PhiNode* phi = base_memory()->as_Phi();
      for( uint i = 1; i < phi->req(); ++i ) {// For all paths in
        if (phi->in(i) == this) {
          phase->is_IterGVN()->_worklist.push(phi);
          break;
        }
      }
    }
  }
  assert(progress || verify_sparse(), "please, no dups of base");
  return progress;
}
//------------------------------set_base_memory--------------------------------
// Install the wide default memory, then fold every narrow slice that
// happens to equal it back into the empty sentinel.
void MergeMemNode::set_base_memory(Node *new_base) {
  Node* sentinel = empty_memory();
  set_req(Compile::AliasIdxBot, new_base);
  assert(memory_at(req()) == new_base, "must set default memory");
  if (new_base == sentinel) {
    return;                     // nothing can collapse into the sentinel
  }
  for (uint idx = Compile::AliasIdxRaw; idx < req(); idx++) {
    if (in(idx) == new_base) {
      set_req(idx, sentinel);
    }
  }
}
//------------------------------out_RegMask------------------------------------
// MergeMem carries memory state, not a register value: empty mask.
const RegMask &MergeMemNode::out_RegMask() const {
  return RegMask::Empty;
}
#ifndef PRODUCT
//------------------------------dump_spec--------------------------------------
// Print each narrow slice as " N<idx>:<adr_type>"; slices that resolve to
// the base memory print as " -".
void MergeMemNode::dump_spec(outputStream *st) const {
  st->print(" {");
  Node* base_mem = base_memory();
  for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) {
    Node* mem = memory_at(i);
    if (mem == base_mem) { st->print(" -"); continue; }
    st->print( " N%d:", mem->_idx );
    Compile::current()->get_adr_type(i)->dump_on(st);
  }
  st->print(" }");
}
#endif // !PRODUCT
#ifdef ASSERT
//------------------------------might_be_same----------------------------------
// Conservative aliasing guess: identical nodes match, and any Phi is
// assumed capable of folding into anything.
static bool might_be_same(Node* a, Node* b) {
  if (a == b)  return true;
  bool involves_phi = a->is_Phi() || b->is_Phi();
  return involves_phi;          // pretty stupid...
}
//------------------------------verify_memory_slice----------------------------
// Debug helper (active only under -XX:+VerifyAliases): check that node n
// is a plausible occupant of slice alias_idx in MergeMem m.
static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
  if (!VerifyAliases) return; // don't bother to verify unless requested
  if (is_error_reported()) return; // muzzle asserts when debugging an error
  if (Node::in_dump()) return; // muzzle asserts when printing
  assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel");
  assert(n != NULL, "");
  // Dig through nested MergeMems to the node actually on this slice.
  while (n->is_MergeMem()) {
    n = n->as_MergeMem()->memory_at(alias_idx);
  }
  Compile* C = Compile::current();
  const TypePtr* n_adr_type = n->adr_type();
  if (n == m->empty_memory()) {
    // The sentinel implicitly stands for the base memory; nothing to check.
  } else if (n_adr_type != TypePtr::BOTTOM) {
    // A narrow memory must match the slice's alias category exactly.
    assert(n_adr_type != NULL, "new memory must have a well-defined adr_type");
    assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice");
  } else {
    // Wide (BOTTOM) memory is only expected in a few places:
    bool expected_wide_mem = false;
    if (n == m->base_memory()) {
      expected_wide_mem = true;
    } else if (alias_idx == Compile::AliasIdxRaw ||
               n == m->memory_at(Compile::AliasIdxRaw)) {
      expected_wide_mem = true;
    } else if (!C->alias_type(alias_idx)->is_rewritable()) {
      // A non-rewritable slice may also carry wide memory.
      expected_wide_mem = true;
    }
    assert(expected_wide_mem, "expected narrow slice replacement");
  }
}
#else // !ASSERT
#define verify_memory_slice(m,i,n) (void)(0) // PRODUCT version is no-op
#endif
//------------------------------memory_at--------------------------------------
// Return the memory node standing for the given alias index.  A slice
// holding the empty sentinel (or lying beyond req()) resolves to the base
// memory, which must then be suitably wide.
Node* MergeMemNode::memory_at(uint alias_idx) const {
  assert(alias_idx >= Compile::AliasIdxRaw ||
         alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
         "must avoid base_memory and AliasIdxTop");
  Node* n = alias_idx < req() ? in(alias_idx) : empty_memory();
  Compile *C = Compile::current();
  if (is_empty_memory(n)) {
    // The edge array is sparse: an empty slot means "use the base memory".
    n = base_memory();
    assert(Node::in_dump()
           || n == NULL || n->bottom_type() == Type::TOP
           || n->adr_type() == NULL // address is TOP
           || n->adr_type() == TypePtr::BOTTOM
           || n->adr_type() == TypeRawPtr::BOTTOM
           || Compile::current()->AliasLevel() == 0,
           "must be a wide memory");
  } else {
    #ifdef ASSERT
    // Verify the occupant, except mid-dump, mid-error, or when it may
    // simply repeat the base memory (a mostly harmless redundancy).
    if (is_error_reported() || Node::in_dump()) {
    } else if (might_be_same(n, base_memory())) {
    } else {
      verify_memory_slice(this, alias_idx, n);
    }
    #endif
  }
  return n;
}
//------------------------------set_memory_at----------------------------------
// Point the given alias slice at n, growing the edge array on demand.
// A value equal to the base memory is collapsed into the empty sentinel.
void MergeMemNode::set_memory_at(uint alias_idx, Node *n) {
  verify_memory_slice(this, alias_idx, n);
  Node* sentinel = empty_memory();
  if (n == base_memory()) {
    n = sentinel;               // collapse default
  }
  uint wanted = alias_idx + 1;
  if (req() < wanted) {
    if (n == sentinel) {
      return;                   // already the default, so do not grow me
    }
    while (req() < wanted) {
      add_req(sentinel);
    }
  }
  set_req(alias_idx, n);
}
void MergeMemNode::iteration_setup(const MergeMemNode* other) {
if (other != NULL) {
grow_to_match(other);
#ifdef ASSERT
for (uint i = req(); i < other->req(); i++) {
assert(other->is_empty_memory(other->in(i)), "slice left uncovered");
}
#endif
}
Node* base_mem = base_memory();
if (base_mem != NULL && !base_mem->is_top()) {
for (uint i = Compile::AliasIdxBot+1, imax = req(); i < imax; i++) {
if (in(i) == base_mem)
set_req(i, empty_memory());
}
}
}
//------------------------------grow_to_match----------------------------------
// Extend this merge with empty slices until req() covers every non-empty
// slice of 'other'.
void MergeMemNode::grow_to_match(const MergeMemNode* other) {
  Node* empty_mem = empty_memory();
  assert(other->is_empty_memory(empty_mem), "consistent sentinels");
  // Scan down from other's last input to its highest non-empty slice,
  // then pad this node with sentinels up to (and including) that index.
  for (uint i = other->req(); --i >= req(); ) {
    if (other->in(i) != empty_mem) {
      uint new_len = i+1;
      while (req() < new_len) add_req(empty_mem);
      break;
    }
  }
}
#ifndef PRODUCT
bool MergeMemNode::verify_sparse() const {
assert(is_empty_memory(make_empty_memory()), "sane sentinel");
Node* base_mem = base_memory();
if (is_empty_memory(base_mem)) return true;
for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
assert(in(i) != NULL, "sane slice");
if (in(i) == base_mem) return false; // should have been the sentinel value!
}
return true;
}
//------------------------------match_memory-----------------------------------
// Debug helper: does 'mem' plausibly stand for slice 'idx' of 'mm'?
// Accepts the raw input, the resolved slice, or any Phi-copy chain thereof.
bool MergeMemStream::match_memory(Node* mem, const MergeMemNode* mm, int idx) {
  Node* n;
  n = mm->in(idx);
  if (mem == n) return true; // might be empty_memory()
  n = (idx == Compile::AliasIdxBot)? mm->base_memory(): mm->memory_at(idx);
  if (mem == n) return true;
  // Chase down degenerate Phi copies as well.
  while (n->is_Phi() && (n = n->as_Phi()->is_copy()) != NULL) {
    if (mem == n) return true;
    if (n == NULL) break; // NOTE(review): unreachable — the loop condition already excludes NULL here
  }
  return false;
}
#endif // !PRODUCT
// ==== concatenated file boundary: C:\hotspot-69087d08d473\src\share\vm/opto/memnode.hpp ====
#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP
#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"
class MultiNode;
class PhaseCCP;
class PhaseTransform;
//------------------------------MemNode----------------------------------------
// Common base class for memory nodes (loads and stores).
class MemNode : public Node {
private:
  bool _unaligned_access; // Unaligned access from unsafe
  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  // Fixed input-edge positions shared by all memory nodes.
  enum { Control, // When is it safe to do this load?
         Memory, // Chunk of memory is being loaded from
         Address, // Actually address, derived from base
         ValueIn, // Value to store
         OopStore // Preceding oop store, only in StoreCM
  };
  // Memory-ordering constraint carried by a load or store.
  typedef enum { unordered = 0,
                 acquire, // Load has to acquire or be succeeded by MemBarAcquire.
                 release // Store has to release or be preceded by MemBarRelease.
  } MemOrd;
protected:
  // Constructors for 3, 4, and 5 inputs; in debug builds the expected
  // address type is recorded and checked immediately via adr_type().
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  static bool check_if_adr_maybe_raw(Node* adr);
public:
  // Static optimizer helpers; see their definitions for details.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  static bool all_controls_dominate(Node* dom, Node* sub);
  static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
  virtual const class TypePtr *adr_type() const; // returns bottom_type of address
  // Shared front-end for subclass Ideal() implementations.
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
  // Debug-recorded address type; unavailable (0) in product builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }
  // Map this node to its corresponding store opcode (-1 if none).
  virtual int store_Opcode() const { return -1; }
  // What basic type of value lives in the addressed memory?
  virtual BasicType memory_type() const = 0;
  // Width of the access in bytes, derived from memory_type().
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }
  // Search preceding memory states for a store to the same address.
  Node* find_previous_store(PhaseTransform* phase);
  // Can this node accurately see a stored value in the given memory state?
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
  // Unsafe-access flags (sticky, set-only).
  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};
class LoadNode : public MemNode {
public:
enum ControlDependency {
Pinned,
DependsOnlyOnTest
};
private:
bool _depends_only_on_test;
const MemOrd _mo;
protected:
virtual uint cmp(const Node &n) const;
virtual uint size_of() const; // Size is bigger
virtual bool can_remove_control() const;
const Type* const _type; // What kind of value is loaded?
public:
LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
: MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
init_class_id(Class_Load);
}
inline bool is_unordered() const { return !is_acquire(); }
inline bool is_acquire() const {
assert(_mo == unordered || _mo == acquire, "unexpected");
return _mo == acquire;
}
static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
const TypePtr* at, const Type *rt, BasicType bt,
MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
virtual uint hash() const; // Check the type
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
Node* split_through_phi(PhaseGVN *phase);
Node *eliminate_autobox(PhaseGVN *phase);
virtual const Type *Value( PhaseTransform *phase ) const;
const Type *klass_value_common( PhaseTransform *phase ) const;
Node *klass_identity_common( PhaseTransform *phase );
virtual uint ideal_reg() const;
virtual const Type *bottom_type() const;
void set_type(const Type* t) {
assert(t != NULL, "sanity");
debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
}
const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
virtual uint match_edge(uint idx) const;
virtual int store_Opcode() const = 0;
bool is_instance_field_load_with_local_phi(Node* ctrl);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
static bool is_immutable_value(Node* adr);
#endif
protected:
const Type* load_array_final_field(const TypeKlassPtr *tkls,
ciKlass* klass) const;
virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
};
//------------------------------LoadBNode--------------------------------------
// Load a signed byte (8 bits) from memory.
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};
//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits) from memory.
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};
//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits) from memory.
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};
//------------------------------LoadSNode--------------------------------------
// Load a signed short (16 bits) from memory.
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};
//------------------------------LoadINode--------------------------------------
// Load an integer (32 bits) from memory.
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};
//------------------------------LoadRangeNode----------------------------------
// Load an array length (reads the RANGE slice of an array pointer);
// the result is a non-negative int.
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
//------------------------------LoadLNode--------------------------------------
// Load a long (64 bits) from memory.
class LoadLNode : public LoadNode {
  // _require_atomic_access participates in hash/cmp so that atomic and
  // non-atomic long loads are never commoned together.
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise load forbidden?
public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  // Factory for a long load that must be performed atomically.
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};
//------------------------------LoadL_unalignedNode----------------------------
// Load a long (64 bits) from possibly unaligned memory.
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};
//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory.
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};
//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory.
class LoadDNode : public LoadNode {
  // _require_atomic_access participates in hash/cmp so that atomic and
  // non-atomic double loads are never commoned together.
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access; // is piecewise load forbidden?
public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  // Factory for a double load that must be performed atomically.
  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};
// LoadDNode variant distinguished only by its Opcode(); per its name it is a
// load of a double from an address that may not be 8-byte aligned.
class LoadD_unalignedNode : public LoadDNode {
public:
LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
: LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
virtual int Opcode() const;
};
// Load a machine pointer (T_ADDRESS) from memory into a P register.
class LoadPNode : public LoadNode {
public:
LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
: LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
virtual int store_Opcode() const { return Op_StoreP; }
virtual BasicType memory_type() const { return T_ADDRESS; }
};
// Load a compressed (narrow) oop, T_NARROWOOP, into an N register.
class LoadNNode : public LoadNode {
public:
LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
: LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegN; }
virtual int store_Opcode() const { return Op_StoreN; }
virtual BasicType memory_type() const { return T_NARROWOOP; }
};
// Load a Klass pointer from an object header.  Use the make() factory rather
// than the constructor; it picks the right node shape for the VM configuration.
class LoadKlassNode : public LoadPNode {
protected:
// Klass loads have their own rule for dropping the control edge.
virtual bool can_remove_control() const;
public:
LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
: LoadPNode(c, mem, adr, at, tk, mo) {}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
// The klass of an object never changes, so the load depends only on the test.
virtual bool depends_only_on_test() const { return true; }
static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};
// Load a compressed (narrow) Klass pointer, T_NARROWKLASS.
class LoadNKlassNode : public LoadNNode {
public:
LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
: LoadNNode(c, mem, adr, at, tk, mo) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegN; }
virtual int store_Opcode() const { return Op_StoreNKlass; }
virtual BasicType memory_type() const { return T_NARROWKLASS; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual bool depends_only_on_test() const { return true; }
};
// Base class for all C2 store nodes.  Carries the required memory ordering
// (_mo: unordered vs. release); subclasses fix the stored value kind via
// memory_type() and their Opcode().
class StoreNode : public MemNode {
private:
const MemOrd _mo;
virtual uint size_of() const { return sizeof(*this); }
protected:
virtual uint cmp( const Node &n ) const;
// Stores produce a new memory state; they must never float above a test.
virtual bool depends_only_on_test() const { return false; }
Node *Ideal_masked_input (PhaseGVN *phase, uint mask);
Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);
public:
StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
: MemNode(c, mem, adr, at, val), _mo(mo) {
init_class_id(Class_Store);
}
// Variant with an extra oop_store edge (see StoreCMNode below).
StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
: MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
init_class_id(Class_Store);
}
inline bool is_unordered() const { return !is_release(); }
inline bool is_release() const {
assert((_mo == unordered || _mo == release), "unexpected");
return _mo == release;
}
// Reference-typed stores get release semantics; everything else is unordered.
static inline MemOrd release_if_reference(const BasicType t) {
const MemOrd mo = (t == T_ARRAY ||
t == T_ADDRESS || // Might be the address of an object reference (`boxing').
t == T_OBJECT) ? release : unordered;
return mo;
}
// Polymorphic factory: picks the concrete Store subclass from BasicType bt.
static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
const TypePtr* at, Node *val, BasicType bt, MemOrd mo);
virtual uint hash() const; // Check the type
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
virtual uint match_edge(uint idx) const;
virtual const Type *bottom_type() const; // returns Type::MEMORY
virtual int store_Opcode() const { return Opcode(); }
bool value_never_loaded(PhaseTransform *phase) const;
MemBarNode* trailing_membar() const;
};
// Store a byte (T_BYTE) to memory.
class StoreBNode : public StoreNode {
public:
StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
: StoreNode(c, mem, adr, at, val, mo) {}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual BasicType memory_type() const { return T_BYTE; }
};
// Store a char/short (T_CHAR) to memory.
class StoreCNode : public StoreNode {
public:
StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
: StoreNode(c, mem, adr, at, val, mo) {}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual BasicType memory_type() const { return T_CHAR; }
};
// Store an int (T_INT) to memory.
class StoreINode : public StoreNode {
public:
StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
: StoreNode(c, mem, adr, at, val, mo) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_INT; }
};
// Store a long (T_LONG) to memory.  _require_atomic_access forbids a
// piecewise (two 32-bit halves) store; it participates in hash()/cmp() so
// GVN never commons an atomic store with a non-atomic one.
class StoreLNode : public StoreNode {
virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
virtual uint cmp( const Node &n ) const {
return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
&& StoreNode::cmp(n);
}
virtual uint size_of() const { return sizeof(*this); }
const bool _require_atomic_access; // is piecewise store forbidden?
public:
StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
: StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_LONG; }
bool require_atomic_access() const { return _require_atomic_access; }
// Factory for a store with require_atomic_access set.
static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
StoreNode::dump_spec(st);
if (_require_atomic_access) st->print(" Atomic!");
}
#endif
};
// Store a float (T_FLOAT) to memory.
class StoreFNode : public StoreNode {
public:
StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
: StoreNode(c, mem, adr, at, val, mo) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_FLOAT; }
};
// Store a double (T_DOUBLE) to memory.  Mirrors StoreLNode's handling of the
// _require_atomic_access flag (forbids piecewise store; folded into hash/cmp).
class StoreDNode : public StoreNode {
virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
virtual uint cmp( const Node &n ) const {
return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
&& StoreNode::cmp(n);
}
virtual uint size_of() const { return sizeof(*this); }
const bool _require_atomic_access; // is piecewise store forbidden?
public:
StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
MemOrd mo, bool require_atomic_access = false)
: StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_DOUBLE; }
bool require_atomic_access() const { return _require_atomic_access; }
// Factory for a store with require_atomic_access set.
static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
StoreNode::dump_spec(st);
if (_require_atomic_access) st->print(" Atomic!");
}
#endif
};
// Store a machine pointer (T_ADDRESS) to memory.
class StorePNode : public StoreNode {
public:
StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
: StoreNode(c, mem, adr, at, val, mo) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_ADDRESS; }
};
// Store a compressed (narrow) oop, T_NARROWOOP.
class StoreNNode : public StoreNode {
public:
StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
: StoreNode(c, mem, adr, at, val, mo) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_NARROWOOP; }
};
// Store a compressed (narrow) Klass pointer, T_NARROWKLASS.
class StoreNKlassNode : public StoreNNode {
public:
StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
: StoreNNode(c, mem, adr, at, val, mo) {}
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_NARROWKLASS; }
};
// Card-mark store ("CM"), paired via the oop_store edge with the oop store it
// covers.  Carries the alias index of that oop store so the two stay on the
// right memory slices; _oop_alias_idx is folded into hash()/cmp() so GVN never
// commons card marks belonging to different oop stores.
class StoreCMNode : public StoreNode {
private:
virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
virtual uint cmp( const Node &n ) const {
return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
&& StoreNode::cmp(n);
}
virtual uint size_of() const { return sizeof(*this); }
int _oop_alias_idx; // The alias_idx of OopStore
public:
StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
_oop_alias_idx(oop_alias_idx) {
// Explicit parentheses around the '&&' term: '&&' already binds tighter
// than '||', so the meaning is unchanged, but the grouping is now obvious
// and mixed-precedence warnings are silenced.  AliasIdxBot is only legal
// when alias analysis is disabled (AliasLevel() == 0).
assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
(_oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
"bad oop alias idx");
}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
virtual BasicType memory_type() const { return T_VOID; } // unspecific
int oop_alias_idx() const { return _oop_alias_idx; }
};
// Pointer load that pairs with a StorePConditional (load-locked /
// store-conditional idiom); note store_Opcode() returns Op_StorePConditional.
class LoadPLockedNode : public LoadPNode {
public:
LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
: LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
virtual int Opcode() const;
virtual int store_Opcode() const { return Op_StorePConditional; }
virtual bool depends_only_on_test() const { return true; }
};
// Projection of the memory state produced by a store-conditional / load-store
// node (its in(0)).  Uses the special SCMEMPROJCON projection number and takes
// its address type from the producing node's Memory input.
class SCMemProjNode : public ProjNode {
public:
enum {SCMEMPROJCON = (uint)-2};
SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
virtual int Opcode() const;
virtual bool is_CFG() const { return false; }
virtual const Type *bottom_type() const {return Type::MEMORY;}
virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {};
#endif
};
// Base for combined load-store (atomic read-modify-write) nodes such as
// GetAndAdd/GetAndSet.  Produces a value of _type and addresses _adr_type.
class LoadStoreNode : public Node {
private:
const Type* const _type; // What kind of value is loaded?
const TypePtr* _adr_type; // What kind of memory is being addressed?
virtual uint size_of() const; // Size is bigger
public:
LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
// Has a store side effect, so it must not float above a test.
virtual bool depends_only_on_test() const { return false; }
virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
virtual const Type *bottom_type() const { return _type; }
virtual uint ideal_reg() const;
virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address
bool result_not_used() const;
MemBarNode* trailing_membar() const;
};
// LoadStoreNode with one extra input (ExpectedIn): the value the memory
// location is compared against before the store succeeds (CAS family).
class LoadStoreConditionalNode : public LoadStoreNode {
public:
enum {
ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
};
LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};
// Conditional stores (store-conditional half of LL/SC); each produces
// condition flags (Op_RegFlags) indicating success or failure.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegFlags; }
};
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegFlags; }
};
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegFlags; }
};
// Compare-and-swap nodes for long, int, pointer and narrow-oop values; the
// 'ex' input is the expected value (see LoadStoreConditionalNode::ExpectedIn).
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
virtual int Opcode() const;
};
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
virtual int Opcode() const;
};
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
virtual int Opcode() const;
};
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
virtual int Opcode() const;
};
// Atomic fetch-and-add / fetch-and-set nodes: each returns the prior value
// and stores the new one.  All take the 4-input LoadStoreNode shape.
class GetAndAddINode : public LoadStoreNode {
public:
GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
virtual int Opcode() const;
};
class GetAndAddLNode : public LoadStoreNode {
public:
GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
virtual int Opcode() const;
};
class GetAndSetINode : public LoadStoreNode {
public:
GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
virtual int Opcode() const;
};
class GetAndSetLNode : public LoadStoreNode {
public:
GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
virtual int Opcode() const;
};
// Pointer/narrow-oop variants carry an explicit result type 't'.
class GetAndSetPNode : public LoadStoreNode {
public:
GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
virtual int Opcode() const;
};
class GetAndSetNNode : public LoadStoreNode {
public:
GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
virtual int Opcode() const;
};
// Zero a span of raw memory (array body clearing after allocation).  The
// clear_memory() overloads cover constant/constant, constant/variable and
// variable/variable offset bounds.
class ClearArrayNode: public Node {
public:
ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
: Node(ctrl,arymem,word_cnt,base) {
init_class_id(Class_ClearArray);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::MEMORY; }
virtual const class TypePtr *adr_type() const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint match_edge(uint idx) const;
static Node* clear_memory(Node* control, Node* mem, Node* dest,
intptr_t start_offset,
intptr_t end_offset,
PhaseGVN* phase);
static Node* clear_memory(Node* control, Node* mem, Node* dest,
intptr_t start_offset,
Node* end_offset,
PhaseGVN* phase);
static Node* clear_memory(Node* control, Node* mem, Node* dest,
Node* start_offset,
Node* end_offset,
PhaseGVN* phase);
// Step *np past a node this clear can safely skip; see implementation.
static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
// Base class for string intrinsics (compare/equals/indexOf/...).  All operate
// on char-array memory (TypeAryPtr::CHARS) and produce an int register value.
// The three constructors cover the different input counts of the subclasses.
class StrIntrinsicNode: public Node {
public:
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
Node(control, char_array_mem, s1, c1, s2, c2) {
}
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2, Node* c):
Node(control, char_array_mem, s1, s2, c) {
}
StrIntrinsicNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2):
Node(control, char_array_mem, s1, s2) {
}
virtual bool depends_only_on_test() const { return false; }
virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
// String.compareTo intrinsic: int-valued three-way comparison.
class StrCompNode: public StrIntrinsicNode {
public:
StrCompNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::INT; }
};
// String.equals intrinsic: boolean-valued.
class StrEqualsNode: public StrIntrinsicNode {
public:
StrEqualsNode(Node* control, Node* char_array_mem,
Node* s1, Node* s2, Node* c):
StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};
// String.indexOf intrinsic: int-valued index (or miss marker).
class StrIndexOfNode: public StrIntrinsicNode {
public:
StrIndexOfNode(Node* control, Node* char_array_mem,
Node* s1, Node* c1, Node* s2, Node* c2):
StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::INT; }
};
// Arrays.equals intrinsic for char arrays: boolean-valued.
class AryEqNode: public StrIntrinsicNode {
public:
AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
StrIntrinsicNode(control, char_array_mem, s1, s2) {};
virtual int Opcode() const;
virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};
// Intrinsic for encoding a char array into an ISO-8859-1 byte array;
// produces an int result (number of characters encoded).
class EncodeISOArrayNode: public Node {
public:
EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return false; }
virtual const Type* bottom_type() const { return TypeInt::INT; }
virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
};
// Memory barrier: constrains reordering of memory operations across it.
// Abstract base; the concrete Opcode() comes from the subclasses below.
class MemBarNode: public MultiNode {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const ; // Always fail, except on self
virtual uint size_of() const { return sizeof(*this); }
const TypePtr* _adr_type;
// Role of this barrier relative to a paired barrier (leading/trailing around
// a load, store, or load-store); Standalone means it is unpaired.
enum {
Standalone,
TrailingLoad,
TrailingStore,
LeadingStore,
TrailingLoadStore,
LeadingLoadStore
} _kind;
#ifdef ASSERT
uint _pair_idx; // debug-only id shared by a leading/trailing pair
#endif
public:
enum {
Precedent = TypeFunc::Parms // optional edge to force precedence
};
MemBarNode(Compile* C, int alias_idx, Node* precedent);
virtual int Opcode() const = 0;
virtual const class TypePtr *adr_type() const { return _adr_type; }
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint match_edge(uint idx) const { return 0; }
virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
virtual Node *match( const ProjNode *proj, const Matcher *m );
// Factory: picks the concrete MemBar subclass from 'opcode'.
static MemBarNode* make(Compile* C, int opcode,
int alias_idx = Compile::AliasIdxBot,
Node* precedent = NULL);
MemBarNode* trailing_membar() const;
MemBarNode* leading_membar() const;
void set_trailing_load() { _kind = TrailingLoad; }
bool trailing_load() const { return _kind == TrailingLoad; }
bool trailing_store() const { return _kind == TrailingStore; }
bool leading_store() const { return _kind == LeadingStore; }
bool trailing_load_store() const { return _kind == TrailingLoadStore; }
bool leading_load_store() const { return _kind == LeadingLoadStore; }
bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
bool standalone() const { return _kind == Standalone; }
// Mark two barriers as a leading/trailing pair around a (load-)store.
static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);
void remove(PhaseIterGVN *igvn);
};
// Concrete one-flavor barriers; each differs from MemBarNode only in Opcode().
class MemBarAcquireNode: public MemBarNode {
public:
MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {}
virtual int Opcode() const;
};
class LoadFenceNode: public MemBarNode {
public:
LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {}
virtual int Opcode() const;
};
class MemBarReleaseNode: public MemBarNode {
public:
MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {}
virtual int Opcode() const;
};
class StoreFenceNode: public MemBarNode {
public:
StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {}
virtual int Opcode() const;
};
class MemBarAcquireLockNode: public MemBarNode {
public:
MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {}
virtual int Opcode() const;
};
class MemBarReleaseLockNode: public MemBarNode {
public:
MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {}
virtual int Opcode() const;
};
// Store-store barrier; registers its class id so is_MemBarStoreStore() works.
class MemBarStoreStoreNode: public MemBarNode {
public:
MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {
init_class_id(Class_MemBarStoreStore);
}
virtual int Opcode() const;
};
// Full (volatile) barrier.
class MemBarVolatileNode: public MemBarNode {
public:
MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {}
virtual int Opcode() const;
};
// Compiler-only ordering constraint: never matched to a machine instruction.
class MemBarCPUOrderNode: public MemBarNode {
public:
MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};
// Barrier that fences an AllocateNode's initialization: captures the raw
// stores that initialize the new object (RawStores) and tracks whether the
// initialization is complete.  _is_complete is a bit set over the enum below
// (Complete may be combined with WithArraycopy).
class InitializeNode: public MemBarNode {
friend class AllocateNode;
enum {
Incomplete = 0,
Complete = 1,
WithArraycopy = 2
};
int _is_complete;
bool _does_not_escape;
public:
enum {
Control = TypeFunc::Control,
Memory = TypeFunc::Memory, // MergeMem for states affected by this op
RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address
RawStores = TypeFunc::Parms+1 // zero or more stores (or TOP)
};
InitializeNode(Compile* C, int adr_type, Node* rawoop);
virtual int Opcode() const;
virtual uint size_of() const { return sizeof(*this); }
virtual uint ideal_reg() const { return 0; } // not matched in the AD file
virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress
Node* memory(uint alias_idx);
Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
AllocateNode* allocation();
bool is_non_zero();
bool is_complete() { return _is_complete != Incomplete; }
bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }
void set_complete(PhaseGVN* phase);
void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }
bool does_not_escape() { return _does_not_escape; }
void set_does_not_escape() { _does_not_escape = true; }
#ifdef ASSERT
bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT
// Capture/lookup of initializing stores by raw offset into the new object.
intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);
Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);
Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
intptr_t header_size, Node* size_in_bytes,
PhaseGVN* phase);
private:
void remove_extra_zeroes();
int captured_store_insertion_point(intptr_t start, int size_in_bytes,
PhaseTransform* phase);
static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
bool detect_init_independence(Node* n, int& count);
void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
PhaseGVN* phase);
intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
// Sparse map from alias index to memory state: in(AliasIdxBot) is the base
// (wide) memory, in(AliasIdxTop) is the empty-memory sentinel, and higher
// inputs hold per-slice memory (top when the slice defers to base memory).
class MergeMemNode: public Node {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const ; // Always fail, except on self
friend class MergeMemStream;
MergeMemNode(Node* def); // clients use MergeMemNode::make
public:
static MergeMemNode* make(Compile* C, Node* base_memory);
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual uint ideal_reg() const { return NotAMachineReg; }
virtual uint match_edge(uint idx) const { return 0; }
virtual const RegMask &out_RegMask() const;
virtual const Type *bottom_type() const { return Type::MEMORY; }
virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
// Memory for a given alias slice; falls back to base memory for empty slices.
Node* memory_at(uint alias_idx) const;
void set_memory_at(uint alias_idx, Node* n);
Node* base_memory() const { return in(Compile::AliasIdxBot); }
void set_base_memory(Node* def);
Node* empty_memory() const { return in(Compile::AliasIdxTop); }
static Node* make_empty_memory(); // where the sentinel comes from
bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
void iteration_setup(const MergeMemNode* other = NULL);
void grow_to_match(const MergeMemNode* other);
bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
};
// Stack-allocated iterator over the alias slices of one MergeMemNode (or two
// in lock-step).  Usage: construct, then repeatedly call next()/next_non_empty()
// and read memory()/alias_idx().  The destructor (debug builds) checks that
// all mutation went through set_memory().
class MergeMemStream : public StackObj {
private:
MergeMemNode* _mm;
const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations
Node* _mm_base; // loop-invariant base memory of _mm
int _idx;
int _cnt;
Node* _mem;
Node* _mem2;
int _cnt2;
void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
assert(mm->verify_sparse(), "please, no dups of base");
assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
_mm = mm;
_mm_base = mm->base_memory();
_mm2 = mm2;
_cnt = mm->req();
// Start one before AliasIdxBot so the first next() lands on base memory.
_idx = Compile::AliasIdxBot-1; // start at the base memory
_mem = NULL;
_mem2 = NULL;
}
#ifdef ASSERT
// Recompute what _mem should be for the current _idx (debug cross-check).
Node* check_memory() const {
if (at_base_memory())
return _mm->base_memory();
else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
return _mm->memory_at(_idx);
else
return _mm_base;
}
Node* check_memory2() const {
return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
}
#endif
static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
void assert_synch() const {
assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
"no side-effects except through the stream");
}
public:
// Single-stream iteration over mm's slices.
MergeMemStream(MergeMemNode* mm) {
mm->iteration_setup();
init(mm);
debug_only(_cnt2 = 999);
}
// Dual-stream iteration: mm is mutable, mm2 is read-only and padded to match.
MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
assert(mm2, "second argument must be a MergeMem also");
((MergeMemNode*)mm2)->iteration_setup(); // update hidden state
mm->iteration_setup(mm2);
init(mm, mm2);
_cnt2 = mm2->req();
}
#ifdef ASSERT
~MergeMemStream() {
assert_synch();
}
#endif
MergeMemNode* all_memory() const {
return _mm;
}
Node* base_memory() const {
assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
return _mm_base;
}
const MergeMemNode* all_memory2() const {
assert(_mm2 != NULL, "");
return _mm2;
}
bool at_base_memory() const {
return _idx == Compile::AliasIdxBot;
}
int alias_idx() const {
assert(_mem, "must call next 1st");
return _idx;
}
const TypePtr* adr_type() const {
return Compile::current()->get_adr_type(alias_idx());
}
const TypePtr* adr_type(Compile* C) const {
return C->get_adr_type(alias_idx());
}
// A slice is "empty" when it holds the top sentinel (defers to base memory).
bool is_empty() const {
assert(_mem, "must call next 1st");
assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
return _mem->is_top();
}
bool is_empty2() const {
assert(_mem2, "must call next 1st");
assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
return _mem2->is_top();
}
Node* memory() const {
assert(!is_empty(), "must not be empty");
assert_synch();
return _mem;
}
// Like memory(), but substitutes base memory for an empty slice.
Node* force_memory() const {
assert(!is_empty() || !at_base_memory(), "");
Node *mem = _mem->is_top() ? _mm_base : _mem;
assert(mem == check_memory(), "");
return mem;
}
Node* memory2() const {
assert(_mem2 == check_memory2(), "");
return _mem2;
}
// Write through to the underlying MergeMem at the current position.
void set_memory(Node* mem) {
if (at_base_memory()) {
_mm->set_base_memory(mem);
} else {
_mm->set_memory_at(_idx, mem);
}
_mem = mem;
assert_synch();
}
void set_memory() {
_mem = _mm->in(_idx);
}
bool next() { return next(false); }
bool next2() { return next(true); }
bool next_non_empty() { return next_non_empty(false); }
bool next_non_empty2() { return next_non_empty(true); }
private:
// Advance both cursors; the second stream clamps to its own length by
// reading the AliasIdxTop sentinel once exhausted.
bool next(bool have_mm2) {
assert((_mm2 != NULL) == have_mm2, "use other next");
assert_synch();
if (++_idx < _cnt) {
_mem = _mm->in(_idx);
if (have_mm2)
_mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
return true;
}
return false;
}
// Skip slices that are empty in BOTH streams; a slice non-empty in either
// stream stops the scan.
bool next_non_empty(bool have_mm2) {
while (next(have_mm2)) {
if (!is_empty()) {
if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
return true;
} else if (have_mm2 && !is_empty2()) {
return true; // is_empty() == true
}
}
return false;
}
};
// Prefetch hints.  Read/Write variants thread through the I/O (ABIO) state;
// the allocation variant may thread memory instead (AllocatePrefetchStyle==3).
class PrefetchReadNode : public Node {
public:
PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return NotAMachineReg; }
virtual uint match_edge(uint idx) const { return idx==2; }
virtual const Type *bottom_type() const { return Type::ABIO; }
};
class PrefetchWriteNode : public Node {
public:
PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return NotAMachineReg; }
virtual uint match_edge(uint idx) const { return idx==2; }
virtual const Type *bottom_type() const { return Type::ABIO; }
};
class PrefetchAllocationNode : public Node {
public:
PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return NotAMachineReg; }
virtual uint match_edge(uint idx) const { return idx==2; }
virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};
#endif // SHARE_VM_OPTO_MEMNODE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/mulnode.cpp
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
// Hash a multiply by the identities of its two inputs plus its opcode.
// The sum is symmetric in the operands, so X*Y and Y*X hash alike.
uint MulNode::hash() const {
  const uintptr_t lhs = (uintptr_t)in(1);
  const uintptr_t rhs = (uintptr_t)in(2);
  return lhs + rhs + Opcode();
}
// Identity: multiplying by the ring's multiplicative identity (mul_id())
// leaves the other operand unchanged, so return that operand directly.
// Fix: dropped the 'register' storage-class specifier — it has no effect,
// is deprecated since C++11, and is removed in C++17.
Node *MulNode::Identity( PhaseTransform *phase ) {
const Type *one = mul_id(); // The multiplicative identity
if( phase->type( in(1) )->higher_equal( one ) ) return in(2);
if( phase->type( in(2) )->higher_equal( one ) ) return in(1);
return this;
}
// Canonicalize and fold multiplies:
//  1) move a constant (or otherwise-preferred) input to the right-hand side,
//  2) fold (X * con1) * con2 into X * (con1*con2),
//  3) distribute (X + con1) * con2 into X*con2 + (con1*con2).
// Steps 2/3 are skipped for MulF/MulD, which cannot be reassociated.
Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) {
const Type *t1 = phase->type( in(1) );
const Type *t2 = phase->type( in(2) );
Node *progress = NULL; // Progress flag
// Canonicalization: unless the right input is already a constant (or is a
// Load while the left input is neither constant nor Load), consider swapping.
if( !(t2->singleton() ||
(in(2)->is_Load() && !(t1->singleton() || in(1)->is_Load())) ) ) {
if( t1->singleton() || // Left input is a constant?
(in(1)->_idx > in(2)->_idx) ) {
swap_edges(1, 2);
const Type *t = t1;
t1 = t2;
t2 = t;
progress = this; // Made progress
}
}
uint op = Opcode();
if( t2->singleton() && // Right input is a constant?
op != Op_MulF && // Float & double cannot reassociate
op != Op_MulD ) {
if( t2 == Type::TOP ) return NULL;
Node *mul1 = in(1);
#ifdef ASSERT
// Debug-only: detect a dead loop where this node (directly or through its
// left mul/add input) feeds itself.
int op1 = mul1->Opcode();
if( phase->eqv( mul1, this ) || phase->eqv( in(2), this ) ||
( op1 == mul_opcode() || op1 == add_opcode() ) &&
( phase->eqv( mul1->in(1), this ) || phase->eqv( mul1->in(2), this ) ||
phase->eqv( mul1->in(1), mul1 ) || phase->eqv( mul1->in(2), mul1 ) ) )
assert(false, "dead loop in MulNode::Ideal");
#endif
if( mul1->Opcode() == mul_opcode() ) { // Left input is a multiply?
const Type *t12 = phase->type( mul1->in(2) );
if( t12->singleton() && t12 != Type::TOP) { // Left input is an add of a constant?
// (X * con1) * con2 ==> X * (con1*con2): combine the two constants.
const Type *tcon01 = ((MulNode*)mul1)->mul_ring(t2,t12);
if( tcon01->singleton() ) {
set_req(1, mul1->in(1));
set_req(2, phase->makecon( tcon01 ));
t2 = tcon01;
progress = this; // Made progress
}
}
}
const Node *add1 = in(1);
if( add1->Opcode() == add_opcode() ) { // Left input is an add?
const Type *t12 = phase->type( add1->in(2) );
if( t12->singleton() && t12 != Type::TOP ) { // Left input is an add of a constant?
assert( add1->in(1) != add1, "dead loop in MulNode::Ideal" );
// (X + con1) * con2 ==> X*con2 + (con1*con2): distribute the constant.
const Type *tcon01 = mul_ring(t2,t12);
if( tcon01->singleton() ) {
Node *mul = clone(); // mul = ()*con0
mul->set_req(1,add1->in(1)); // mul = X*con0
mul = phase->transform(mul);
Node *add2 = add1->clone();
add2->set_req(1, mul); // X*con0 + con0*con1
add2->set_req(2, phase->makecon(tcon01) );
progress = add2;
}
}
} // End of is left input an add
} // End of is right input a Mul
return progress;
}
// Compute the result type of this multiply-like node from its inputs.
const Type *MulNode::Value( PhaseTransform *phase ) const {
  const Type *left  = phase->type( in(1) );
  const Type *right = phase->type( in(2) );
  // A dead input makes the result dead.
  if( left == Type::TOP || right == Type::TOP ) return Type::TOP;
  // For integral multiply/and, a zero (the additive identity) on either
  // side forces a zero result.  Not applied for FP ops.
  int op = Opcode();
  if( op == Op_MulI || op == Op_AndI || op == Op_MulL || op == Op_AndL ) {
    const Type *zero = add_id();  // The multiplicative zero
    if( left->higher_equal( zero ) || right->higher_equal( zero ) )
      return zero;
  }
  // Either input at BOTTOM: we know nothing beyond this node's bottom.
  if( left == Type::BOTTOM || right == Type::BOTTOM )
    return bottom_type();
#if defined(IA32)
  // IA32 with strict FP semantics: do not constant-fold MulD here.
  if (op == Op_MulD && phase->C->method()->is_strict()) {
    return TypeD::DOUBLE;
  }
#endif
  return mul_ring(left, right);  // Node-specific type multiplication
}
// Ideal: strength-reduce int multiplication by a constant into shifts,
// shift+add, or shift+sub; otherwise defer to MulNode::Ideal.
Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  jint con;
  if ((con = in(1)->find_int_con(0)) != 0) {
    swap_edges(1, 2);           // Make the constant the right input
  } else if ((con = in(2)->find_int_con(0)) == 0) {
    return MulNode::Ideal(phase, can_reshape);  // No constant operand
  }
  if (con == 0) return NULL;    // By zero is handled by Value call
  if (con == 1) return NULL;    // By one is handled by Identity call
  // Work with the magnitude; negate the result at the end if needed.
  bool sign_flip = false;
  unsigned int abs_con = uabs(con);
  if (abs_con != (unsigned int)con) {
    sign_flip = true;
  }
  Node *res = NULL;
  unsigned int bit1 = abs_con & (0-abs_con);  // Extract low bit
  if (bit1 == abs_con) {        // Found a power of 2?  X*2^k == X<<k
    res = new (phase->C) LShiftINode(in(1), phase->intcon(log2_uint(bit1)));
  } else {
    unsigned int bit2 = abs_con-bit1;
    bit2 = bit2 & (0-bit2);     // Extract 2nd bit
    if (bit2 + bit1 == abs_con) {  // Found all bits in con?
      // Exactly two bits set: X*con == (X<<k1) + (X<<k2).
      Node *n1 = phase->transform( new (phase->C) LShiftINode(in(1), phase->intcon(log2_uint(bit1))));
      Node *n2 = phase->transform( new (phase->C) LShiftINode(in(1), phase->intcon(log2_uint(bit2))));
      res = new (phase->C) AddINode(n2, n1);
    } else if (is_power_of_2(abs_con+1)) {
      // con == 2^k - 1: X*con == (X<<k) - X.
      unsigned int temp = abs_con + 1;
      Node *n1 = phase->transform(new (phase->C) LShiftINode(in(1), phase->intcon(log2_uint(temp))));
      res = new (phase->C) SubINode(n1, in(1));
    } else {
      return MulNode::Ideal(phase, can_reshape);
    }
  }
  if (sign_flip) {              // Need to negate result?
    res = phase->transform(res);// Transform, before making the zero con
    res = new (phase->C) SubINode(phase->intcon(0),res);
  }
  return res;                   // Return final result
}
// Type-multiply two int ranges.  Every endpoint cross product is checked
// against its double-precision image; a mismatch signals 32-bit overflow
// and we give up, returning the full int type.
const Type *MulINode::mul_ring(const Type *t0, const Type *t1) const {
  const TypeInt *r0 = t0->is_int();
  const TypeInt *r1 = t1->is_int();
  const double a = (double)r0->_lo;
  const double b = (double)r0->_hi;
  const double c = (double)r1->_lo;
  const double d = (double)r1->_hi;

  // The four endpoint products in Java 32-bit arithmetic.
  int32 p00 = java_multiply(r0->_lo, r1->_lo);
  if( (double)p00 != a*c ) return TypeInt::INT;  // Overflow?
  int32 p01 = java_multiply(r0->_lo, r1->_hi);
  if( (double)p01 != a*d ) return TypeInt::INT;  // Overflow?
  int32 p10 = java_multiply(r0->_hi, r1->_lo);
  if( (double)p10 != b*c ) return TypeInt::INT;  // Overflow?
  int32 p11 = java_multiply(r0->_hi, r1->_hi);
  if( (double)p11 != b*d ) return TypeInt::INT;  // Overflow?

  // The result range is the min/max over all four cross products.
  int32 lo = MIN2(MIN2(p00, p01), MIN2(p10, p11));
  int32 hi = MAX2(MAX2(p00, p01), MAX2(p10, p11));
  return TypeInt::make(lo, hi, MAX2(r0->_widen, r1->_widen));
}
// Ideal: strength-reduce long multiplication by a constant into shifts,
// shift+add, or shift+sub (64-bit mirror of MulINode::Ideal).
Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  jlong con;
  if ((con = in(1)->find_long_con(0)) != 0) {
    swap_edges(1, 2);           // Make the constant the right input
  } else if ((con = in(2)->find_long_con(0)) == 0) {
    return MulNode::Ideal(phase, can_reshape);  // No constant operand
  }
  if (con == CONST64(0)) return NULL;  // By zero is handled by Value call
  if (con == CONST64(1)) return NULL;  // By one is handled by Identity call
  // Work with the magnitude; negate the result at the end if needed.
  bool sign_flip = false;
  julong abs_con = uabs(con);
  if (abs_con != (julong)con) {
    sign_flip = true;
  }
  Node *res = NULL;
  julong bit1 = abs_con & (0-abs_con);  // Extract low bit
  if (bit1 == abs_con) {        // Found a power of 2?  X*2^k == X<<k
    res = new (phase->C) LShiftLNode(in(1), phase->intcon(log2_long(bit1)));
  } else {
    julong bit2 = abs_con-bit1;
    bit2 = bit2 & (0-bit2);     // Extract 2nd bit
    if (bit2 + bit1 == abs_con) {  // Found all bits in con?
      // Exactly two bits set: X*con == (X<<k1) + (X<<k2).
      Node *n1 = phase->transform(new (phase->C) LShiftLNode(in(1), phase->intcon(log2_long(bit1))));
      Node *n2 = phase->transform(new (phase->C) LShiftLNode(in(1), phase->intcon(log2_long(bit2))));
      res = new (phase->C) AddLNode(n2, n1);
    } else if (is_power_of_2_long(abs_con+1)) {
      // con == 2^k - 1: X*con == (X<<k) - X.
      julong temp = abs_con + 1;
      Node *n1 = phase->transform( new (phase->C) LShiftLNode(in(1), phase->intcon(log2_long(temp))));
      res = new (phase->C) SubLNode(n1, in(1));
    } else {
      return MulNode::Ideal(phase, can_reshape);
    }
  }
  if (sign_flip) {              // Need to negate result?
    res = phase->transform(res);// Transform, before making the zero con
    res = new (phase->C) SubLNode(phase->longcon(0),res);
  }
  return res;                   // Return final result
}
// Type-multiply two long ranges.  Endpoint cross products are checked
// for 64-bit overflow by comparison with a double-precision product.
// NOTE(review): a double mantissa has only 53 bits, so for very large
// jlong endpoints both sides of the comparison may round identically and
// a real overflow could go undetected — confirm against upstream fixes.
const Type *MulLNode::mul_ring(const Type *t0, const Type *t1) const {
  const TypeLong *r0 = t0->is_long(); // Handy access
  const TypeLong *r1 = t1->is_long();
  // Range endpoints plus their double images for overflow detection.
  jlong lo0 = r0->_lo;
  double a = (double)lo0;
  jlong hi0 = r0->_hi;
  double b = (double)hi0;
  jlong lo1 = r1->_lo;
  double c = (double)lo1;
  jlong hi1 = r1->_hi;
  double d = (double)hi1;
  // All four cross products; bail to the full long type on overflow.
  jlong A = java_multiply(lo0, lo1);
  if( (double)A != a*c ) return TypeLong::LONG; // Overflow?
  jlong B = java_multiply(lo0, hi1);
  if( (double)B != a*d ) return TypeLong::LONG; // Overflow?
  jlong C = java_multiply(hi0, lo1);
  if( (double)C != b*c ) return TypeLong::LONG; // Overflow?
  jlong D = java_multiply(hi0, hi1);
  if( (double)D != b*d ) return TypeLong::LONG; // Overflow?
  // The result range is [min, max] over the four cross products.
  if( A < B ) { lo0 = A; hi0 = B; } // Sort range endpoints
  else { lo0 = B; hi0 = A; }
  if( C < D ) {
    if( C < lo0 ) lo0 = C;
    if( D > hi0 ) hi0 = D;
  } else {
    if( D < lo0 ) lo0 = D;
    if( C > hi0 ) hi0 = C;
  }
  return TypeLong::make(lo0, hi0, MAX2(r0->_widen,r1->_widen));
}
// Fold the product of two float constants; any non-constant operand
// collapses the result to the generic FLOAT type.
const Type *MulFNode::mul_ring(const Type *t0, const Type *t1) const {
  if( t0 == Type::FLOAT || t1 == Type::FLOAT )
    return Type::FLOAT;
  const float product = t0->getf() * t1->getf();
  return TypeF::make( product );
}
// Fold the product of two double constants; any non-constant operand
// collapses the result to the generic DOUBLE type.
const Type *MulDNode::mul_ring(const Type *t0, const Type *t1) const {
  if( t0 == Type::DOUBLE || t1 == Type::DOUBLE )
    return Type::DOUBLE;
  const double product = t0->getd() * t1->getd();
  return TypeD::make( product );
}
// Type for MulHiL.  No constant folding is attempted: anything that is
// not TOP or bottom yields a plain long.
const Type *MulHiLNode::Value( PhaseTransform *phase ) const {
  const Type *left  = phase->type( in(1) );
  const Type *right = phase->type( in(2) );
  // Either input dead makes the result dead.
  if( left == Type::TOP || right == Type::TOP ) return Type::TOP;
  // Either input at bottom pins the result at this node's bottom.
  const Type *bot = bottom_type();
  if( (left  == bot) || (right == bot) ||
      (left  == Type::BOTTOM) || (right == Type::BOTTOM) )
    return bot;
  return TypeLong::LONG;
}
// Type-AND of two int ranges.
const Type *AndINode::mul_ring( const Type *t0, const Type *t1 ) const {
  const TypeInt *r0 = t0->is_int();  // Handy access
  const TypeInt *r1 = t1->is_int();
  const int widen = MAX2(r0->_widen, r1->_widen);
  const bool con0 = r0->is_con();
  const bool con1 = r1->is_con();
  // No constants: nothing to fold.
  if( !con0 && !con1 )
    return TypeInt::INT;
  // Two constants fold exactly.
  if( con0 && con1 )
    return TypeInt::make( r0->get_con() & r1->get_con() );
  // AND with a single positive constant bounds the result to [0, con].
  if( con0 && r0->get_con() > 0 )
    return TypeInt::make(0, r0->get_con(), widen);
  if( con1 && r1->get_con() > 0 )
    return TypeInt::make(0, r1->get_con(), widen);
  // AND with a boolean stays boolean.
  if( r0 == TypeInt::BOOL || r1 == TypeInt::BOOL )
    return TypeInt::BOOL;
  return TypeInt::INT;
}
// Identity: an AND whose mask cannot clear any bit the other input can
// actually have is a no-op.
Node *AndINode::Identity( PhaseTransform *phase ) {
  // x & x => x
  if (phase->eqv(in(1), in(2))) return in(1);
  Node* in1 = in(1);
  uint op = in1->Opcode();
  const TypeInt* t2 = phase->type(in(2))->isa_int();
  if (t2 && t2->is_con()) {
    int con = t2->get_con();
    // Masking off high bits which are always zero is useless.
    const TypeInt* t1 = phase->type( in(1) )->isa_int();
    if (t1 != NULL && t1->_lo >= 0) {
      // t1_support has a 1 in every bit position the value can occupy.
      jint t1_support = right_n_bits(1 + log2_jint(t1->_hi));
      if ((t1_support & con) == t1_support)
        return in1;
    }
    // An AND below an unsigned right shift is useless when the mask
    // keeps every bit the shift can still produce.
    if (op == Op_URShiftI) {
      const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
      if (t12 && t12->is_con()) {   // Shift is by a constant
        int shift = t12->get_con();
        shift &= BitsPerJavaInteger - 1;  // semantics of Java shifts
        int mask = max_juint >> shift;
        if ((mask & con) == mask)   // If AND is useless, skip it
          return in1;
      }
    }
  }
  return MulNode::Identity(phase);
}
// Ideal for AndI: narrow masked loads and fold shifts under masks.
Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Special case: constant AND mask only.
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
  const int mask = t2->get_con();
  Node *load = in(1);
  uint lop = load->Opcode();
  // Masking a char load: LoadUS already zeroes the high half, so the
  // mask can be shrunk to its low 16 bits.
  if( lop == Op_LoadUS &&
      (mask & 0xFFFF0000) )     // Can we make a smaller mask?
    return new (phase->C) AndINode(load,phase->intcon(mask&0xFFFF));
  // Masking off sign bits of a signed load: replace with the matching
  // zero-extending load (only when this AND is the load's sole user).
  if (can_reshape &&
      load->outcnt() == 1 && load->unique_out() == this) {
    if (lop == Op_LoadS && (mask & 0xFFFF0000) == 0 ) {
      Node *ldus = new (phase->C) LoadUSNode(load->in(MemNode::Control),
                                             load->in(MemNode::Memory),
                                             load->in(MemNode::Address),
                                             load->adr_type(),
                                             TypeInt::CHAR, MemNode::unordered);
      ldus = phase->transform(ldus);
      return new (phase->C) AndINode(ldus, phase->intcon(mask & 0xFFFF));
    }
    if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) {
      Node* ldub = new (phase->C) LoadUBNode(load->in(MemNode::Control),
                                             load->in(MemNode::Memory),
                                             load->in(MemNode::Address),
                                             load->adr_type(),
                                             TypeInt::UBYTE, MemNode::unordered);
      ldub = phase->transform(ldub);
      return new (phase->C) AndINode(ldub, phase->intcon(mask));
    }
  }
  // Masking off sign bits of a signed right shift: convert to an
  // unsigned shift when the mask discards all sign-extended bits.
  if( lop == Op_RShiftI ) {
    const TypeInt *t12 = phase->type(load->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
      const int sign_bits_mask = ~right_n_bits(BitsPerJavaInteger - shift);
      if( (sign_bits_mask & mask) == 0 ) {
        Node *zshift = phase->transform(new (phase->C) URShiftINode(load->in(1),load->in(2)));
        return new (phase->C) AndINode( zshift, in(2) );
      }
    }
  }
  // (0 - X) & 1 == X & 1, since negation preserves the low bit.
  if( lop == Op_SubI && mask == 1 && load->in(1) &&
      phase->type(load->in(1)) == TypeInt::ZERO )
    return new (phase->C) AndINode( load->in(2), in(2) );
  return MulNode::Ideal(phase, can_reshape);
}
// Type-AND of two long ranges.
const Type *AndLNode::mul_ring( const Type *t0, const Type *t1 ) const {
  const TypeLong *r0 = t0->is_long();  // Handy access
  const TypeLong *r1 = t1->is_long();
  const int widen = MAX2(r0->_widen, r1->_widen);
  const bool con0 = r0->is_con();
  const bool con1 = r1->is_con();
  // No constants: nothing to fold.
  if( !con0 && !con1 )
    return TypeLong::LONG;
  // Two constants fold exactly.
  if( con0 && con1 )
    return TypeLong::make( r0->get_con() & r1->get_con() );
  // AND with a single positive constant bounds the result to [0, con].
  if( con0 && r0->get_con() > 0 )
    return TypeLong::make(CONST64(0), r0->get_con(), widen);
  if( con1 && r1->get_con() > 0 )
    return TypeLong::make(CONST64(0), r1->get_con(), widen);
  return TypeLong::LONG;
}
// Identity: an AND whose mask cannot clear any bit the other input can
// actually have is a no-op (64-bit mirror of AndINode::Identity).
Node *AndLNode::Identity( PhaseTransform *phase ) {
  // x & x => x
  if (phase->eqv(in(1), in(2))) return in(1);
  Node *usr = in(1);
  const TypeLong *t2 = phase->type( in(2) )->isa_long();
  if( t2 && t2->is_con() ) {
    jlong con = t2->get_con();
    // Masking off high bits which are always zero is useless.
    const TypeLong* t1 = phase->type( in(1) )->isa_long();
    if (t1 != NULL && t1->_lo >= 0) {
      // t1_support has a 1 in every bit position the value can occupy.
      int bit_count = log2_long(t1->_hi) + 1;
      jlong t1_support = jlong(max_julong >> (BitsPerJavaLong - bit_count));
      if ((t1_support & con) == t1_support)
        return usr;
    }
    // An AND below an unsigned right shift is useless when the mask
    // keeps every bit the shift can still produce.
    uint lop = usr->Opcode();
    if( lop == Op_URShiftL ) {
      const TypeInt *t12 = phase->type( usr->in(2) )->isa_int();
      if( t12 && t12->is_con() ) { // Shift is by a constant
        int shift = t12->get_con();
        shift &= BitsPerJavaLong - 1; // semantics of Java shifts
        jlong mask = max_julong >> shift;
        if( (mask&con) == mask )  // If AND is useless, skip it
          return usr;
      }
    }
  }
  return MulNode::Identity(phase);
}
// Ideal for AndL: push narrow masks through ConvI2L and convert masked
// signed shifts to unsigned shifts.
Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Special case: constant AND mask only.
  const TypeLong *t2 = phase->type( in(2) )->isa_long();
  if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
  const jlong mask = t2->get_con();
  Node* in1 = in(1);
  uint op = in1->Opcode();
  // Masking a long converted from int with a mask that fits in the low
  // 31 bits: do the AND in 32 bits and convert afterwards.  (The
  // narrowing intcon(mask) is safe: the guard proves the mask fits.)
  if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF80000000)) == 0) {
    Node* andi = new (phase->C) AndINode(in1->in(1), phase->intcon(mask));
    andi = phase->transform(andi);
    return new (phase->C) ConvI2LNode(andi);
  }
  // Masking off sign bits of a signed right shift: convert to an
  // unsigned shift when the mask discards every sign-extended bit.
  if (op == Op_RShiftL) {
    const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaLong - 1; // semantics of Java shifts
      const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - shift)) -1);
      if( (sign_bits_mask & mask) == 0 ) {
        Node *zshift = phase->transform(new (phase->C) URShiftLNode(in1->in(1), in1->in(2)));
        return new (phase->C) AndLNode(zshift, in(2));
      }
    }
  }
  return MulNode::Ideal(phase, can_reshape);
}
// A left shift whose constant count is a multiple of 32 is a no-op:
// Java masks int shift counts to their low five bits.
Node *LShiftINode::Identity( PhaseTransform *phase ) {
  const TypeInt *count = phase->type( in(2) )->isa_int();  // shift count is an int
  if( count != NULL && count->is_con() &&
      ( count->get_con() & ( BitsPerInt - 1 ) ) == 0 )
    return in(1);
  return this;
}
// Ideal for LShiftI:
//  - (X + con1) << con2      =>  (X << con2) + (con1 << con2)  (shifts < 16)
//  - (X >> C) << C           =>  X & -(1<<C)
//  - ((X >> C) & Y) << C     =>  X & (Y << C)
//  - drop an AND whose mask keeps exactly the bits that survive the shift
Node *LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const Type *t  = phase->type( in(2) );
  if( t == Type::TOP ) return NULL;       // Right input is dead
  const TypeInt *t2 = t->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const int con = t2->get_con() & ( BitsPerInt - 1 );  // masked shift count
  if ( con == 0 ) return NULL;            // let Identity() handle 0 shift count
  // Left input an add of a constant?  Shift both addends so the constant
  // part folds.  NOTE(review): the con<16 limit looks like a heuristic —
  // confirm the rationale upstream before relying on it.
  Node *add1 = in(1);
  int add1_op = add1->Opcode();
  if( add1_op == Op_AddI ) {    // Left input is an add?
    assert( add1 != add1->in(1), "dead loop in LShiftINode::Ideal" );
    const TypeInt *t12 = phase->type(add1->in(2))->isa_int();
    if( t12 && t12->is_con() ){ // Left input is an add of a con?
      if( con < 16 ) {
        Node *lsh = phase->transform( new (phase->C) LShiftINode( add1->in(1), in(2) ) );
        return new (phase->C) AddINode( lsh, phase->intcon(t12->get_con() << con));
      }
    }
  }
  // (X >> C) << C or (X >>> C) << C just zeroes the low C bits.
  if( (add1_op == Op_RShiftI || add1_op == Op_URShiftI ) &&
      add1->in(2) == in(2) )
    return new (phase->C) AndINode(add1->in(1),phase->intcon( -(1<<con)));
  // ((X >> C) & Y) << C  =>  X & (Y << C)
  if( add1_op == Op_AndI ) {
    Node *add2 = add1->in(1);
    int add2_op = add2->Opcode();
    if( (add2_op == Op_RShiftI || add2_op == Op_URShiftI ) &&
        add2->in(2) == in(2) ) {
      Node *y_sh = phase->transform( new (phase->C) LShiftINode( add1->in(2), in(2) ) );
      return new (phase->C) AndINode( add2->in(1), y_sh );
    }
  }
  // An AND keeping exactly the low (32-con) bits is redundant below the
  // shift, since the shift discards the high con bits anyway.
  const jint bits_mask = right_n_bits(BitsPerJavaInteger-con);
  if( add1_op == Op_AndI &&
      phase->type(add1->in(2)) == TypeInt::make( bits_mask ) )
    return new (phase->C) LShiftINode( add1->in(1), in(2) );
  return NULL;
}
// Value for LShiftI: fold constants and propagate range bounds when the
// shift provably cannot lose bits (verified by shifting back).
const Type *LShiftINode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP.
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;
  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
  // Shift by zero does nothing.
  if( t2 == TypeInt::ZERO ) return t1;
  // Either input at its bottom ==> the result is the local bottom.
  if( (t1 == TypeInt::INT) || (t2 == TypeInt::INT) ||
      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
    return TypeInt::INT;
  const TypeInt *r1 = t1->is_int(); // Handy access
  const TypeInt *r2 = t2->is_int(); // Handy access
  if (!r2->is_con())
    return TypeInt::INT;
  uint shift = r2->get_con();
  shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
  // A masked shift count of zero does nothing.
  if (shift == 0) return t1;
  // Shift the range bounds, unless that could lose bits from either
  // bound (checked with a shift round trip).
  if (!r1->is_con()) {
    jint lo = r1->_lo, hi = r1->_hi;
    if (((lo << shift) >> shift) == lo &&
        ((hi << shift) >> shift) == hi) {
      // No overflow; the range shifts up smoothly.
      return TypeInt::make((jint)lo << (jint)shift,
                           (jint)hi << (jint)shift,
                           MAX2(r1->_widen,r2->_widen));
    }
    return TypeInt::INT;
  }
  // Both inputs constant: fold directly.
  return TypeInt::make( (jint)r1->get_con() << (jint)shift );
}
// A left shift whose constant count is a multiple of 64 is a no-op:
// Java masks long shift counts to their low six bits.
Node *LShiftLNode::Identity( PhaseTransform *phase ) {
  const TypeInt *count = phase->type( in(2) )->isa_int();  // shift count is an int
  if( count != NULL && count->is_con() &&
      ( count->get_con() & ( BitsPerLong - 1 ) ) == 0 )
    return in(1);
  return this;
}
// Ideal for LShiftL (64-bit mirror of LShiftINode::Ideal):
//  - (X + con1) << con2   =>  (X << con2) + (con1 << con2)
//  - (X >> C) << C        =>  X & -(1<<C)
//  - ((X >> C) & Y) << C  =>  X & (Y << C)
//  - drop an AND whose mask keeps exactly the bits surviving the shift
Node *LShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const Type *t  = phase->type( in(2) );
  if( t == Type::TOP ) return NULL;       // Right input is dead
  const TypeInt *t2 = t->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const int con = t2->get_con() & ( BitsPerLong - 1 ); // masked shift count
  if ( con == 0 ) return NULL;            // let Identity() handle 0 shift count
  // Left input an add of a constant?  Shift both addends so the constant
  // part folds.  (Unlike the int version, no size limit here.)
  Node *add1 = in(1);
  int add1_op = add1->Opcode();
  if( add1_op == Op_AddL ) {    // Left input is an add?
    assert( add1 != add1->in(1), "dead loop in LShiftLNode::Ideal" );
    const TypeLong *t12 = phase->type(add1->in(2))->isa_long();
    if( t12 && t12->is_con() ){ // Left input is an add of a con?
      Node *lsh = phase->transform( new (phase->C) LShiftLNode( add1->in(1), in(2) ) );
      return new (phase->C) AddLNode( lsh, phase->longcon(t12->get_con() << con));
    }
  }
  // (X >> C) << C or (X >>> C) << C just zeroes the low C bits.
  if( (add1_op == Op_RShiftL || add1_op == Op_URShiftL ) &&
      add1->in(2) == in(2) )
    return new (phase->C) AndLNode(add1->in(1),phase->longcon( -(CONST64(1)<<con)));
  // ((X >> C) & Y) << C  =>  X & (Y << C)
  if( add1_op == Op_AndL ) {
    Node *add2 = add1->in(1);
    int add2_op = add2->Opcode();
    if( (add2_op == Op_RShiftL || add2_op == Op_URShiftL ) &&
        add2->in(2) == in(2) ) {
      Node *y_sh = phase->transform( new (phase->C) LShiftLNode( add1->in(2), in(2) ) );
      return new (phase->C) AndLNode( add2->in(1), y_sh );
    }
  }
  // An AND keeping exactly the low (64-con) bits is redundant below the
  // shift, since the shift discards the high con bits anyway.
  const jlong bits_mask = jlong(max_julong >> con);
  if( add1_op == Op_AndL &&
      phase->type(add1->in(2)) == TypeLong::make( bits_mask ) )
    return new (phase->C) LShiftLNode( add1->in(1), in(2) );
  return NULL;
}
// Value for LShiftL: fold constants and propagate range bounds when the
// shift provably cannot lose bits (verified by shifting back).
const Type *LShiftLNode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP.
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;
  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
  // Shift by zero does nothing.
  if( t2 == TypeInt::ZERO ) return t1;
  // Either input at its bottom ==> the result is the local bottom.
  if( (t1 == TypeLong::LONG) || (t2 == TypeInt::INT) ||
      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
    return TypeLong::LONG;
  const TypeLong *r1 = t1->is_long(); // Handy access
  const TypeInt *r2 = t2->is_int();   // Handy access
  if (!r2->is_con())
    return TypeLong::LONG;
  uint shift = r2->get_con();
  shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
  // A masked shift count of zero does nothing.
  if (shift == 0) return t1;
  // Shift the range bounds, unless that could lose bits from either
  // bound (checked with a shift round trip).
  if (!r1->is_con()) {
    jlong lo = r1->_lo, hi = r1->_hi;
    if (((lo << shift) >> shift) == lo &&
        ((hi << shift) >> shift) == hi) {
      // No overflow; the range shifts up smoothly.
      return TypeLong::make((jlong)lo << (jint)shift,
                            (jlong)hi << (jint)shift,
                            MAX2(r1->_widen,r2->_widen));
    }
    return TypeLong::LONG;
  }
  // Both inputs constant: fold directly.
  return TypeLong::make( (jlong)r1->get_con() << (jint)shift );
}
// Identity for RShiftI:
//  - a constant shift count that is a multiple of 32 is a no-op
//  - (X << C) >> C is a no-op when X's range proves no bits are lost
Node *RShiftINode::Identity( PhaseTransform *phase ) {
  const TypeInt *t2 = phase->type(in(2))->isa_int();
  if( !t2 ) return this;
  if ( t2->is_con() && ( t2->get_con() & ( BitsPerInt - 1 ) ) == 0 )
    return in(1);
  // Check for useless sign-masking: (X << C) >> C with matching counts.
  if( in(1)->Opcode() == Op_LShiftI &&
      in(1)->req() == 3 &&
      in(1)->in(2) == in(2) &&
      t2->is_con() ) {
    uint shift = t2->get_con();
    shift &= BitsPerJavaInteger-1; // semantics of Java shifts
    // The value range that survives the shift round trip, e.g. for
    // shift==16 it is [0xFFFF8000, 0x00007FFF].
    int lo = (-1 << (BitsPerJavaInteger - shift-1)); // FFFF8000
    int hi = ~lo;               // 00007FFF
    const TypeInt *t11 = phase->type(in(1)->in(1))->isa_int();
    if( !t11 ) return this;
    // If the actual value fits inside that range, shifting is a nop.
    if( lo <= t11->_lo && t11->_hi <= hi )
      return in(1)->in(1); // Then shifting is a nop
  }
  return this;
}
// Ideal for signed right shift:
//  - (X & conmask) >> con   =>  (X >> con) & (conmask >> con)
//  - (LoadS  << 16) >> 16   =>  LoadS (redundant sign extension)
//  - (LoadUS << 16) >> 16   =>  LoadS (reload with sign-extending load)
//  - (LoadB  << 24) >> 24   =>  LoadB
// Fix: removed the unused local 'Node *x = mask->in(1);' (dead code —
// the transform below reads mask->in(1) directly).
Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Inputs may not have proper types during certain phases; bail out.
  const TypeInt *t1 = phase->type( in(1) )->isa_int();
  if( !t1 ) return NULL;        // Left input is an integer
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const TypeInt *t3;  // type of in(1).in(2)
  int shift = t2->get_con();
  shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
  if ( shift == 0 ) return NULL;  // let Identity() handle 0 shift count
  // Shift the AND mask down instead of masking before the shift:
  // (X & conmask) >> con  =>  (X >> con) & (conmask >> con)
  const Node *mask = in(1);
  if( mask->Opcode() == Op_AndI &&
      (t3 = phase->type(mask->in(2))->isa_int()) &&
      t3->is_con() ) {
    jint maskbits = t3->get_con();
    Node *shr_nomask = phase->transform( new (phase->C) RShiftINode(mask->in(1), in(2)) );
    return new (phase->C) AndINode(shr_nomask, phase->intcon( maskbits >> shift));
  }
  // The remaining patterns all look through a left shift.
  const Node *shl = in(1);
  if( shl->Opcode() != Op_LShiftI ) return NULL;
  // (X << 16) >> 16 idioms: sign-extension of a 16-bit value.
  if( shift == 16 &&
      (t3 = phase->type(shl->in(2))->isa_int()) &&
      t3->is_con(16) ) {
    Node *ld = shl->in(1);
    if( ld->Opcode() == Op_LoadS ) {
      // LoadS is already sign extended; become the load itself by
      // turning this node into a 0-shift of the load.
      set_req(1, ld);
      set_req(2, phase->intcon(0));
      return this;
    }
    else if( can_reshape &&
             ld->Opcode() == Op_LoadUS &&
             ld->outcnt() == 1 && ld->unique_out() == shl)
      // Replace zero-extension-load with sign-extension-load
      return new (phase->C) LoadSNode( ld->in(MemNode::Control),
                                       ld->in(MemNode::Memory),
                                       ld->in(MemNode::Address),
                                       ld->adr_type(), TypeInt::SHORT,
                                       MemNode::unordered);
  }
  // (X << 24) >> 24 idiom: sign-extension of an 8-bit value.
  if( shift == 24 &&
      (t3 = phase->type(shl->in(2))->isa_int()) &&
      t3->is_con(24) ) {
    Node *ld = shl->in(1);
    if( ld->Opcode() == Op_LoadB ) {
      // LoadB is already sign extended; become a 0-shift of the load.
      set_req(1, ld);
      set_req(2, phase->intcon(0));
      return this;
    }
  }
  return NULL;
}
// Value for RShiftI: arithmetic shift of the range bounds.  Signed right
// shift is monotonic, so shifting both bounds always yields valid bounds.
const Type *RShiftINode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP.
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;
  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
  // Shift by zero does nothing.
  if( t2 == TypeInt::ZERO ) return t1;
  // Either input at bottom ==> result at the local bottom.
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeInt::INT;
  if (t2 == TypeInt::INT)
    return TypeInt::INT;
  const TypeInt *r1 = t1->is_int(); // Handy access
  const TypeInt *r2 = t2->is_int(); // Handy access
  // If the shift is a constant, just shift the bounds of the type.
  // For example, if the shift is 31, we just propagate sign bits.
  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
    // A masked shift count of zero does nothing.
    if (shift == 0) return t1;
    jint lo = (jint)r1->_lo >> (jint)shift;
    jint hi = (jint)r1->_hi >> (jint)shift;
    assert(lo <= hi, "must have valid bounds");
    const TypeInt* ti = TypeInt::make(lo, hi, MAX2(r1->_widen,r2->_widen));
#ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == BitsPerJavaInteger-1) {
      if (r1->_lo >= 0) assert(ti == TypeInt::ZERO, ">>31 of + is 0");
      if (r1->_hi < 0) assert(ti == TypeInt::MINUS_1, ">>31 of - is -1");
    }
#endif
    return ti;
  }
  if( !r1->is_con() || !r2->is_con() )
    return TypeInt::INT;
  // Both constant: fold the signed shift directly.
  return TypeInt::make( r1->get_con() >> (r2->get_con()&31) );
}
// A signed right shift whose constant count is a multiple of 64 is a
// no-op: Java masks long shift counts to their low six bits.
Node *RShiftLNode::Identity( PhaseTransform *phase ) {
  const TypeInt *count = phase->type( in(2) )->isa_int();  // shift count is an int
  if( count != NULL && count->is_con() &&
      ( count->get_con() & ( BitsPerLong - 1 ) ) == 0 )
    return in(1);
  return this;
}
// Value for RShiftL: arithmetic shift of the range bounds (signed shift
// is monotonic, so shifting both bounds is always valid).
const Type *RShiftLNode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP.
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;
  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
  // Shift by zero does nothing.
  if( t2 == TypeInt::ZERO ) return t1;
  // Either input at bottom ==> result at the local bottom.
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeLong::LONG;
  if (t2 == TypeInt::INT)
    return TypeLong::LONG;
  const TypeLong *r1 = t1->is_long(); // Handy access
  const TypeInt *r2 = t2->is_int (); // Handy access
  // If the shift is a constant, just shift the bounds of the type.
  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= (2*BitsPerJavaInteger)-1;  // semantics of Java shifts
    // A masked shift count of zero does nothing.
    if (shift == 0) return t1;
    jlong lo = (jlong)r1->_lo >> (jlong)shift;
    jlong hi = (jlong)r1->_hi >> (jlong)shift;
    assert(lo <= hi, "must have valid bounds");
    const TypeLong* tl = TypeLong::make(lo, hi, MAX2(r1->_widen,r2->_widen));
#ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == (2*BitsPerJavaInteger)-1) {
      if (r1->_lo >= 0) assert(tl == TypeLong::ZERO, ">>63 of + is 0");
      if (r1->_hi < 0) assert(tl == TypeLong::MINUS_1, ">>63 of - is -1");
    }
#endif
    return tl;
  }
  return TypeLong::LONG;  // Give up
}
// Identity for URShiftI:
//  - a constant shift count that is a multiple of 32 is a no-op
//  - recognize "((X << LogBytesPerWord) + (wordSize-1)) >>> LogBytesPerWord"
//    as X when X's range proves the add cannot carry into the kept bits
//  - a shift count provably zero is a no-op
Node *URShiftINode::Identity( PhaseTransform *phase ) {
  const TypeInt *ti = phase->type( in(2) )->isa_int();
  if ( ti && ti->is_con() && ( ti->get_con() & ( BitsPerInt - 1 ) ) == 0 ) return in(1);
  // Rounding-to-word-count idiom: only safe when
  // X is in [0 .. max_jint >> LogBytesPerWord].
  Node *add = in(1);
  if( add->Opcode() == Op_AddI ) {
    const TypeInt *t2 = phase->type(add->in(2))->isa_int();
    if( t2 && t2->is_con(wordSize - 1) &&
        add->in(1)->Opcode() == Op_LShiftI ) {
      // Both shift counts must be the constant LogBytesPerWord.
      Node *lshift_count = add->in(1)->in(2);
      const TypeInt *t_lshift_count = phase->type(lshift_count)->isa_int();
      if( t_lshift_count && t_lshift_count->is_con(LogBytesPerWord) &&
          t_lshift_count == phase->type(in(2)) ) {
        Node *x = add->in(1)->in(1);
        const TypeInt *t_x = phase->type(x)->isa_int();
        if( t_x != NULL && 0 <= t_x->_lo && t_x->_hi <= (max_jint>>LogBytesPerWord) ) {
          return x;
        }
      }
    }
  }
  // A shift count whose type proves it is zero is a no-op.
  return (phase->type(in(2))->higher_equal(TypeInt::ZERO)) ? in(1) : this;
}
// Ideal for URShiftI:
//  - (X >>> C1) >>> C2       =>  X >>> (C1+C2) when C1+C2 < 32
//  - ((X << C) + Y) >>> C    =>  (X + (Y >>> C)) & (-1 >>> C)
//  - (X & conmask) >>> C     =>  (X >>> C) & (conmask >> C)
//  - (X << C) >>> C          =>  X & (-1 >>> C)
Node *URShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const int con = t2->get_con() & 31; // Shift count is always masked
  if ( con == 0 ) return NULL;  // let Identity() handle a 0 shift count
  // Mask of the bits that survive this unsigned shift.
  const int mask = right_n_bits(BitsPerJavaInteger - con);
  int in1_op = in(1)->Opcode();
  // Merge two stacked unsigned shifts when the total still fits.
  if( in1_op == Op_URShiftI ) {
    const TypeInt *t12 = phase->type( in(1)->in(2) )->isa_int();
    if( t12 && t12->is_con() ) { // Right input is a constant
      assert( in(1) != in(1)->in(1), "dead loop in URShiftINode::Ideal" );
      const int con2 = t12->get_con() & 31; // Shift count is always masked
      const int con3 = con+con2;
      if( con3 < 32 )  // Only merge shifts if total is < 32
        return new (phase->C) URShiftINode( in(1)->in(1), phase->intcon(con3) );
    }
  }
  // The round-up-to-power-of-2 idiom is "(Q + (2^z - 1)) >>> z".  When Q
  // is itself "X << z" the rounding add is useless:
  // ((X << Z) + Y) >>> Z  =>  (X + (Y >>> Z)) & Z-mask.
  Node *add = in(1);
  if( in1_op == Op_AddI ) {
    Node *lshl = add->in(1);
    if( lshl->Opcode() == Op_LShiftI &&
        phase->type(lshl->in(2)) == t2 ) {
      Node *y_z = phase->transform( new (phase->C) URShiftINode(add->in(2),in(2)) );
      Node *sum = phase->transform( new (phase->C) AddINode( lshl->in(1), y_z ) );
      return new (phase->C) AndINode( sum, phase->intcon(mask) );
    }
  }
  // (X & mask) >>> z  =>  (X >>> z) & (mask >> z) — shortens the mask.
  Node *andi = in(1);
  if( in1_op == Op_AndI ) {
    const TypeInt *t3 = phase->type( andi->in(2) )->isa_int();
    if( t3 && t3->is_con() ) { // Right input is a constant
      jint mask2 = t3->get_con();
      mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
      Node *newshr = phase->transform( new (phase->C) URShiftINode(andi->in(1), in(2)) );
      return new (phase->C) AndINode(newshr, phase->intcon(mask2));
    }
  }
  // (X << z) >>> z simply zeroes the high z bits.
  Node *shl = in(1);
  if( in1_op == Op_LShiftI &&
      phase->type(shl->in(2)) == t2 )
    return new (phase->C) AndINode( shl->in(1), phase->intcon(mask) );
  return NULL;
}
// Value for URShiftI: logical shift of the range bounds.  A range that
// straddles zero must be widened, because -1 >>> shift is the largest
// possible result.
const Type *URShiftINode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP.
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;
  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
  // Shift by zero does nothing.
  if( t2 == TypeInt::ZERO ) return t1;
  // Either input at bottom ==> result at the local bottom.
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeInt::INT;
  if (t2 == TypeInt::INT)
    return TypeInt::INT;
  const TypeInt *r1 = t1->is_int(); // Handy access
  const TypeInt *r2 = t2->is_int(); // Handy access
  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
    // A masked shift count of zero does nothing.
    if (shift == 0) return t1;
    // Unsigned shift of both bounds.
    jint lo = (juint)r1->_lo >> (juint)shift;
    jint hi = (juint)r1->_hi >> (juint)shift;
    // If the range straddles zero, the endpoint shifts do not bound the
    // result: widen to cover [0 >>> shift, -1 >>> shift].
    if (r1->_hi >= 0 && r1->_lo < 0) {
      jint neg_lo = lo;
      jint neg_hi = (juint)-1 >> (juint)shift;
      jint pos_lo = (juint) 0 >> (juint)shift;
      jint pos_hi = hi;
      lo = MIN2(neg_lo, pos_lo);  // == 0
      hi = MAX2(neg_hi, pos_hi);  // == -1 >>> shift;
    }
    assert(lo <= hi, "must have valid bounds");
    const TypeInt* ti = TypeInt::make(lo, hi, MAX2(r1->_widen,r2->_widen));
#ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == BitsPerJavaInteger-1) {
      if (r1->_lo >= 0) assert(ti == TypeInt::ZERO, ">>>31 of + is 0");
      if (r1->_hi < 0) assert(ti == TypeInt::ONE, ">>>31 of - is +1");
    }
#endif
    return ti;
  }
  return TypeInt::INT;
}
// An unsigned right shift whose constant count is a multiple of 64 is a
// no-op: Java masks long shift counts to their low six bits.
Node *URShiftLNode::Identity( PhaseTransform *phase ) {
  const TypeInt *count = phase->type( in(2) )->isa_int();  // shift count is an int
  if( count != NULL && count->is_con() &&
      ( count->get_con() & ( BitsPerLong - 1 ) ) == 0 )
    return in(1);
  return this;
}
// Ideal for URShiftL (64-bit mirror of URShiftINode::Ideal):
//  - ((X << C) + Y) >>> C  =>  (X + (Y >>> C)) & (-1 >>> C)
//  - (X & conmask) >>> C   =>  (X >>> C) & (conmask >> C)
//  - (X << C) >>> C        =>  X & (-1 >>> C)
Node *URShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const int con = t2->get_con() & ( BitsPerLong - 1 ); // Shift count is always masked
  if ( con == 0 ) return NULL;  // let Identity() handle a 0 shift count
  // Mask of the bits that survive this unsigned shift.
  const jlong mask = jlong(max_julong >> con);
  // The round-up-to-power-of-2 idiom is "(Q + (2^z - 1)) >>> z".  When Q
  // is itself "X << z" the rounding add is useless:
  // ((X << Z) + Y) >>> Z  =>  (X + (Y >>> Z)) & Z-mask.
  Node *add = in(1);
  if( add->Opcode() == Op_AddL ) {
    Node *lshl = add->in(1);
    if( lshl->Opcode() == Op_LShiftL &&
        phase->type(lshl->in(2)) == t2 ) {
      Node *y_z = phase->transform( new (phase->C) URShiftLNode(add->in(2),in(2)) );
      Node *sum = phase->transform( new (phase->C) AddLNode( lshl->in(1), y_z ) );
      return new (phase->C) AndLNode( sum, phase->longcon(mask) );
    }
  }
  // (X & mask) >>> z  =>  (X >>> z) & (mask >> z) — shortens the mask.
  Node *andi = in(1);
  if( andi->Opcode() == Op_AndL ) {
    const TypeLong *t3 = phase->type( andi->in(2) )->isa_long();
    if( t3 && t3->is_con() ) { // Right input is a constant
      jlong mask2 = t3->get_con();
      mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
      Node *newshr = phase->transform( new (phase->C) URShiftLNode(andi->in(1), in(2)) );
      return new (phase->C) AndLNode(newshr, phase->longcon(mask2));
    }
  }
  // (X << z) >>> z simply zeroes the high z bits.
  Node *shl = in(1);
  if( shl->Opcode() == Op_LShiftL &&
      phase->type(shl->in(2)) == t2 )
    return new (phase->C) AndLNode( shl->in(1), phase->longcon(mask) );
  return NULL;
}
// Value for URShiftL: logical shift of the range bounds.  A range that
// straddles zero must be widened, because -1 >>> shift is the largest
// possible result.
const Type *URShiftLNode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP.
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;
  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
  // Shift by zero does nothing.
  if( t2 == TypeInt::ZERO ) return t1;
  // Either input at bottom ==> result at the local bottom.
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeLong::LONG;
  if (t2 == TypeInt::INT)
    return TypeLong::LONG;
  const TypeLong *r1 = t1->is_long(); // Handy access
  const TypeInt *r2 = t2->is_int (); // Handy access
  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
    // A masked shift count of zero does nothing.
    if (shift == 0) return t1;
    // Unsigned shift of both bounds.
    jlong lo = (julong)r1->_lo >> (juint)shift;
    jlong hi = (julong)r1->_hi >> (juint)shift;
    // If the range straddles zero, the endpoint shifts do not bound the
    // result: widen to cover [0 >>> shift, -1 >>> shift].
    if (r1->_hi >= 0 && r1->_lo < 0) {
      jlong neg_lo = lo;
      jlong neg_hi = (julong)-1 >> (juint)shift;
      jlong pos_lo = (julong) 0 >> (juint)shift;
      jlong pos_hi = hi;
      lo = neg_lo < pos_lo ? neg_lo : pos_lo;
      hi = neg_hi > pos_hi ? neg_hi : pos_hi;
    }
    assert(lo <= hi, "must have valid bounds");
    const TypeLong* tl = TypeLong::make(lo, hi, MAX2(r1->_widen,r2->_widen));
#ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == BitsPerJavaLong - 1) {
      if (r1->_lo >= 0) assert(tl == TypeLong::ZERO, ">>>63 of + is 0");
      if (r1->_hi < 0) assert(tl == TypeLong::ONE, ">>>63 of - is +1");
    }
#endif
    return tl;
  }
  return TypeLong::LONG;  // Give up
}
// ==== begin src/share/vm/opto/mulnode.hpp (file-concatenation marker) ====
#ifndef SHARE_VM_OPTO_MULNODE_HPP
#define SHARE_VM_OPTO_MULNODE_HPP
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"
class PhaseTransform;
// MulNode: abstract base for multiply-like binary ops (Mul*, And*).
// Subclasses define an algebraic ring via mul_ring/mul_id/add_id and the
// related opcodes, letting MulNode share Identity/Ideal/Value logic.
class MulNode : public Node {
  virtual uint hash() const;  // Commutative hash (sum over inputs)
public:
  MulNode( Node *in1, Node *in2 ): Node(0,in1,in2) {
    init_class_id(Class_Mul);
  }
  // Handle algebraic identities: multiply by the identity element.
  virtual Node *Identity( PhaseTransform *phase );
  // Canonicalize operand order and reassociate with constants.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Type inference: TOP/zero/bottom handling plus mul_ring().
  virtual const Type *Value( PhaseTransform *phase ) const;
  // Multiply two type ranges in the subclass's ring.
  virtual const Type *mul_ring( const Type *, const Type * ) const = 0;
  virtual const Type *mul_id() const = 0;  // Multiplicative identity
  virtual const Type *add_id() const = 0;  // Additive identity (zero)
  virtual int add_opcode() const = 0;      // Opcode of the matching add
  virtual int mul_opcode() const = 0;      // Opcode of the matching multiply
};
// MulINode: 32-bit integer multiply (ring: Java int arithmetic).
class MulINode : public MulNode {
public:
  MulINode( Node *in1, Node *in2 ) : MulNode(in1,in2) {}
  virtual int Opcode() const;
  // Strength-reduces multiply-by-constant into shifts/adds/subs.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeInt::ONE; }
  const Type *add_id() const { return TypeInt::ZERO; }
  int add_opcode() const { return Op_AddI; }
  int mul_opcode() const { return Op_MulI; }
  const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
};
// MulLNode: 64-bit integer multiply (ring: Java long arithmetic).
class MulLNode : public MulNode {
public:
  MulLNode( Node *in1, Node *in2 ) : MulNode(in1,in2) {}
  virtual int Opcode() const;
  // Strength-reduces multiply-by-constant into shifts/adds/subs.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeLong::ONE; }
  const Type *add_id() const { return TypeLong::ZERO; }
  int add_opcode() const { return Op_AddL; }
  int mul_opcode() const { return Op_MulL; }
  const Type *bottom_type() const { return TypeLong::LONG; }
  virtual uint ideal_reg() const { return Op_RegL; }
};
// MulFNode: single-precision float multiply.  No Ideal override: FP
// multiplication does not reassociate.
class MulFNode : public MulNode {
public:
  MulFNode( Node *in1, Node *in2 ) : MulNode(in1,in2) {}
  virtual int Opcode() const;
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeF::ONE; }
  const Type *add_id() const { return TypeF::ZERO; }
  int add_opcode() const { return Op_AddF; }
  int mul_opcode() const { return Op_MulF; }
  const Type *bottom_type() const { return Type::FLOAT; }
  virtual uint ideal_reg() const { return Op_RegF; }
};
class MulDNode : public MulNode {
public:
MulDNode( Node *in1, Node *in2 ) : MulNode(in1,in2) {}
virtual int Opcode() const;
virtual const Type *mul_ring( const Type *, const Type * ) const;
const Type *mul_id() const { return TypeD::ONE; }
const Type *add_id() const { return TypeD::ZERO; }
int add_opcode() const { return Op_AddD; }
int mul_opcode() const { return Op_MulD; }
const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
};
// MulHiLNode: upper 64 bits of a 128-bit (long x long) multiply.
// Plain Node subclass, not a MulNode: the high-half product is not a
// ring operation, so the shared folding machinery does not apply.
class MulHiLNode : public Node {
public:
MulHiLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
const Type *bottom_type() const { return TypeLong::LONG; }
virtual uint ideal_reg() const { return Op_RegL; }
};
// AndINode: 32-bit bitwise AND.  Modeled as a "multiply" in the boolean
// ring: mul_id is all-ones (x & -1 == x), add_id is zero, and the
// matching "add" operation is OR.
class AndINode : public MulINode {
public:
AndINode( Node *in1, Node *in2 ) : MulINode(in1,in2) {}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *mul_ring( const Type *, const Type * ) const;
const Type *mul_id() const { return TypeInt::MINUS_1; }
const Type *add_id() const { return TypeInt::ZERO; }
int add_opcode() const { return Op_OrI; }
int mul_opcode() const { return Op_AndI; }
virtual uint ideal_reg() const { return Op_RegI; }
};
// AndLNode: 64-bit bitwise AND; same boolean-ring modeling as AndINode.
class AndLNode : public MulLNode {
public:
AndLNode( Node *in1, Node *in2 ) : MulLNode(in1,in2) {}
virtual int Opcode() const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *mul_ring( const Type *, const Type * ) const;
const Type *mul_id() const { return TypeLong::MINUS_1; }
const Type *add_id() const { return TypeLong::ZERO; }
int add_opcode() const { return Op_OrL; }
int mul_opcode() const { return Op_AndL; }
virtual uint ideal_reg() const { return Op_RegL; }
};
// Shift nodes.  in1 is the value, in2 the shift count.  L = left shift,
// R = arithmetic (signed) right shift, UR = logical (unsigned) right
// shift; the I/L suffix selects 32-bit vs 64-bit width.
class LShiftINode : public Node {
public:
LShiftINode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
const Type *bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
};
class LShiftLNode : public Node {
public:
LShiftLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
const Type *bottom_type() const { return TypeLong::LONG; }
virtual uint ideal_reg() const { return Op_RegL; }
};
class RShiftINode : public Node {
public:
RShiftINode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
const Type *bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
};
// Note: RShiftLNode declares no Ideal transform, unlike its 32-bit peer.
class RShiftLNode : public Node {
public:
RShiftLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
const Type *bottom_type() const { return TypeLong::LONG; }
virtual uint ideal_reg() const { return Op_RegL; }
};
class URShiftINode : public Node {
public:
URShiftINode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
const Type *bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
};
class URShiftLNode : public Node {
public:
URShiftLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
virtual int Opcode() const;
virtual Node *Identity( PhaseTransform *phase );
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value( PhaseTransform *phase ) const;
const Type *bottom_type() const { return TypeLong::LONG; }
virtual uint ideal_reg() const { return Op_RegL; }
};
#endif // SHARE_VM_OPTO_MULNODE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/multnode.cpp
#include "precompiled.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/type.hpp"
// A MultiNode produces a tuple; it has no register of its own, so its
// output register mask is empty (the ProjNodes carry the registers).
const RegMask &MultiNode::out_RegMask() const {
return RegMask::Empty;
}
// Matcher hook: a projection of a multi simply clones itself.
Node *MultiNode::match( const ProjNode *proj, const Matcher *m ) { return proj->clone(); }
// Return the ProjNode user with constant `which_proj`, or NULL if no
// such projection hangs off this node.
ProjNode* MultiNode::proj_out(uint which_proj) const {
assert(Opcode() != Op_If || which_proj == (uint)true || which_proj == (uint)false, "must be 1 or 0");
assert(Opcode() != Op_If || outcnt() == 2, "bad if #1");
for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
Node *p = fast_out(i);
if (p->is_Proj()) {
ProjNode *proj = p->as_Proj();
if (proj->_con == which_proj) {
// For If nodes the projection kind must agree with the requested path.
assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2");
return proj;
}
} else {
// Only a Start node may appear as its own (non-Proj) user.
assert(p == this && this->is_Start(), "else must be proj");
continue;
}
}
return NULL;
}
// GVN hash: combine the defining node's identity with the projection
// constant and the i/o-use flag so distinct projections never collide.
uint ProjNode::hash() const {
return (uintptr_t)in(TypeFunc::Control) + (_con << 1) + (_is_io_use ? 1 : 0);
}
// GVN equality: same tuple field and same i/o-use flag.
uint ProjNode::cmp( const Node &n ) const { return _con == ((ProjNode&)n)._con && ((ProjNode&)n)._is_io_use == _is_io_use; }
uint ProjNode::size_of() const { return sizeof(ProjNode); }
// A projection is control-flow only when it projects the control slot
// of a node that is itself CFG.
bool ProjNode::is_CFG() const {
Node *def = in(0);
return (_con == TypeFunc::Control && def->is_CFG());
}
// Given the tuple type of the defining node, extract this projection's
// field type.  TOP/BOTTOM pass through unchanged (dead or untyped def).
const Type* ProjNode::proj_type(const Type* t) const {
if (t == Type::TOP) {
return Type::TOP;
}
if (t == Type::BOTTOM) {
return Type::BOTTOM;
}
t = t->is_tuple()->field_at(_con);
Node* n = in(0);
// Results of autoboxing calls are known to be non-null; sharpen the type.
if ((_con == TypeFunc::Parms) &&
n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) {
t = t->join_speculative(TypePtr::NOTNULL);
}
return t;
}
// Static type of this projection: the projected field of the defining
// node's tuple type, or TOP when the definition is disconnected.
const Type *ProjNode::bottom_type() const {
  Node* def = in(0);
  return (def == NULL) ? Type::TOP : proj_type(def->bottom_type());
}
// Memory-slice address type: a memory projection forwards the adr_type
// of its defining node; non-memory projections have none.
const TypePtr *ProjNode::adr_type() const {
if (bottom_type() == Type::MEMORY) {
const TypePtr* adr_type = in(0)->adr_type();
#ifdef ASSERT
if (!is_error_reported() && !Node::in_dump())
assert(adr_type != NULL, "source must have adr_type");
#endif
return adr_type;
}
assert(bottom_type()->base() != Type::Memory, "no other memories?");
return NULL;
}
// A projection is pinned exactly when its defining node is pinned.
bool ProjNode::pinned() const { return in(0)->pinned(); }
#ifndef PRODUCT
// Debug printing: projection constant, plus a marker for i/o-use projs.
void ProjNode::dump_spec(outputStream *st) const { st->print("#%d",_con); if(_is_io_use) st->print(" (i_o_use)");}
#endif
// Constructor-time sanity check (debug only): _con must index a field
// of the defining node's tuple type, where that can be checked.
void ProjNode::check_con() const {
Node* n = in(0);
if (n == NULL) return; // should be assert, but NodeHash makes bogons
if (n->is_Mach()) return; // mach. projs. are not type-safe
if (n->is_Start()) return; // alas, starts can have mach. projs. also
if (_con == SCMemProjNode::SCMEMPROJCON ) return;
const Type* t = n->bottom_type();
if (t == Type::TOP) return; // multi is dead
assert(_con < t->is_tuple()->cnt(), "ProjNode::_con must be in range");
}
// Phase-computed type of this projection: the projected field of the
// defining node's current type; TOP when the definition is missing.
const Type *ProjNode::Value( PhaseTransform *phase ) const {
  Node* def = in(0);
  if (def == NULL) {
    return Type::TOP;
  }
  return proj_type(phase->type(def));
}
// Ideal (pre-Matcher) projections carry no register.
const RegMask &ProjNode::out_RegMask() const {
return RegMask::Empty;
}
// Machine register class is derived from the projected field's type.
uint ProjNode::ideal_reg() const {
return bottom_type()->ideal_reg();
}
// Walk forward along the unique control successors (through at most
// path_limit Regions) looking for an uncommon-trap call.  Returns true
// if one is found whose trap reason matches `reason` (or if `reason`
// is Reason_none, any trap reason).
bool ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) {
int path_limit = 10;
Node* out = this;
for (int ct = 0; ct < path_limit; ct++) {
out = out->unique_ctrl_out();
if (out == NULL)
return false;
if (out->is_CallStaticJava()) {
int req = out->as_CallStaticJava()->uncommon_trap_request();
if (req != 0) {
Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
if (trap_reason == reason || reason == Deoptimization::Reason_none) {
return true;
}
}
return false; // don't do further after call
}
// Only Regions may be skipped; any other control node ends the search.
if (out->Opcode() != Op_Region)
return false;
}
return false;
}
// Recognize the pattern: this projection hangs off an If whose OTHER
// projection leads to an uncommon trap with the given reason.  When a
// specific reason is requested, the If condition must additionally be
// the Conv2B(Opaque1(...)) shape used by predicate checks.
bool ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) {
Node *in0 = in(0);
if (!in0->is_If()) return false;
if (in0->outcnt() < 2) return false;
IfNode* iff = in0->as_If();
if (reason != Deoptimization::Reason_none) {
if (iff->in(1)->Opcode() != Op_Conv2B ||
iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
return false;
}
}
// Examine the opposite branch of the If (1-_con flips true/false).
ProjNode* other_proj = iff->proj_out(1-_con);
if (other_proj == NULL) // Should never happen, but make Parfait happy.
return false;
if (other_proj->is_uncommon_trap_proj(reason)) {
assert(reason == Deoptimization::Reason_none ||
Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
return true;
}
return false;
}
C:\hotspot-69087d08d473\src\share\vm/opto/multnode.hpp
#ifndef SHARE_VM_OPTO_MULTNODE_HPP
#define SHARE_VM_OPTO_MULTNODE_HPP
#include "opto/node.hpp"
class Matcher;
class ProjNode;
// MultiNode: a node producing a tuple of values (e.g. calls, If).
// Individual results are extracted by ProjNode users; the multi itself
// never hashes (it is CFG) and defines no machine register.
class MultiNode : public Node {
public:
MultiNode( uint required ) : Node(required) {
init_class_id(Class_Multi);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const = 0;
virtual bool is_CFG() const { return true; }
virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
virtual bool depends_only_on_test() const { return false; }
virtual const RegMask &out_RegMask() const;
virtual Node *match( const ProjNode *proj, const Matcher *m );
virtual uint ideal_reg() const { return NotAMachineReg; }
ProjNode* proj_out(uint which_proj) const; // Get a named projection
};
// ProjNode: extracts one field (_con) from the tuple produced by a
// MultiNode.  _is_io_use distinguishes otherwise-identical projections
// used on the i/o path.
class ProjNode : public Node {
protected:
virtual uint hash() const;
virtual uint cmp( const Node &n ) const;
virtual uint size_of() const;
void check_con() const; // Called from constructor.
const Type* proj_type(const Type* t) const;
public:
ProjNode( Node *src, uint con, bool io_use = false )
: Node( src ), _con(con), _is_io_use(io_use)
{
init_class_id(Class_Proj);
// Optimistic classification as dead-loop safe; memory projections of
// non-Start nodes do not qualify.
if (con != TypeFunc::Memory || src->is_Start())
init_flags(Flag_is_dead_loop_safe);
debug_only(check_con());
}
const uint _con; // The field in the tuple we are projecting
const bool _is_io_use; // Used to distinguish between the projections
virtual int Opcode() const;
virtual bool is_CFG() const;
virtual bool depends_only_on_test() const { return false; }
virtual const Type *bottom_type() const;
virtual const TypePtr *adr_type() const;
virtual bool pinned() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const;
virtual const RegMask &out_RegMask() const;
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
// Uncommon-trap pattern recognition (see multnode.cpp).
bool is_uncommon_trap_proj(Deoptimization::DeoptReason reason);
bool is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason);
};
#endif // SHARE_VM_OPTO_MULTNODE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/node.cpp
#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"
class RegMask;
class PhaseTransform;
class PhaseGVN;
const uint Node::NotAMachineReg = 0xffff0000;
#ifndef PRODUCT
extern int nodes_created;
#endif
#ifdef ASSERT
// Debug-only constructor hook: assigns a stable per-node debug index
// (aligned with _idx modulo 100000 so the two stay correlated), checks
// node-count limits, and honors the BreakAtNode debugging flag.
void Node::verify_construction() {
_debug_orig = NULL;
int old_debug_idx = Compile::debug_idx();
int new_debug_idx = old_debug_idx+1;
if (new_debug_idx > 0) {
// Bump new_debug_idx so that (new_debug_idx % mod) == (_idx % mod),
// making the debug index recognizable from the node index.
const int mod = 100000;
int bump = (int)(_idx - new_debug_idx) % mod;
if (bump < 0) bump += mod;
assert(bump >= 0 && bump < mod, "");
new_debug_idx += bump;
}
Compile::set_debug_idx(new_debug_idx);
set_debug_idx( new_debug_idx );
assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
BREAKPOINT;
}
#if OPTO_DU_ITERATOR_ASSERT
_last_del = NULL;
_del_tick = 0;
#endif
_hash_lock = 0;
}
#if OPTO_DU_ITERATOR_ASSERT
// DUIterator_Common: shared assertion machinery for def-use iterators.
// Each iterator snapshots the node's out-count and deletion tick so
// unexpected concurrent edge mutations can be detected.
void DUIterator_Common::sample(const Node* node) {
_vdui = VerifyDUIterators;
_node = node;
_outcnt = node->_outcnt;
_del_tick = node->_del_tick;
_last = NULL;
}
// Verify the iterator still matches the node and no deletions happened.
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
assert(_node == node, "consistent iterator source");
assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}
// After the client deleted an edge, resynchronize the snapshot.
void DUIterator_Common::verify_resync() {
const Node* node = _node;
assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
assert(node->_last_del == _last, "must have deleted the edge just produced");
_outcnt = node->_outcnt;
_del_tick = node->_del_tick;
}
// Copy-assignment support: re-sample from the source iterator's node.
void DUIterator_Common::reset(const DUIterator_Common& that) {
if (this == &that) return; // ignore assignment to self
if (!_vdui) {
// We need to re-sample _last and _vdui from the new source iterator.
_last = that._last;
_vdui = that._vdui;
}
const Node* node = that._node;
_node = node;
_outcnt = node->_outcnt;
_del_tick = node->_del_tick;
}
// DUIterator: index-based def-use iterator with refresh support.
void DUIterator::sample(const Node* node) {
DUIterator_Common::sample(node); // Initialize the assertion data.
_refresh_tick = 0; // No refreshes have happened, as yet.
}
void DUIterator::verify(const Node* node, bool at_end_ok) {
DUIterator_Common::verify(node, at_end_ok);
assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}
void DUIterator::verify_increment() {
// If this assertion triggers, or if _refresh_tick is odd, clip the
// index after a refresh so iteration can continue safely.
if (_refresh_tick & 1) {
if (_idx > _outcnt) _idx = _outcnt;
}
verify(_node, true);
}
void DUIterator::verify_resync() {
DUIterator_Common::verify_resync();
verify(_node, true);
}
void DUIterator::reset(const DUIterator& that) {
if (this == &that) return; // self assignment is always a no-op
assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
assert(that._idx == 0, "assign only the result of Node::outs()");
assert(_idx == that._idx, "already assigned _idx");
if (!_vdui) {
// We need to initialize everything, overwriting garbage values.
sample(that._node);
} else {
DUIterator_Common::reset(that);
if (_refresh_tick & 1) {
_refresh_tick++; // Clear the "was refreshed" flag.
}
assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
}
}
void DUIterator::refresh() {
DUIterator_Common::sample(_node); // Re-fetch assertion data.
_refresh_tick |= 1; // Set the "was refreshed" flag.
}
void DUIterator::verify_finish() {
// If the loop has killed the node, do not require it to re-run.
if (_node->_outcnt == 0) _refresh_tick &= ~1;
// If this assert triggers, it means that a loop used refresh_out_pos
// to re-synch an iteration index, but the loop did not correctly
// re-run itself, using a "while (progress)" construct.
assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}
// DUIterator_Fast: pointer-based iterator; insertions are forbidden
// while iterating, which allows direct pointer arithmetic over _out.
void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
DUIterator_Common::verify(node, at_end_ok);
Node** out = node->_out;
uint cnt = node->_outcnt;
assert(cnt == _outcnt, "no insertions allowed");
assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
}
void DUIterator_Fast::verify_limit() {
const Node* node = _node;
verify(node, true);
assert(_outp == node->_out + node->_outcnt, "limit still correct");
}
void DUIterator_Fast::verify_resync() {
const Node* node = _node;
if (_outp == node->_out + _outcnt) {
// Note that the limit imax, not the pointer i, gets updated with the
// exact count of deletions.  (For the pointer it's always "--i".)
assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
_last = (Node*) node->_last_del;
DUIterator_Common::verify_resync();
} else {
assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
// This is a limit pointer, with a name like "imax".
DUIterator_Common::verify_resync();
verify(node, true);
}
}
void DUIterator_Fast::verify_relimit(uint n) {
const Node* node = _node;
assert((int)n > 0, "use imax -= n only with a positive count");
// This must be a limit pointer, with a name like "imax".
assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
_last = (Node*) node->_last_del;
DUIterator_Common::verify_resync();
}
void DUIterator_Fast::reset(const DUIterator_Fast& that) {
assert(_outp == that._outp, "already assigned _outp");
DUIterator_Common::reset(that);
}
// DUIterator_Last: iterates the out array from the last element down.
void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
// at_end_ok means the _outp is allowed to underflow by 1; adjust,
// reuse the Fast checks, then restore.
_outp += at_end_ok;
DUIterator_Fast::verify(node, at_end_ok); // check _del_tick, etc.
_outp -= at_end_ok;
assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}
void DUIterator_Last::verify_limit() {
// Do not require the limit address to be resynched.
assert(_outp == _node->_out, "limit still correct");
}
void DUIterator_Last::verify_step(uint num_edges) {
assert((int)num_edges > 0, "need non-zero edge count for loop progress");
_outcnt -= num_edges;
_del_tick += num_edges;
// Make sure we are still in sync, possibly with no more out-edges:
const Node* node = _node;
verify(node, true);
assert(node->_last_del == _last, "must have deleted the edge just produced");
}
#endif //OPTO_DU_ITERATOR_ASSERT
#endif //ASSERT
#define NO_OUT_ARRAY ((Node**)-1)
#define IDX_INIT(req) this->Init((req), (Compile*) this->_out)
#ifdef _MSC_VER // the IDX_INIT hack falls foul of warning C4355
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif
// Out-of-line helper so Node::Init stays small.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
C->set_node_notes_at(idx, nn);
}
// Shared initialization for all Node constructors (invoked via the
// IDX_INIT hack, which smuggles the Compile* through _out before the
// body runs).  Allocates the input-edge array in the node arena and
// plants a magic cookie in the last slot for the constructor asserts.
inline int Node::Init(int req, Compile* C) {
assert(Compile::current() == C, "must use operator new(Compile*)");
int idx = C->next_unique();
// Allocate memory for the necessary number of edges.
if (req > 0) {
_in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
#ifdef ASSERT
_in[req-1] = this; // magic cookie for assertion check
#endif
}
// If there are default notes floating around, capture them:
Node_Notes* nn = C->default_node_notes();
if (nn != NULL) init_node_notes(C, idx, nn);
// Note:  At this point, C is dead,
// and we begin to initialize the new Node.
_cnt = _max = req;
_outcnt = _outmax = 0;
_class_id = Class_Node;
_flags = 0;
_out = NO_OUT_ARRAY;
return idx;
}
// Create a Node with `req` input slots, all initialized to NULL.
Node::Node(uint req)
: _idx(IDX_INIT(req))
#ifdef ASSERT
, _parse_idx(_idx)
#endif
{
assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
if (req == 0) {
// _in array was not allocated; check the magic-cookie invariant.
assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
_in = NULL;
} else {
assert( _in[req-1] == this, "Must pass arg count to 'new'" );
Node** to = _in;
for(uint i = 0; i < req; i++) {
to[i] = NULL;
}
}
}
// Convenience constructors for fixed arities 1..7.  Each fills the
// input slots and installs the reciprocal def-use (out) edges; the
// `_in[N-1] == this` asserts verify that operator new received the
// matching argument count (see the IDX_INIT magic cookie).
Node::Node(Node *n0)
: _idx(IDX_INIT(1))
#ifdef ASSERT
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( _in[0] == this, "Must pass arg count to 'new'" );
assert( is_not_dead(n0), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}
Node::Node(Node *n0, Node *n1)
: _idx(IDX_INIT(2))
#ifdef ASSERT
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( _in[1] == this, "Must pass arg count to 'new'" );
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}
Node::Node(Node *n0, Node *n1, Node *n2)
: _idx(IDX_INIT(3))
#ifdef ASSERT
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( _in[2] == this, "Must pass arg count to 'new'" );
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
assert( is_not_dead(n2), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
: _idx(IDX_INIT(4))
#ifdef ASSERT
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( _in[3] == this, "Must pass arg count to 'new'" );
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
assert( is_not_dead(n2), "can not use dead node");
assert( is_not_dead(n3), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
: _idx(IDX_INIT(5))
#ifdef ASSERT
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( _in[4] == this, "Must pass arg count to 'new'" );
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
assert( is_not_dead(n2), "can not use dead node");
assert( is_not_dead(n3), "can not use dead node");
assert( is_not_dead(n4), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
Node *n4, Node *n5)
: _idx(IDX_INIT(6))
#ifdef ASSERT
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( _in[5] == this, "Must pass arg count to 'new'" );
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
assert( is_not_dead(n2), "can not use dead node");
assert( is_not_dead(n3), "can not use dead node");
assert( is_not_dead(n4), "can not use dead node");
assert( is_not_dead(n5), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
_in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
Node *n4, Node *n5, Node *n6)
: _idx(IDX_INIT(7))
#ifdef ASSERT
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( _in[6] == this, "Must pass arg count to 'new'" );
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
assert( is_not_dead(n2), "can not use dead node");
assert( is_not_dead(n3), "can not use dead node");
assert( is_not_dead(n4), "can not use dead node");
assert( is_not_dead(n5), "can not use dead node");
assert( is_not_dead(n6), "can not use dead node");
_in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
_in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
_in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
_in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
_in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
_in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
_in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Clone a Node: bitwise-copy the object, re-point _in just past the
// copied object, re-register def-use edges, duplicate any Mach
// operands, and give the clone a fresh unique index.
Node *Node::clone() const {
Compile* C = Compile::current();
uint s = size_of(); // Size of inherited Node
Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
// Set the new input pointer array to live just after the cloned object.
n->_in = (Node**)(((char*)n)+s);
// Cannot share the old output pointer array, so kill it.
n->_out = NO_OUT_ARRAY;
n->_outcnt = 0;
n->_outmax = 0;
// Unlock this guy, since he is not in any hash table.
debug_only(n->_hash_lock = 0);
// Walk the old node's input list to duplicate its edges
uint i;
for( i = 0; i < len(); i++ ) {
Node *x = in(i);
n->_in[i] = x;
if (x != NULL) x->add_out(n);
}
// The bitwise copy carried the class-specific flags; register the clone
// on the same compile-wide lists the original is on.
if (is_macro())
C->add_macro_node(n);
if (is_expensive())
C->add_expensive_node(n);
CastIINode* cast = n->isa_CastII();
if (cast != NULL && cast->has_range_check()) {
C->add_range_check_cast(cast);
}
n->set_idx(C->next_unique()); // Get new unique index as well
debug_only( n->verify_construction() );
NOT_PRODUCT(nodes_created++);
// Do not patch over the debug_idx of a clone, because it makes it
// impossible to break on the clone's moment of creation.
C->copy_node_notes_to(n, (Node*) this);
// MachNode clone: the operand array lives outside the object; rebuild
// the clone's _opnds pointer at the same relative offset, then deep-copy
// each operand.
uint nopnds;
if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
MachNode *mach = n->as_Mach();
MachNode *mthis = this->as_Mach();
MachOper **from = mthis->_opnds;
MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
pointer_delta((const void*)from,
(const void*)(&mthis->_opnds), 1));
mach->_opnds = to;
for ( uint i = 0; i < nopnds; ++i ) {
to[i] = from[i]->clone(C);
}
}
// cloning CallNode may need to clone JVMState
if (n->is_Call()) {
n->as_Call()->clone_jvms(C);
}
if (n->is_SafePoint()) {
n->as_SafePoint()->clone_replaced_nodes();
}
return n; // Return the clone
}
// The TOP node uses a NULL _out array as its distinguishing marker;
// establish or clear that marker depending on whether this is top.
void Node::setup_is_top() {
if (this == (Node*)Compile::current()->top()) {
// I am top; my _out must be the NULL sentinel.
_outcnt = _outmax = 0;
_out = NULL; // marker value for top
assert(is_top(), "must be top");
} else {
if (_out == NULL) _out = NO_OUT_ARRAY;
assert(!is_top(), "must not be top");
}
}
// Debug counters for arena-space reclamation statistics.
extern int reclaim_idx ;
extern int reclaim_in ;
extern int reclaim_node;
// Destroy this node: sever all input edges (which also removes the
// reciprocal out edges), return its arena storage when it sits at the
// arena high-water mark, and scrub compile-wide lists.  In ASSERT
// builds the storage is deliberately NOT freed so the badAddress
// scribbling at the end can catch use-after-destruct.
void Node::destruct() {
Compile* compile = Compile::current();
// Eagerly reclaim the node number if it was just allocated.
if ((uint)_idx+1 == compile->unique()) {
compile->set_unique(compile->unique()-1);
#ifdef ASSERT
reclaim_idx++;
#endif
}
// Clear debug info:
Node_Notes* nn = compile->node_notes_at(_idx);
if (nn != NULL) nn->clear();
// Walk the input array, freeing the corresponding output edges.
_cnt = _max; // forget req/prec distinction
uint i;
for( i = 0; i < _max; i++ ) {
set_req(i, NULL);
}
assert(outcnt() == 0, "deleting a node must not leave a dangling use");
// See if the input array was allocated just prior to the object.
int edge_size = _max*sizeof(void*);
int out_edge_size = _outmax*sizeof(void*);
char *edge_end = ((char*)_in) + edge_size;
char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
char *out_edge_end = out_array + out_edge_size;
int node_size = size_of();
// Free the output edge array.
if (out_edge_size > 0) {
#ifdef ASSERT
if( out_edge_end == compile->node_arena()->hwm() )
reclaim_in += out_edge_size; // count reclaimed out edges with in edges
#endif
compile->node_arena()->Afree(out_array, out_edge_size);
}
if( edge_end == (char*)this ) {
// Input array abuts the object: free both in one hit (product only).
#ifdef ASSERT
if( edge_end+node_size == compile->node_arena()->hwm() ) {
reclaim_in += edge_size;
reclaim_node+= node_size;
}
#else
compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
} else {
// Free just the input array.
#ifdef ASSERT
if( edge_end == compile->node_arena()->hwm() )
reclaim_in += edge_size;
#endif
compile->node_arena()->Afree(_in,edge_size);
// Free just the object (product only).
#ifdef ASSERT
if( ((char*)this) + node_size == compile->node_arena()->hwm() )
reclaim_node+= node_size;
#else
compile->node_arena()->Afree(this,node_size);
#endif
}
// NOTE(review): the list removals below run after the product-build
// Afree of `this`; arena Afree only recycles hwm space and does not
// poison memory, so this is tolerated here — confirm before reordering.
if (is_macro()) {
compile->remove_macro_node(this);
}
if (is_expensive()) {
compile->remove_expensive_node(this);
}
CastIINode* cast = isa_CastII();
if (cast != NULL && cast->has_range_check()) {
compile->remove_range_check_cast(cast);
}
if (is_SafePoint()) {
as_SafePoint()->delete_replaced_nodes();
}
#ifdef ASSERT
// We will not actually delete the storage, but we'll make the node unusable.
_in = _out = (Node**) badAddress;
_max = _cnt = _outmax = _outcnt = 0;
#endif
}
// Grow the input (_in) array to hold at least len+1 edges.  Capacity
// doubles to the next power of two; newly exposed slots are NULLed.
void Node::grow( uint len ) {
Arena* arena = Compile::current()->node_arena();
uint new_max = _max;
if( new_max == 0 ) {
// First allocation: start at capacity 4.
_max = 4;
_in = (Node**)arena->Amalloc(4*sizeof(Node*));
Node** to = _in;
to[0] = NULL;
to[1] = NULL;
to[2] = NULL;
to[3] = NULL;
return;
}
while( new_max <= len ) new_max <<= 1; // Find next power-of-2
_in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
_max = new_max; // Record new max length
// This assertion makes sure that Node::_max is wide enough to
// represent the numerical value of new_max.
assert(_max == new_max && _max > len, "int width of _max is too small");
}
// Grow the output (_out) array to hold at least len+1 use edges.
// Unlike grow(), new slots are not cleared: _outcnt bounds the live
// portion, so stale slots are never read.
void Node::out_grow( uint len ) {
assert(!is_top(), "cannot grow a top node's out array");
Arena* arena = Compile::current()->node_arena();
uint new_max = _outmax;
if( new_max == 0 ) {
// First allocation: start at capacity 4.
_outmax = 4;
_out = (Node **)arena->Amalloc(4*sizeof(Node*));
return;
}
while( new_max <= len ) new_max <<= 1; // Find next power-of-2
assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
_out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
_outmax = new_max; // Record new max length
// This assertion makes sure that Node::_outmax is wide enough to
// represent the numerical value of new_max.
assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}
#ifdef ASSERT
// Debug-only liveness check: a node is considered dead when every
// input slot is NULL (top, Mach nodes and used placeholder Nodes are
// exempt).  Dumps the node before reporting it dead.
bool Node::is_dead() const {
// Mach and pinch point nodes may look like dead.
if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
return false;
for( uint i = 0; i < _max; i++ )
if( _in[i] != NULL )
return false;
dump();
return true;
}
#endif
// A node is unreachable when it has no uses, its type has collapsed to
// TOP, or its control input is dead.
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
assert(!is_Mach(), "doesn't work with MachNodes");
return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
}
// Append a required input edge.  Precedence edges live after the
// required edges in the same array, so the first precedence edge (if
// any) is relocated to the end to make room.
void Node::add_req( Node *n ) {
assert( is_not_dead(n), "can not use dead node");
// Look to see if I can move precedence down one without reallocating
if( (_cnt >= _max) || (in(_max-1) != NULL) )
grow( _max+1 );
// Find a precedence edge to move
if( in(_cnt) != NULL ) { // Next precedence edge is busy?
uint i;
for( i=_cnt; i<_max; i++ )
if( in(i) == NULL ) // Find the NULL at end of prec edge list
break; // There must be one, since we grew the array
_in[i] = in(_cnt); // Move prec over, making space for req edge
}
_in[_cnt++] = n; // Stuff over old prec edge
if (n != NULL) n->add_out((Node *)this);
}
// Append the same node n as a required input m times (e.g. for Phi
// construction).  Existing precedence edges are shifted up as a block.
void Node::add_req_batch( Node *n, uint m ) {
assert( is_not_dead(n), "can not use dead node");
// check various edge cases
if ((int)m <= 1) {
assert((int)m >= 0, "oob");
if (m != 0) add_req(n);
return;
}
// Look to see if I can move precedence down one without reallocating
if( (_cnt+m) > _max || _in[_max-m] )
grow( _max+m );
// Find a precedence edge to move
if( _in[_cnt] != NULL ) { // Next precedence edge is busy?
uint i;
for( i=_cnt; i<_max; i++ )
if( _in[i] == NULL ) // Find the NULL at end of prec edge list
break; // There must be one, since we grew the array
// Slide all the precs over by m positions (new req edges come in between).
Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
}
// Stuff over the old prec edges, if any
for(uint i=0; i<m; i++ ) {
_in[_cnt++] = n;
}
// Insert multiple out edges on the node.
if (n != NULL && !n->is_top()) {
for(uint i=0; i<m; i++ ) {
n->add_out((Node *)this);
}
}
}
// Delete required edge idx, filling the hole with the LAST required
// edge (order not preserved).
void Node::del_req( uint idx ) {
assert( idx < _cnt, "oob");
assert( !VerifyHashTableKeys || _hash_lock == 0,
"remove node from hash table before modifying it");
// First remove corresponding def-use edge
Node *n = in(idx);
if (n != NULL) n->del_out((Node *)this);
_in[idx] = in(--_cnt); // Compact the array
// Close gap if a precedence edge follows the last required edge.
close_prec_gap_at(_cnt);
}
// Delete required edge idx, preserving the order of the remaining
// required edges (elements shift down).
void Node::del_req_ordered( uint idx ) {
assert( idx < _cnt, "oob");
assert( !VerifyHashTableKeys || _hash_lock == 0,
"remove node from hash table before modifying it");
// First remove corresponding def-use edge
Node *n = in(idx);
if (n != NULL) n->del_out((Node *)this);
if (idx < --_cnt) { // Not last edge ?
Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
}
close_prec_gap_at(_cnt);
}
// Insert a new required input at position idx, shifting later required
// edges up by one.
void Node::ins_req( uint idx, Node *n ) {
assert( is_not_dead(n), "can not use dead node");
add_req(NULL); // Make space
assert( idx < _max, "Must have allocated enough space");
// Slide over the required edges after idx (the NULL just appended
// provides the vacant slot at the top).
if(_cnt-idx-1 > 0) {
Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
}
_in[idx] = n; // Stuff over old required edge
if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}
// Return the index of the first input (required or precedence) edge
// that references n, or -1 when n is not an input of this node.
int Node::find_edge(Node* n) {
  uint limit = len();
  for (uint idx = 0; idx < limit; idx++) {
    if (_in[idx] == n) {
      return idx;
    }
  }
  return -1;
}
// Replace every occurrence of `old` among this node's inputs (required
// and precedence) with `neww`; returns how many edges were replaced.
int Node::replace_edge(Node* old, Node* neww) {
if (old == neww) return 0; // nothing to do
uint nrep = 0;
for (uint i = 0; i < len(); i++) {
if (in(i) == old) {
if (i < req()) {
set_req(i, neww);
} else {
// Precedence edges must stay duplicate-free.
assert(find_prec_edge(neww) == -1, err_msg("spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx));
set_prec(i, neww);
}
nrep++;
}
}
return nrep;
}
// Replace occurrences of `old` with `neww` only in the required-edge
// index range [start, end); returns the replacement count.
int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
if (old == neww) return 0; // nothing to do
uint nrep = 0;
for (int i = start; i < end; i++) {
if (in(i) == old) {
set_req(i, neww);
nrep++;
}
}
return nrep;
}
// NULL out all of this node's input edges (required and precedence),
// counting how many of them pointed at n.  If none did, the node is
// recorded as dead with the compile.
int Node::disconnect_inputs(Node *n, Compile* C) {
int edges_to_n = 0;
uint cnt = req();
for( uint i = 0; i < cnt; ++i ) {
if( in(i) == 0 ) continue;
if( in(i) == n ) ++edges_to_n;
set_req(i, NULL);
}
// Remove precedence edges if any exist.  (The required edges were just
// cleared above, so the `in(i) == 0` test skips them here.)
if( (req() != len()) && (in(req()) != NULL) ) {
uint max = len();
for( uint i = 0; i < max; ++i ) {
if( in(i) == 0 ) continue;
if( in(i) == n ) ++edges_to_n;
set_prec(i, NULL);
}
}
// Node::destruct requires all out edges be deleted first
// debug_only(destruct();) // no reuse benefit expected
if (edges_to_n == 0) {
C->record_dead_node(_idx);
}
return edges_to_n;
}
// Strip any ConstraintCast/CheckCastPP wrappers from this node and
// return the underlying value; returns this node itself when it is not
// a cast.
Node* Node::uncast() const {
  if (!is_ConstraintCast() && !is_CheckCastPP()) {
    return (Node*) this;
  }
  return uncast_helper(this);
}
// Return the first use of this node whose Opcode matches, or NULL.
Node* Node::find_out_with(int opcode) {
for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
Node* use = fast_out(i);
if (use->Opcode() == opcode) {
return use;
}
}
return NULL;
}
// Iteratively peel ConstraintCast/CheckCastPP nodes off p.  Stops at
// the first non-cast node (or any node without the expected 2-input
// shape).  The debug counter guards against a cycle of casts.
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
uint depth_count = 0;
const Node* orig_p = p;
#endif
while (true) {
#ifdef ASSERT
if (depth_count >= K) {
// Dump the chain before the assert below fires.
orig_p->dump(4);
if (p != orig_p)
p->dump(1);
}
assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
if (p == NULL || p->req() != 2) {
break;
} else if (p->is_ConstraintCast()) {
p = p->in(1);
} else if (p->is_CheckCastPP()) {
p = p->in(1);
} else {
break;
}
}
return (Node*) p;
}
// Add a new precedence edge to n.  Precedence edges occupy the tail of the
// _in array above the _cnt required edges and must stay gap-free and
// duplicate-free; the array is grown when the last slot is occupied.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  // Grow if there is no free slot at the end of the edge array.
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );
  // Find the first empty slot past the required edges.
  uint i = _cnt;
  while( in(i) != NULL ) {
    if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
    i++;
  }
  _in[i] = n;                                // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge
#ifdef ASSERT
  // Every slot above the one just filled must still be empty (gap-free).
  while ((++i)<_max) { assert(_in[i] == NULL, err_msg("spec violation: Gap in prec edges (node %d)", _idx)); }
#endif
}
// Remove the precedence edge at index j: drop the mirror use edge on the
// referenced node and compact the precedence list so it stays gap-free.
void Node::rm_prec( uint j ) {
  assert(j < _max, err_msg("oob: i=%d, _max=%d", j, _max));
  assert(j >= _cnt, "not a precedence edge");
  if (_in[j] == NULL) return;   // Avoid spec violation: Gap in prec edges.
  _in[j]->del_out((Node *)this);
  close_prec_gap_at(j);
}
// Size of this node instance in bytes.
uint Node::size_of() const { return sizeof(*this); }
// Default: no ideal register (0).
uint Node::ideal_reg() const { return 0; }
// Default: this node carries no JVM state.
JVMState* Node::jvms() const { return NULL; }
#ifdef ASSERT
// Verify that 'using_jvms' appears somewhere on this node's JVMState
// caller chain.
bool Node::verify_jvms(const JVMState* using_jvms) const {
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms == using_jvms) return true;
  }
  return false;
}

// Sanity-check that the NodeProperty class and flag id spaces still fit
// in a jushort.
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
#endif
// Default (no-op / most-conservative) implementations for the base Node.
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
// Default: this node is not a block projection.
const Node *Node::is_block_proj() const { return 0; }
// Default: the worst-case (bottom) type.
const Type *Node::bottom_type() const { return Type::BOTTOM; }
void Node::raise_bottom_type(const Type* new_type) {
if (is_Type()) {
TypeNode *n = this->as_Type();
if (VerifyAliases) {
assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
}
n->set_type(new_type);
} else if (is_Load()) {
LoadNode *n = this->as_Load();
if (VerifyAliases) {
assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
}
n->set_type(new_type);
}
}
// Default identity transform: no equivalent pre-existing node.
Node *Node::Identity( PhaseTransform * ) {
  return this;                  // Default to no identities
}

// Default value computation: the node's bottom type.
const Type *Node::Value( PhaseTransform * ) const {
  return bottom_type();         // Default to worst-case Type
}

// Default ideal transform: no change.
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;                  // Default to being Ideal already
}
// Answer whether this node's single user matches one of the special
// patterns the optimizer cares about:
//  - a Store of the same opcode chained through this Store's memory edge,
//  - a MemBarAcquire consuming this Load/DecodeN,
//  - a ConvL2I consuming this AddL as its first input,
//  - a Sub of the same opcode consuming this node as its second input.
// Precondition: exactly one user (outcnt() == 1).
// Fix: dropped the stray ';' after the closing brace (an empty declaration
// at namespace scope, rejected by pedantic pre-C++11 compilers).
bool Node::has_special_unique_user() const {
  assert(outcnt() == 1, "match only for unique out");
  Node* n = unique_out();
  int op = Opcode();
  if( this->is_Store() ) {
    // Back-to-back stores through the memory edge.
    return n->Opcode() == op && n->in(MemNode::Memory) == this;
  } else if (this->is_Load() || this->is_DecodeN()) {
    // Load/DecodeN feeding a MemBarAcquire.
    return n->Opcode() == Op_MemBarAcquire;
  } else if( op == Op_AddL ) {
    // AddL feeding a long-to-int conversion.
    return n->Opcode() == Op_ConvL2I && n->in(1) == this;
  } else if( op == Op_SubI || op == Op_SubL ) {
    // Nested subtraction of the same width.
    return n->Opcode() == op && n->in(2) == this;
  }
  return false;
}
// Skip over "transparent" control nodes to reach the node that actually
// defines control: resolves region-copies, steps up twice from a
// fall-through CatchProj, and unwraps a trailing projection.
Node* Node::find_exact_control(Node* ctrl) {
  // A Region with NULL control may be a copy; use its single real input.
  if (ctrl == NULL && this->is_Region())
    ctrl = this->as_Region()->is_copy();
  if (ctrl != NULL && ctrl->is_CatchProj()) {
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);       // step up from the fall-through projection
    if (ctrl != NULL && !ctrl->is_top())
      ctrl = ctrl->in(0);       // and one level further
  }
  if (ctrl != NULL && ctrl->is_Proj())
    ctrl = ctrl->in(0);
  return ctrl;
}
// Conservatively decide whether 'this' control node dominates 'sub' by
// walking sub's control inputs upward.  Region merges make the walk
// ambiguous, so the answer is true only when every explored path leads
// through 'this'; cycles, dead code, or exhausting the search budget
// answer false.  nlist is scratch storage of visited 2-input Regions; the
// pointer's low bit tags a Region whose alternate path was already taken.
bool Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != NULL && sub->is_CFG(), "expecting control");
  // Budget on steps between Regions, to bail out of pathological graphs.
  int iterations_without_region_limit = DominatorSearchLimit;
  Node* orig_sub = sub;
  Node* dom      = this;
  bool  met_dom  = false;
  nlist.clear();
  // Walk 'sub' up the chain to 'dom'.
  while (sub != NULL) {
    if (sub->is_top()) break; // Conservative answer for dead code.
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loop heads were visited before and the EntryControl
        // path was taken for loop heads: success.
        return true;
      } else if (met_dom) {
        break;          // already met before: walk in a cycle
      } else {
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Reached the top of the graph: dominated only if dom was met on the way.
      return met_dom;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections.
    up = sub->find_exact_control(up);
    if (sub == up && sub->is_Loop()) {
      // Take the loop's entry path, not the backedge.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
      // Not a 2-input merge: just follow the first input.
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // A 2-input Region: try one path now, remember it so the other path
      // can be tried if this Region is reached again.
      iterations_without_region_limit = DominatorSearchLimit; // Reset
      bool region_was_visited_before = false;
      // Was this Region node visited before?
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);
        bool visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Both paths from this Region were already tried: give up.
            return false;
          }
          nlist.remove(j);
          region_was_visited_before = true;
          break;
        }
      }
      assert(up == sub, "");
      // Pick the first (or second, on revisit) nontrivial input.
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != NULL && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip; // skip this nontrivial input
        }
      }
      // Record the visit, tagging the low bit on a second visit.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }
    if (up == sub) {
      break; // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      break; // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break; // dead cycle
    }
    sub = up;
  }
  // Conservative answer.
  return false;
}
// Worklist algorithm that kills 'dead' and every node that becomes dead as
// a consequence: cuts edges into dead nodes, records them with the
// compiler, and pushes still-live neighbors onto the IGVN worklist for
// re-examination.  Constants are left alone.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  if( dead->is_Con() ) return;
  Node_List nstack(Thread::current()->resource_area());
  Node *top = igvn->C->top();
  nstack.push(dead);
  bool has_irreducible_loop = igvn->C->has_irreducible_loop();
  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (dead->Opcode() == Op_SafePoint) {
      dead->as_SafePoint()->disconnect_from_root(igvn);
    }
    if (dead->outcnt() > 0) {
      // Keep the dead node on the stack until all its uses are processed.
      nstack.push(dead);
      // Examine each user of the dead node in turn.
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use);       // Yank from hash table prior to mod
        if (use->in(0) == dead) {     // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top);       // Cut dead edge to prevent processing
          nstack.push(use);           // the dead node again.
        } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
                   use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
                   use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
          use->set_req(LoopNode::EntryControl, top); // Cut dead edge to prevent processing
          use->set_req(0, top);       // Cut self edge
          nstack.push(use);
        } else {                      // Else found a not-dead user
          // A user is dead when all of its inputs are top or NULL.
          bool dead_use = !use->is_Root(); // Keep empty graph alive
          for (uint j = 1; j < use->req(); j++) {
            Node* in = use->in(j);
            if (in == dead) {         // Turn all dead inputs into TOP
              use->set_req(j, top);
            } else if (in != NULL && !in->is_top()) {
              dead_use = false;
            }
          }
          if (dead_use) {
            if (use->is_Region()) {
              use->set_req(0, top);   // Cut self edge
            }
            nstack.push(use);
          } else {
            igvn->_worklist.push(use);
          }
        }
        // Refresh the iterator, since edges may have been removed.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // All uses processed: detach the dead node from the graph.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->set_type(dead, Type::TOP);
      // Drop it from the compiler's auxiliary node lists.
      if (dead->is_macro()) {
        igvn->C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        igvn->C->remove_expensive_node(dead);
      }
      CastIINode* cast = dead->isa_CastII();
      if (cast != NULL && cast->has_range_check()) {
        igvn->C->remove_range_check_cast(cast);
      }
      igvn->C->record_dead_node(dead->_idx);
      // Kill all inputs to the dead node.
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i);           // Get input to dead guy
        if (n != NULL && !n->is_top()) { // Input is valid?
          dead->set_req(i, top);         // Smash input away
          if (n->outcnt() == 0) {        // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n);            // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Revisit a store's remaining users now that one consumer died.
            igvn->add_users_to_worklist( n );
          }
        }
      }
    } // (dead->outcnt() == 0)
  }   // while (nstack.size() > 0) for outputs
  return;
}
// Clean up this node's control input: kill the whole node when control is
// top (dead), or reroute control past a degenerate region-copy.  Returns
// true only when the node was changed (control rerouted).
bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  Node *n = in(0);
  if( !n ) return false;
  // Control became top: this node is unreachable.
  if (can_reshape && n->is_top()) {
    kill_dead_code(this, phase->is_IterGVN());
    return false; // Node is dead.
  }
  if( n->is_Region() && n->as_Region()->is_copy() ) {
    Node *m = n->nonnull_req();   // the copy region's single live input
    set_req(0, m);                // skip the region, use its input directly
    return true;
  }
  return false;
}
// Hook invoked after CCP for nodes needing def-use fixups; the default
// makes no change.
Node *Node::Ideal_DU_postCCP( PhaseCCP * ) {
  return NULL;              // Default to no change
}
// Hash over this node's opcode, input count, and input pointers, used for
// value numbering.  NULL inputs fold in as zero.
uint Node::hash() const {
  uint sum = 0;
  for( uint i=0; i<_cnt; i++ )       // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs
  return (sum>>2) + _cnt + Opcode();
}
// Equality beyond opcode and inputs; the base Node has no extra state, so
// anything that got this far compares equal (non-zero = same).
uint Node::cmp( const Node &n ) const {
  return 1;                          // Must be same
}
// Should this node be recomputed at a use site rather than spilled?
// Mach nodes decide for themselves; ideal nodes consult the flag bit.
bool Node::rematerialize() const {
  return is_Mach() ? this->as_Mach()->rematerialize()
                   : ((_flags & Flag_rematerialize) != 0);
}
// True when this node must be checked for anti-dependences: it carries the
// flag, has a memory input slot, and that input's type holds memory state.
bool Node::needs_anti_dependence_check() const {
  if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
    return false;
  else
    return in(1)->bottom_type()->has_memory();
}
// Return the integer type carried by this node (a Type node or a
// constant), or NULL when the node does not carry an int type.
const TypeInt* Node::find_int_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_int();
  }
  if (this->is_Con()) {
    // A constant that is not a TypeNode must be a machine constant.
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_int();
  }
  return NULL;
}
// Get the pointer constant carried by a ConP node.
intptr_t Node::get_ptr() const {
  assert( Opcode() == Op_ConP, "" );
  return ((ConPNode*)this)->type()->is_ptr()->get_con();
}

// Get the narrow-oop constant carried by a ConN node.
intptr_t Node::get_narrowcon() const {
  assert( Opcode() == Op_ConN, "" );
  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
}
// Return the long type carried by this node (a Type node or a constant),
// or NULL when the node does not carry a long type.
const TypeLong* Node::find_long_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_long();
  }
  if (this->is_Con()) {
    // A constant that is not a TypeNode must be a machine constant.
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_long();
  }
  return NULL;
}
// Get this node's bottom type viewed as a pointer type; in debug builds,
// dumps the offending node before asserting when no pointer view exists.
const TypePtr* Node::get_ptr_type() const {
  const TypePtr* tp = this->bottom_type()->make_ptr();
#ifdef ASSERT
  if (tp == NULL) {
    this->dump(1);
    assert((tp != NULL), "unexpected node type");
  }
#endif
  return tp;
}
// Get the double constant carried by a ConD node.
jdouble Node::getd() const {
  assert( Opcode() == Op_ConD, "" );
  return ((ConDNode*)this)->type()->is_double_constant()->getd();
}

// Get the float constant carried by a ConF node.
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}
#ifndef PRODUCT
// Sanity filter for debug graph walks: reject NULL, odd (misaligned)
// pointers, and nodes whose storage was clobbered by Node::destruct.
static inline bool NotANode(const Node* n) {
  return (n == NULL) ||
         (((intptr_t)n & 1) != 0) ||      // uninitialized, etc.
         (*(address*)n == badAddress);    // killed by Node::destruct
}
// Recursive helper for Node::find / Node::find_ctrl: depth-first search for
// a node whose _idx (or, in debug builds, debug_idx) equals |idx|.  A
// negative idx also searches output edges; only_ctrl restricts the input
// walk to control edges.  old_space/new_space are the visited sets for
// nodes allocated in the old and current arenas.
static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl,
                       VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n)) return;  // Gracefully handle NULL, -1, 0xabababab, etc.
  // Pick the visited set matching the arena that owns n.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    // Report duplicate matches rather than silently dropping one.
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);
  // Search inputs (optionally restricted to control edges).
  for( uint i=0; i<n->len(); i++ ) {
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // For a negative idx, search forward (output) edges as well.
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Also follow the debug_orig chain, guarding against cycles in it.
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig)) break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}
// Free-function convenience wrapper (handy from a debugger) around
// Node::find.
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}
// Search the graph reachable from this node for the node with the given
// index (see find_recur); follows all input edges.  Returns NULL when no
// match is found.
Node* Node::find(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  return result;
}
// Like Node::find, but restricts the search to control edges.
Node* Node::find_ctrl(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  return result;
}
#endif
#ifndef PRODUCT
// Table of node class names, defined elsewhere; indexed by opcode.
extern const char *NodeClassNames[];
// Human-readable class name of this node, for debug printing.
const char *Node::Name() const { return NodeClassNames[Opcode()]; }
static bool is_disconnected(const Node* n) {
for (uint i = 0; i < n->req(); i++) {
if (n->in(i) != NULL) return false;
}
return true;
}
#ifdef ASSERT
// Print the debug_orig chain of a node as " !orig=a,b,c", bracketing
// disconnected nodes and prefixing 'o' for nodes from the old arena.
// Uses Floyd's tortoise-and-hare to cut off a cyclic chain with "...".
static void dump_orig(Node* orig, outputStream *st) {
  Compile* C = Compile::current();
  // Filter out garbage and foreign-arena nodes up front.
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  st->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig); // if discon, print [123] else 123
    if (discon) st->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");                    // old-arena node
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    // Advance the tortoise by one.
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) st->print(",");
    if (fast != NULL) {
      // Advance the hare by (up to) two.
      fast = fast->debug_orig();
      if (NotANode(fast)) fast = NULL;
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        st->print("...");     // cycle detected
        break;
      }
    }
  }
}
// Record the node this one was cloned/derived from.  When BreakAtNode is
// set, walk a bounded prefix of the orig chain and hit a breakpoint if any
// ancestor matches that index.
void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0) return;
  if (NotANode(orig)) orig = NULL;
  int trip = 10;  // bound the walk in case the chain is long or cyclic
  while (orig != NULL) {
    if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (trip-- <= 0) break;
  }
}
#endif //ASSERT
void Node::dump(const char* suffix, outputStream *st) const {
Compile* C = Compile::current();
bool is_new = C->node_arena()->contains(this);
C->_in_dump_cnt++;
st->print("%c%d\t%s\t=== ", is_new ? ' ' : 'o', _idx, Name());
dump_req(st);
dump_prec(st);
dump_out(st);
if (is_disconnected(this)) {
#ifdef ASSERT
st->print(" [%d]",debug_idx());
dump_orig(debug_orig(), st);
#endif
st->cr();
C->_in_dump_cnt--;
return; // don't process dead nodes
}
dump_spec(st);
#ifdef ASSERT
if (Verbose && WizardMode) {
st->print(" [%d]",debug_idx());
}
#endif
const Type *t = bottom_type();
if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
const TypeInstPtr *toop = t->isa_instptr();
const TypeKlassPtr *tkls = t->isa_klassptr();
ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
if (klass && klass->is_loaded() && klass->is_interface()) {
st->print(" Interface:");
} else if (toop) {
st->print(" Oop:");
} else if (tkls) {
st->print(" Klass:");
}
t->dump_on(st);
} else if (t == Type::MEMORY) {
st->print(" Memory:");
MemNode::dump_adr_type(this, adr_type(), st);
} else if (Verbose || WizardMode) {
st->print(" Type:");
if (t) {
t->dump_on(st);
} else {
st->print("no type");
}
} else if (t->isa_vect() && this->is_MachSpillCopy()) {
t->dump_on(st);
}
// NOTE(review): non-source artifact text (web-scrape footer) removed here.
// The remainder of Node::dump() and the rest of the original file are
// truncated at this point and must be restored from the upstream source.