diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl
index 5d2ba91e5d9e5..c6162e25c157c 100644
--- a/base/compiler/abstractinterpretation.jl
+++ b/base/compiler/abstractinterpretation.jl
@@ -214,6 +214,7 @@ function const_prop_profitable(@nospecialize(arg))
             const_prop_profitable(b) && return true
         end
     end
+    isa(arg, PartialOpaque) && return true
     isa(arg, Const) || return true
     val = arg.val
     # don't consider mutable values or Strings useful constants
@@ -255,7 +256,7 @@ function abstract_call_method_with_const_args(interp::AbstractInterpreter, @nosp
     # see if any or all of the arguments are constant and propagating constants may be worthwhile
     for a in argtypes
         a = widenconditional(a)
-        if allconst && !isa(a, Const) && !isconstType(a) && !isa(a, PartialStruct)
+        if allconst && !isa(a, Const) && !isconstType(a) && !isa(a, PartialStruct) && !isa(a, PartialOpaque)
            allconst = false
         end
         if !haveconst && has_nontrivial_const_info(a) && const_prop_profitable(a)
@@ -1044,6 +1045,31 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f),
     return abstract_call_gf_by_type(interp, f, argtypes, atype, sv, max_methods)
 end
 
+function abstract_call_opaque_closure(interp::AbstractInterpreter, closure::PartialOpaque, argtypes::Vector{Any}, sv::InferenceState)
+    return CallMeta(Any, nothing)
+end
+
+function most_general_argtypes(closure::PartialOpaque)
+    ret = Any[]
+    cc = widenconst(closure)
+    argt = unwrap_unionall(cc).parameters[1]
+    @assert isa(argt, DataType) && argt.name === typename(Tuple)
+    params = argt.parameters
+    for i = 2:closure.source.nargs
+        rt = unwrapva(params[min(i-1, length(params))])
+        if closure.isva && i == closure.source.nargs
+            if length(params) > i-1
+                for j = (i):length(params)
+                    rt = tmerge(rt, unwrapva(params[j]))
+                end
+            end
+            rt = Vararg{rt}
+        end
+        push!(ret, rt)
+    end
+    ret
+end
+
 # call where the function is any lattice element
 function abstract_call(interp::AbstractInterpreter, fargs::Union{Nothing,Vector{Any}}, argtypes::Vector{Any},
                        sv::InferenceState, max_methods::Int = InferenceParams(interp).MAX_METHODS)
@@ -1055,10 +1081,14 @@ function abstract_call(interp::AbstractInterpreter, fargs::Union{Nothing,Vector{
         f = ft.parameters[1]
     elseif isa(ft, DataType) && isdefined(ft, :instance)
         f = ft.instance
+    elseif isa(ft, PartialOpaque)
+        return abstract_call_opaque_closure(interp, ft, argtypes, sv)
+    elseif isa(ft, DataType) && unwrap_unionall(ft).name === typename(Core.OpaqueClosure)
+        return CallMeta(rewrap_unionall(unwrap_unionall(ft).parameters[2], ft), false)
     else
         # non-constant function, but the number of arguments is known
         # and the ft is not a Builtin or IntrinsicFunction
-        if typeintersect(widenconst(ft), Builtin) != Union{}
+        if typeintersect(widenconst(ft), Union{Builtin, Core.OpaqueClosure}) != Union{}
             add_remark!(interp, sv, "Could not identify method table for call")
             return CallMeta(Any, false)
         end
@@ -1229,6 +1259,28 @@ function abstract_eval_statement(interp::AbstractInterpreter, @nospecialize(e),
                 t = PartialStruct(t, at.fields)
             end
         end
+    elseif e.head === :new_opaque_closure
+        t = Union{}
+        if length(e.args) >= 5
+            ea = e.args
+            n = length(ea)
+            argtypes = Vector{Any}(undef, n)
+            @inbounds for i = 1:n
+                ai = abstract_eval_value(interp, ea[i], vtypes, sv)
+                if ai === Bottom
+                    return Bottom
+                end
+                argtypes[i] = ai
+            end
+            t = _opaque_closure_tfunc(argtypes[1], argtypes[2], argtypes[3],
+                argtypes[4], argtypes[5], argtypes[6:end], sv.linfo)
+            if isa(t, PartialOpaque)
+                # Infer this now so that the specialization is available to
+                # optimization.
+                abstract_call_opaque_closure(interp, t,
+                    most_general_argtypes(t), sv)
+            end
+        end
     elseif e.head === :foreigncall
         abstract_eval_value(interp, e.args[1], vtypes, sv)
         t = sp_type_rewrap(e.args[2], sv.linfo, true)
@@ -1389,7 +1441,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState)
             elseif isa(stmt, ReturnNode)
                 pc´ = n + 1
                 rt = widenconditional(abstract_eval_value(interp, stmt.val, s[pc], frame))
-                if !isa(rt, Const) && !isa(rt, Type) && !isa(rt, PartialStruct)
+                if !isa(rt, Const) && !isa(rt, Type) && !isa(rt, PartialStruct) && !isa(rt, PartialOpaque)
                     # only propagate information we know we can store
                     # and is valid inter-procedurally
                     rt = widenconst(rt)
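The loop in most_general_argtypes selects, for each closure argument, the matching element of the declared argument tuple type (clamping to the last element once the index runs past it) and, for a vararg closure, merges the remaining tail into a single Vararg slot. A small standalone sketch of that selection rule, not part of the patch — the helper names here are made up, and Union stands in for the compiler's tmerge:

    # Sketch only: mirrors the parameter-selection logic of most_general_argtypes
    # on a plain tuple type. `nargs` counts the closure itself as argument 1,
    # as the compiler does. Assumes a Julia version where Vararg is represented
    # as Core.TypeofVararg, as the patch itself does.
    striptva(T) = T isa Core.TypeofVararg ? (isdefined(T, :T) ? T.T : Any) : T

    function sketch_argtypes(argt::DataType, nargs::Int, isva::Bool)
        params = argt.parameters
        ret = Any[]
        for i = 2:nargs
            rt = striptva(params[min(i - 1, length(params))])
            if isva && i == nargs
                # the trailing vararg slot absorbs whatever parameters remain
                for j = i:length(params)
                    rt = Union{rt, striptva(params[j])}
                end
                rt = Vararg{rt}
            end
            push!(ret, rt)
        end
        return ret
    end

    sketch_argtypes(Tuple{Int, Vararg{Float64}}, 3, false)  # Any[Int64, Float64]
    sketch_argtypes(Tuple{Int, Float64, String}, 3, true)   # Any[Int64, Vararg{Union{Float64, String}}]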
diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl
index e8057fbe74566..1658acafe3cc8 100644
--- a/base/compiler/tfuncs.jl
+++ b/base/compiler/tfuncs.jl
@@ -1362,6 +1362,27 @@ add_tfunc(arrayref, 3, INT_INF, arrayref_tfunc, 20)
 add_tfunc(const_arrayref, 3, INT_INF, arrayref_tfunc, 20)
 add_tfunc(arrayset, 4, INT_INF, (@nospecialize(boundscheck), @nospecialize(a), @nospecialize(v), @nospecialize i...)->a, 20)
 
+function _opaque_closure_tfunc(@nospecialize(arg), @nospecialize(isva),
+    @nospecialize(lb), @nospecialize(ub), @nospecialize(source), env::Vector{Any},
+    linfo::MethodInstance)
+
+    argt, argt_exact = instanceof_tfunc(arg)
+    lbt, lb_exact = instanceof_tfunc(lb)
+    if !lb_exact
+        lbt = Union{}
+    end
+
+    ubt, ub_exact = instanceof_tfunc(ub)
+
+    t = argt_exact ? Core.OpaqueClosure{argt} : Core.OpaqueClosure{<:argt}
+    t = lbt == ubt ? t{ubt} : (t{T} where lbt <: T <: ubt)
+
+    isa(source, Const) || return t
+    (isa(isva, Const) && isa(isva.val, Bool)) || return t
+
+    return PartialOpaque(t, env, linfo, isva.val, source.val)
+end
+
 function array_type_undefable(@nospecialize(a))
     if isa(a, Union)
         return array_type_undefable(a.a) || array_type_undefable(a.b)
diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl
index 4e0f44d10822e..98d54b962e37d 100644
--- a/base/compiler/typeinfer.jl
+++ b/base/compiler/typeinfer.jl
@@ -289,6 +289,9 @@ function CodeInstance(result::InferenceResult, @nospecialize(inferred_result::An
     if isa(result_type, Const)
         rettype_const = result_type.val
         const_flags = 0x2
+    elseif isa(result_type, PartialOpaque)
+        rettype_const = result_type
+        const_flags = 0x2
     elseif isconstType(result_type)
         rettype_const = result_type.parameters[1]
         const_flags = 0x2
@@ -773,6 +776,8 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
         if isdefined(code, :rettype_const)
             if isa(code.rettype_const, Vector{Any}) && !(Vector{Any} <: code.rettype)
                 return PartialStruct(code.rettype, code.rettype_const), mi
+            elseif code.rettype <: Core.OpaqueClosure && isa(code.rettype_const, PartialOpaque)
+                return code.rettype_const, mi
             else
                 return Const(code.rettype_const), mi
             end
diff --git a/base/compiler/typelattice.jl b/base/compiler/typelattice.jl
index 71188d28f5d3c..826f7f4db16e4 100644
--- a/base/compiler/typelattice.jl
+++ b/base/compiler/typelattice.jl
@@ -54,6 +54,14 @@ struct PartialTypeVar
     PartialTypeVar(tv::TypeVar, lb_certain::Bool, ub_certain::Bool) = new(tv, lb_certain, ub_certain)
 end
 
+mutable struct PartialOpaque
+    t::Type
+    env::Vector{Any}
+    parent::MethodInstance
+    isva::Bool
+    source::Method
+end
+
 # Wraps a type and represents that the value may also be undef at this point.
 # (only used in optimize, not abstractinterpret)
 # N.B. in the lattice, this is epsilon bigger than `typ` (even Any)
@@ -185,6 +193,14 @@ function ⊑(@nospecialize(a), @nospecialize(b))
         end
         return false
     end
+    if isa(a, PartialOpaque)
+        if isa(b, PartialOpaque)
+            (a.parent === b.parent && a.source === b.source) || return false
+            return (widenconst(a) <: widenconst(b)) &&
+                ⊑(a.env, b.env)
+        end
+        return widenconst(a) <: widenconst(b)
+    end
     if isa(a, Const)
         if isa(b, Const)
             return a.val === b.val
@@ -240,6 +256,7 @@ end
 widenconst(m::MaybeUndef) = widenconst(m.typ)
 widenconst(c::PartialTypeVar) = TypeVar
 widenconst(t::PartialStruct) = t.typ
+widenconst(t::PartialOpaque) = t.t
 widenconst(t::Type) = t
 widenconst(t::TypeVar) = t
 widenconst(t::Core.TypeofVararg) = t
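PartialOpaque is to opaque closures what PartialStruct is to partially known structs: it records the source method, the enclosing MethodInstance, and the lattice elements of the captured environment, while widenconst recovers the plain Core.OpaqueClosure type computed by _opaque_closure_tfunc. A hedged user-level illustration of the kind of code this lets inference track — it assumes the Base.Experimental.@opaque surface syntax from the opaque-closure feature, which is not part of this diff:

    using Base.Experimental: @opaque

    function make_adder(x::Int)
        # While make_adder is being inferred, the closure below is carried as a
        # PartialOpaque whose environment holds the lattice element for `x`;
        # widenconst of it is roughly Core.OpaqueClosure{Tuple{Int}, T} where T.
        return @opaque (y::Int) -> x + y
    end

    make_adder(2)(3)  # == 5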
diff --git a/base/compiler/typeutils.jl b/base/compiler/typeutils.jl
index 10f4a8b949da5..c055f97941311 100644
--- a/base/compiler/typeutils.jl
+++ b/base/compiler/typeutils.jl
@@ -34,6 +34,7 @@ end
 
 function has_nontrivial_const_info(@nospecialize t)
     isa(t, PartialStruct) && return true
+    isa(t, PartialOpaque) && return true
     isa(t, Const) || return false
     val = t.val
     return !isdefined(typeof(val), :instance) && !(isa(val, Type) && hasuniquerep(val))
diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl
index e047c7d83fac3..de819a3eba2e8 100644
--- a/base/compiler/utilities.jl
+++ b/base/compiler/utilities.jl
@@ -114,12 +114,29 @@ function get_staged(li::MethodInstance)
     end
 end
 
+function has_opaque_closure(c::CodeInfo)
+    for i = 1:length(c.code)
+        stmt = c.code[i]
+        (isa(stmt, Expr) && stmt.head === :new_opaque_closure) && return true
+    end
+    return false
+end
+
 function retrieve_code_info(linfo::MethodInstance)
     m = linfo.def::Method
     c = nothing
     if isdefined(m, :generator)
-        # user code might throw errors – ignore them
-        c = get_staged(linfo)
+        if isdefined(linfo, :uninferred)
+            c = copy(linfo.uninferred::CodeInfo)
+        else
+            # user code might throw errors – ignore them
+            c = get_staged(linfo)
+            # For opaque closures, cache the generated code info to make sure
+            # that Opaque Closure method identity remains stable.
+            if c !== nothing && has_opaque_closure(c)
+                linfo.uninferred = copy(c)
+            end
+        end
     end
     if c === nothing && isdefined(m, :source)
         src = m.source
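The new has_opaque_closure helper looks for the :new_opaque_closure expression head that lowering emits for opaque closures; caching the generated CodeInfo in linfo.uninferred keeps repeated calls to retrieve_code_info from re-running the generator and thereby minting a fresh closure method each time. A rough way to see the head this check scans for, assuming a build where the @opaque macro is available and lowers to this form:

    using Base.Experimental: @opaque

    f(x) = @opaque y -> x + y

    ci = first(code_lowered(f, (Int,)))
    any(st -> st isa Expr && st.head === :new_opaque_closure, ci.code)  # expected: true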
diff --git a/src/builtins.c b/src/builtins.c
index 9844545c8a24d..ebf6e43eed542 100644
--- a/src/builtins.c
+++ b/src/builtins.c
@@ -1611,6 +1611,7 @@ void jl_init_primitives(void) JL_GC_DISABLED
     add_builtin("Argument", (jl_value_t*)jl_argument_type);
     add_builtin("Const", (jl_value_t*)jl_const_type);
     add_builtin("PartialStruct", (jl_value_t*)jl_partial_struct_type);
+    add_builtin("PartialOpaque", (jl_value_t*)jl_partial_opaque_type);
     add_builtin("MethodMatch", (jl_value_t*)jl_method_match_type);
     add_builtin("IntrinsicFunction", (jl_value_t*)jl_intrinsic_type);
     add_builtin("Function", (jl_value_t*)jl_function_type);
diff --git a/src/dump.c b/src/dump.c
index 4f063cbf10c31..b6963b629aedd 100644
--- a/src/dump.c
+++ b/src/dump.c
@@ -380,11 +380,11 @@ static void jl_serialize_module(jl_serializer_state *s, jl_module_t *m)
     write_uint8(s->s, m->infer);
 }
 
-static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_literal) JL_GC_DISABLED
+static inline int jl_serialize_generic(jl_serializer_state *s, jl_value_t *v)
 {
     if (v == NULL) {
         write_uint8(s->s, TAG_NULL);
-        return;
+        return 1;
     }
 
     void *tag = ptrhash_get(&ser_tag, v);
@@ -393,28 +393,29 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
         if (t8 <= LAST_TAG)
             write_uint8(s->s, 0);
         write_uint8(s->s, t8);
-        return;
+        return 1;
     }
+
     if (jl_is_symbol(v)) {
         void *idx = ptrhash_get(&common_symbol_tag, v);
         if (idx != HT_NOTFOUND) {
             write_uint8(s->s, TAG_COMMONSYM);
             write_uint8(s->s, (uint8_t)(size_t)idx);
-            return;
+            return 1;
         }
     }
     else if (v == (jl_value_t*)jl_core_module) {
         write_uint8(s->s, TAG_CORE);
-        return;
+        return 1;
     }
     else if (v == (jl_value_t*)jl_base_module) {
         write_uint8(s->s, TAG_BASE);
-        return;
+        return 1;
     }
 
     if (jl_typeis(v, jl_string_type) && jl_string_len(v) == 0) {
         jl_serialize_value(s, jl_an_empty_string);
-        return;
+        return 1;
     }
     else if (!jl_is_uint8(v)) {
         void **bp = ptrhash_bp(&backref_table, v);
@@ -428,7 +429,7 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
             write_uint8(s->s, TAG_BACKREF);
             write_int32(s->s, pos);
         }
-        return;
+        return 1;
     }
     intptr_t pos = backref_table_numel++;
     if (((jl_datatype_t*)(jl_typeof(v)))->name == jl_idtable_typename) {
@@ -453,6 +454,57 @@
         ptrhash_put(&backref_table, v, (char*)HT_NOTFOUND + pos + 1);
     }
 
+    return 0;
+}
+
+static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_t *codeinst, int skip_partial_opaque)
+{
+    if (jl_serialize_generic(s, (jl_value_t*)codeinst)) {
+        return;
+    }
+
+    int validate = 0;
+    if (codeinst->max_world == ~(size_t)0)
+        validate = 1; // can check on deserialize if this cache entry is still valid
+    int flags = validate << 0;
+    if (codeinst->invoke == jl_fptr_const_return)
+        flags |= 1 << 2;
+    if (codeinst->precompile)
+        flags |= 1 << 3;
+
+    int write_ret_type = validate || codeinst->min_world == 0;
+    if (write_ret_type && codeinst->rettype_const &&
+            jl_typeis(codeinst->rettype_const, jl_partial_opaque_type)) {
+        if (skip_partial_opaque) {
+            jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque);
+            return;
+        } else {
+            jl_error("Cannot serialize CodeInstance with PartialOpaque rettype");
+        }
+    }
+
+    write_uint8(s->s, TAG_CODE_INSTANCE);
+    write_uint8(s->s, flags);
+    jl_serialize_value(s, (jl_value_t*)codeinst->def);
+    if (write_ret_type) {
+        jl_serialize_value(s, codeinst->inferred);
+        jl_serialize_value(s, codeinst->rettype_const);
+        jl_serialize_value(s, codeinst->rettype);
+    }
+    else {
+        // skip storing useless data
+        jl_serialize_value(s, NULL);
+        jl_serialize_value(s, NULL);
+        jl_serialize_value(s, jl_any_type);
+    }
+    jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque);
+}
+
+static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_literal) JL_GC_DISABLED
+{
+    if (jl_serialize_generic(s, v)) {
+        return;
+    }
+
     size_t i;
     if (jl_is_svec(v)) {
         size_t l = jl_svec_len(v);
@@ -645,33 +697,10 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
         }
         jl_serialize_value(s, (jl_value_t*)backedges);
         jl_serialize_value(s, (jl_value_t*)NULL); //callbacks
-        jl_serialize_value(s, (jl_value_t*)mi->cache);
+        jl_serialize_code_instance(s, mi->cache, 1);
     }
     else if (jl_is_code_instance(v)) {
-        write_uint8(s->s, TAG_CODE_INSTANCE);
-        jl_code_instance_t *codeinst = (jl_code_instance_t*)v;
-        int validate = 0;
-        if (codeinst->max_world == ~(size_t)0)
-            validate = 1; // can check on deserialize if this cache entry is still valid
-        int flags = validate << 0;
-        if (codeinst->invoke == jl_fptr_const_return)
-            flags |= 1 << 2;
-        if (codeinst->precompile)
-            flags |= 1 << 3;
-        write_uint8(s->s, flags);
-        jl_serialize_value(s, (jl_value_t*)codeinst->def);
-        if (validate || codeinst->min_world == 0) {
-            jl_serialize_value(s, codeinst->inferred);
-            jl_serialize_value(s, codeinst->rettype_const);
-            jl_serialize_value(s, codeinst->rettype);
-        }
-        else {
-            // skip storing useless data
-            jl_serialize_value(s, NULL);
-            jl_serialize_value(s, NULL);
-            jl_serialize_value(s, jl_any_type);
-        }
-        jl_serialize_value(s, codeinst->next);
+        jl_serialize_code_instance(s, (jl_code_instance_t*)v, 0);
     }
     else if (jl_typeis(v, jl_module_type)) {
         jl_serialize_module(s, (jl_module_t*)v);
@@ -2422,6 +2451,7 @@ static jl_method_t *jl_lookup_method(jl_methtable_t *mt, jl_datatype_t *sig, siz
 
 static jl_method_t *jl_recache_method(jl_method_t *m)
 {
+    assert(!m->is_for_opaque_closure);
     jl_datatype_t *sig = (jl_datatype_t*)m->sig;
     jl_methtable_t *mt = jl_method_table_for((jl_value_t*)m->sig);
     assert((jl_value_t*)mt != jl_nothing);
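The add_builtin call above makes the new datatype reachable from Julia as Core.PartialOpaque; its field layout is declared in jltypes.c further below, and the global is exported via jl_exported_data.inc and julia.h. A hypothetical sanity check on a build that carries this patch:

    # Hypothetical check; the expected field names follow the jl_perm_symsvec
    # declaration in jltypes.c below.
    @assert Core.PartialOpaque isa DataType
    @assert fieldnames(Core.PartialOpaque) == (:typ, :env, :isva, :parent, :source)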
diff --git a/src/gf.c b/src/gf.c
index 43694a25bdb6b..8b1a34bfd9e65 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -1537,6 +1537,7 @@ static jl_typemap_entry_t *do_typemap_search(jl_methtable_t *mt JL_PROPAGATES_RO
 
 static void jl_method_table_invalidate(jl_methtable_t *mt, jl_typemap_entry_t *methodentry, jl_method_t *method, size_t max_world)
 {
+    assert(!method->is_for_opaque_closure);
     method->deleted_world = methodentry->max_world = max_world;
     // drop this method from mt->cache
     struct invalidate_mt_env mt_cache_env;
diff --git a/src/jl_exported_data.inc b/src/jl_exported_data.inc
index 0b495568ff5c7..d65de126b86a8 100644
--- a/src/jl_exported_data.inc
+++ b/src/jl_exported_data.inc
@@ -71,6 +71,7 @@
     XX(jl_nothing_type) \
     XX(jl_number_type) \
     XX(jl_partial_struct_type) \
+    XX(jl_partial_opaque_type) \
     XX(jl_phicnode_type) \
     XX(jl_phinode_type) \
     XX(jl_pinode_type) \
diff --git a/src/jltypes.c b/src/jltypes.c
index cb853ce2c521c..6f5bc3a93a873 100644
--- a/src/jltypes.c
+++ b/src/jltypes.c
@@ -2396,6 +2396,10 @@ void jl_init_types(void) JL_GC_DISABLED
     jl_compute_field_offsets((jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_opaque_closure_type));
 
+    jl_partial_opaque_type = jl_new_datatype(jl_symbol("PartialOpaque"), core, jl_any_type, jl_emptysvec,
+                                             jl_perm_symsvec(5, "typ", "env", "isva", "parent", "source"),
+                                             jl_svec(5, jl_type_type, jl_any_type, jl_bool_type, jl_method_instance_type, jl_method_type), 0, 0, 5);
+
     // complete builtin type metadata
     jl_voidpointer_type = (jl_datatype_t*)pointer_void;
     jl_uint8pointer_type = (jl_datatype_t*)jl_apply_type1((jl_value_t*)jl_pointer_type, (jl_value_t*)jl_uint8_type);
diff --git a/src/julia.h b/src/julia.h
index 98a1a8a2d4959..16d8934249140 100644
--- a/src/julia.h
+++ b/src/julia.h
@@ -633,6 +633,7 @@ extern JL_DLLIMPORT jl_datatype_t *jl_typedslot_type JL_GLOBALLY_ROOTED;
 extern JL_DLLIMPORT jl_datatype_t *jl_argument_type JL_GLOBALLY_ROOTED;
 extern JL_DLLIMPORT jl_datatype_t *jl_const_type JL_GLOBALLY_ROOTED;
 extern JL_DLLIMPORT jl_datatype_t *jl_partial_struct_type JL_GLOBALLY_ROOTED;
+extern JL_DLLIMPORT jl_datatype_t *jl_partial_opaque_type JL_GLOBALLY_ROOTED;
 extern JL_DLLIMPORT jl_datatype_t *jl_method_match_type JL_GLOBALLY_ROOTED;
 extern JL_DLLIMPORT jl_datatype_t *jl_simplevector_type JL_GLOBALLY_ROOTED;
 extern JL_DLLIMPORT jl_typename_t *jl_tuple_typename JL_GLOBALLY_ROOTED;
diff --git a/src/staticdata.c b/src/staticdata.c
index accac56e62090..46cdadf7e03b0 100644
--- a/src/staticdata.c
+++ b/src/staticdata.c
@@ -68,6 +68,7 @@ jl_value_t **const*const get_tags(void) {
         INSERT_TAG(jl_returnnode_type);
         INSERT_TAG(jl_const_type);
         INSERT_TAG(jl_partial_struct_type);
+        INSERT_TAG(jl_partial_opaque_type);
         INSERT_TAG(jl_method_match_type);
         INSERT_TAG(jl_pinode_type);
         INSERT_TAG(jl_phinode_type);