diff --git a/src/gc.c b/src/gc.c
index 02176fba53733..9cacfb5fa5571 100644
--- a/src/gc.c
+++ b/src/gc.c
@@ -2641,6 +2641,7 @@ mark: {
 
 extern jl_typemap_entry_t *call_cache[N_CALL_CACHE];
 extern jl_array_t *jl_all_methods;
+extern jl_array_t *_jl_debug_method_invalidation;
 
 static void jl_gc_queue_thread_local(jl_gc_mark_cache_t *gc_cache, jl_gc_mark_sp_t *sp,
                                      jl_ptls_t ptls2)
@@ -2680,6 +2681,8 @@ static void mark_roots(jl_gc_mark_cache_t *gc_cache, jl_gc_mark_sp_t *sp)
             gc_mark_queue_obj(gc_cache, sp, call_cache[i]);
     if (jl_all_methods != NULL)
         gc_mark_queue_obj(gc_cache, sp, jl_all_methods);
+    if (_jl_debug_method_invalidation != NULL)
+        gc_mark_queue_obj(gc_cache, sp, _jl_debug_method_invalidation);
 
     // constants
     gc_mark_queue_obj(gc_cache, sp, jl_emptytuple_type);
diff --git a/src/gf.c b/src/gf.c
index 13ccfb0ba03be..7948e9b9c5d80 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -1202,14 +1202,15 @@ static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype
     return nf;
 }
 
-struct shadowed_matches_env {
+
+struct matches_env {
     struct typemap_intersection_env match;
     jl_typemap_entry_t *newentry;
     jl_value_t *shadowed;
 };
-static int check_shadowed_visitor(jl_typemap_entry_t *oldentry, struct typemap_intersection_env *closure0)
+static int get_intersect_visitor(jl_typemap_entry_t *oldentry, struct typemap_intersection_env *closure0)
 {
-    struct shadowed_matches_env *closure = container_of(closure0, struct shadowed_matches_env, match);
+    struct matches_env *closure = container_of(closure0, struct matches_env, match);
     if (oldentry == closure->newentry)
         return 1;
     if (oldentry->max_world < ~(size_t)0 || oldentry->min_world == closure->newentry->min_world)
@@ -1217,53 +1218,13 @@ static int check_shadowed_visitor(jl_typemap_entry_t *oldentry, struct typemap_i
         // also be careful not to try to scan something from the current dump-reload though
         return 1;
     jl_method_t *oldmethod = oldentry->func.method;
-    if (oldmethod->specializations == jl_emptysvec)
-        // nothing inferred ever before means nothing shadowed ever before
-        return 1;
-
-    jl_tupletype_t *type = closure->newentry->sig;
-    jl_tupletype_t *sig = oldentry->sig;
-
-    int shadowed = 0;
-    if (closure->match.issubty) { // (new)type <: (old)sig
-        // new entry is more specific
-        shadowed = 1;
-    }
-    else if (jl_subtype((jl_value_t*)sig, (jl_value_t*)type)) {
-        // old entry is more specific
-    }
-    else if (jl_type_morespecific_no_subtype((jl_value_t*)type, (jl_value_t*)sig)) {
-        // new entry is more specific
-        shadowed = 1;
-    }
-    else if (jl_type_morespecific_no_subtype((jl_value_t*)sig, (jl_value_t*)type)) {
-        // old entry is more specific
-    }
-    else {
-        // sort order is ambiguous
-        shadowed = 1;
-    }
-
-    // ok: record that this method definition is being partially replaced
-    // (either with a real definition, or an ambiguity error)
-    if (shadowed) {
-        if (closure->shadowed == NULL) {
-            closure->shadowed = (jl_value_t*)oldmethod;
-        }
-        else if (!jl_is_array(closure->shadowed)) {
-            jl_array_t *list = jl_alloc_vec_any(2);
-            jl_array_ptr_set(list, 0, closure->shadowed);
-            jl_array_ptr_set(list, 1, (jl_value_t*)oldmethod);
-            closure->shadowed = (jl_value_t*)list;
-        }
-        else {
-            jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, (jl_value_t*)oldmethod);
-        }
-    }
+    if (closure->shadowed == NULL)
+        closure->shadowed = (jl_value_t*)jl_alloc_vec_any(0);
+    jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, (jl_value_t*)oldmethod);
     return 1;
 }
 
-static jl_value_t *check_shadowed_matches(jl_typemap_t *defs, jl_typemap_entry_t *newentry)
+static jl_value_t *get_intersect_matches(jl_typemap_t *defs, jl_typemap_entry_t *newentry)
 {
     jl_tupletype_t *type = newentry->sig;
     jl_tupletype_t *ttypes = (jl_tupletype_t*)jl_unwrap_unionall((jl_value_t*)type);
@@ -1276,7 +1237,7 @@ static jl_value_t *check_shadowed_matches(jl_typemap_t *defs, jl_typemap_entry_t
         else
             va = NULL;
     }
-    struct shadowed_matches_env env = {{check_shadowed_visitor, (jl_value_t*)type, va}};
+    struct matches_env env = {{get_intersect_visitor, (jl_value_t*)type, va}};
     env.match.ti = NULL;
     env.match.env = jl_emptysvec;
     env.newentry = newentry;
@@ -1476,7 +1437,7 @@ JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *t
 
 struct invalidate_mt_env {
     jl_typemap_entry_t *newentry;
-    jl_value_t *shadowed;
+    jl_array_t *shadowed;
    size_t max_world;
     int invalidated;
 };
@@ -1486,25 +1447,15 @@ static int invalidate_mt_cache(jl_typemap_entry_t *oldentry, void *closure0)
     JL_GC_PROMISE_ROOTED(env->newentry);
     if (oldentry->max_world == ~(size_t)0) {
         jl_method_instance_t *mi = oldentry->func.linfo;
-        jl_method_t *m = mi->def.method;
         int intersects = 0;
-        if (jl_is_method(env->shadowed)) {
-            if ((jl_value_t*)m == env->shadowed) {
+        jl_method_instance_t **d = (jl_method_instance_t**)jl_array_ptr_data(env->shadowed);
+        size_t i, n = jl_array_len(env->shadowed);
+        for (i = 0; i < n; i++) {
+            if (mi == d[i]) {
                 intersects = 1;
+                break;
             }
         }
-        else {
-            assert(jl_is_array(env->shadowed));
-            jl_method_t **d = (jl_method_t**)jl_array_ptr_data(env->shadowed);
-            size_t i, n = jl_array_len(env->shadowed);
-            for (i = 0; i < n; i++) {
-                if (m == d[i]) {
-                    intersects = 1;
-                    break;
-                }
-            }
-        }
-        intersects = intersects && !jl_has_empty_intersection((jl_value_t*)oldentry->sig, (jl_value_t*)env->newentry->sig);
         if (intersects) {
             if (_jl_debug_method_invalidation) {
                 jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)oldentry);
@@ -1550,20 +1501,12 @@ static jl_typemap_entry_t *do_typemap_search(jl_methtable_t *mt JL_PROPAGATES_RO
 }
 #endif
 
-// TODO: decrease repeated work?
-// This implementation is stupidly inefficient, but probably correct
-JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *method)
+static void jl_method_table_invalidate(jl_methtable_t *mt, jl_typemap_entry_t *methodentry, jl_method_t *method, size_t max_world)
 {
-    if (jl_options.incremental && jl_generating_output())
-        jl_printf(JL_STDERR, "WARNING: method deletion during Module precompile may lead to undefined behavior"
-                  "\n  ** incremental compilation may be fatally broken for this module **\n\n");
-    jl_typemap_entry_t *methodentry = do_typemap_search(mt, method);
-    JL_LOCK(&mt->writelock);
-    // Narrow the world age on the method to make it uncallable
-    method->deleted_world = methodentry->max_world = jl_world_counter++;
+    method->deleted_world = methodentry->max_world = max_world;
     // drop this method from mt->cache
     struct invalidate_mt_env mt_cache_env;
-    mt_cache_env.max_world = methodentry->max_world - 1;
+    mt_cache_env.max_world = max_world;
     mt_cache_env.newentry = methodentry;
     mt_cache_env.shadowed = NULL;
     mt_cache_env.invalidated = 0;
@@ -1598,6 +1541,17 @@ JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *metho
         jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
         JL_GC_POP();
     }
+}
+
+JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *method)
+{
+    if (jl_options.incremental && jl_generating_output())
+        jl_printf(JL_STDERR, "WARNING: method deletion during Module precompile may lead to undefined behavior"
+                  "\n  ** incremental compilation may be fatally broken for this module **\n\n");
+    jl_typemap_entry_t *methodentry = do_typemap_search(mt, method);
+    JL_LOCK(&mt->writelock);
+    // Narrow the world age on the method to make it uncallable
+    jl_method_table_invalidate(mt, methodentry, method, jl_world_counter++);
     JL_UNLOCK(&mt->writelock);
 }
 
@@ -1608,38 +1562,62 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
     assert(jl_is_mtable(mt));
     jl_value_t *type = method->sig;
     jl_value_t *oldvalue = NULL;
+    jl_array_t *oldmi = NULL;
     if (method->primary_world == 1)
         method->primary_world = ++jl_world_counter;
     size_t max_world = method->primary_world - 1;
-    int invalidated = 0;
     jl_value_t *loctag = NULL;  // debug info for invalidation
+    jl_value_t *isect = NULL;
    jl_typemap_entry_t *newentry = NULL;
-    JL_GC_PUSH3(&oldvalue, &newentry, &loctag);
+    JL_GC_PUSH5(&oldvalue, &oldmi, &newentry, &loctag, &isect);
     JL_LOCK(&mt->writelock);
-    // first delete the existing entry (we'll disable it later)
+    // first find if we have an existing entry to delete
     struct jl_typemap_assoc search = {(jl_value_t*)type, method->primary_world, NULL, 0, ~(size_t)0};
     jl_typemap_entry_t *oldentry = jl_typemap_assoc_by_type(mt->defs, &search, /*offs*/0, /*subtype*/0);
-    if (oldentry) {
-        oldentry->max_world = method->primary_world - 1;
-        // TODO: just append our new entry right here
-    }
     // then add our new entry
     newentry = jl_typemap_alloc((jl_tupletype_t*)type, simpletype, jl_emptysvec,
             (jl_value_t*)method, method->primary_world, method->deleted_world);
     jl_typemap_insert(&mt->defs, (jl_value_t*)mt, newentry, 0, &method_defs);
     if (oldentry) {
-        oldvalue = oldentry->func.value;
-        method_overwrite(newentry, (jl_method_t*)oldvalue);
+        jl_method_t *m = oldentry->func.method;
+        method_overwrite(newentry, m);
+        jl_method_table_invalidate(mt, oldentry, m, max_world);
     }
     else {
+        oldvalue = get_intersect_matches(mt->defs, newentry);
+
+        int invalidated = 0;
+        jl_method_t **d;
+        size_t j, n;
+        if (oldvalue == NULL) {
+            d = NULL;
+            n = 0;
+        }
+        else {
+            assert(jl_is_array(oldvalue));
+            d = (jl_method_t**)jl_array_ptr_data(oldvalue);
+            n = jl_array_len(oldvalue);
+        }
         if (mt->backedges) {
             jl_value_t **backedges = jl_array_ptr_data(mt->backedges);
             size_t i, na = jl_array_len(mt->backedges);
             size_t ins = 0;
             for (i = 1; i < na; i += 2) {
                 jl_value_t *backedgetyp = backedges[i - 1];
-                if (!jl_has_empty_intersection(backedgetyp, (jl_value_t*)type)) {
-                    // TODO: only delete if the ml_matches list (with intersection=0, include_ambiguous=1) is empty
+                isect = jl_type_intersection(backedgetyp, (jl_value_t*)type);
+                if (isect != jl_bottom_type) {
+                    // see if the intersection was actually already fully
+                    // covered by anything (method or ambiguity is okay)
+                    size_t j;
+                    for (j = 0; j < n; j++) {
+                        jl_method_t *m = d[j];
+                        if (jl_subtype(isect, m->sig))
+                            break;
+                    }
+                    if (j != n)
+                        isect = jl_bottom_type;
+                }
+                if (isect != jl_bottom_type) {
                     jl_method_instance_t *backedge = (jl_method_instance_t*)backedges[i];
                     invalidate_method_instance(backedge, max_world, 0);
                     invalidated = 1;
@@ -1656,61 +1634,96 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
             else
                 jl_array_del_end(mt->backedges, na - ins);
         }
-        oldvalue = check_shadowed_matches(mt->defs, newentry);
-    }
-
-    if (oldvalue) {
-        // drop anything in mt->cache that might overlap with the new method
-        struct invalidate_mt_env mt_cache_env;
-        mt_cache_env.max_world = max_world;
-        mt_cache_env.shadowed = oldvalue;
-        mt_cache_env.newentry = newentry;
-        mt_cache_env.invalidated = 0;
-
-        jl_typemap_visitor(mt->cache, invalidate_mt_cache, (void*)&mt_cache_env);
-        jl_array_t *leafcache = mt->leafcache;
-        size_t i, l = jl_array_len(leafcache);
-        for (i = 1; i < l; i += 2) {
-            jl_value_t *entry = jl_array_ptr_ref(leafcache, i);
-            if (entry) {
-                while (entry != jl_nothing) {
-                    invalidate_mt_cache((jl_typemap_entry_t*)entry, (void*)&mt_cache_env);
-                    entry = (jl_value_t*)((jl_typemap_entry_t*)entry)->next;
+        if (oldvalue) {
+            oldmi = jl_alloc_vec_any(0);
+            enum morespec_options {
+                morespec_unknown,
+                morespec_isnot,
+                morespec_is
+            };
+            char *morespec = (char*)alloca(n);
+            memset(morespec, morespec_unknown, n);
+            for (j = 0; j < n; j++) {
+                jl_method_t *m = d[j];
+                if (morespec[j] == (char)morespec_is)
+                    continue;
+                jl_svec_t *specializations = jl_atomic_load_acquire(&m->specializations);
+                jl_method_instance_t **data = (jl_method_instance_t**)jl_svec_data(specializations);
+                size_t i, l = jl_svec_len(specializations);
+                enum morespec_options ambig = morespec_unknown;
+                for (i = 0; i < l; i++) {
+                    jl_method_instance_t *mi = jl_atomic_load_relaxed(&data[i]);
+                    if (mi == NULL)
+                        continue;
+                    isect = jl_type_intersection(m->sig, (jl_value_t*)mi->specTypes);
+                    isect = jl_type_intersection(type, isect);
+                    if (isect != jl_bottom_type) {
+                        if (morespec[j] == (char)morespec_unknown)
+                            morespec[j] = (char)jl_type_morespecific(m->sig, type) ? morespec_is : morespec_isnot;
+                        if (morespec[j] == (char)morespec_is)
+                            // not actually shadowing--the existing method is still better
+                            break;
+                        if (ambig == morespec_unknown)
+                            ambig = jl_type_morespecific(type, m->sig) ? morespec_is : morespec_isnot;
+                        // replacing a method--see if this really was the selected method previously
+                        // over the intersection
+                        if (ambig == morespec_isnot) {
+                            size_t k;
+                            for (k = 0; k < n; k++) {
+                                jl_method_t *m2 = d[k];
+                                if (m == m2 || !jl_subtype(isect, m2->sig))
+                                    continue;
+                                if (morespec[k] == (char)morespec_unknown)
+                                    morespec[k] = (char)jl_type_morespecific(m2->sig, type) ? morespec_is : morespec_isnot;
+                                if (morespec[k] == (char)morespec_is)
+                                    // not actually shadowing this--m2 will still be better
+                                    break;
+                                // since m2 was also a previous match over isect,
+                                // see if m was also previously dominant over all m2
+                                if (!jl_type_morespecific(m->sig, m2->sig))
+                                    break;
+                            }
+                            if (k != n)
+                                continue;
+                        }
+                        jl_array_ptr_1d_push(oldmi, (jl_value_t*)mi);
+                        if (mi->backedges) {
+                            invalidated = 1;
+                            invalidate_backedges(mi, max_world, "jl_method_table_insert");
+                        }
+                    }
                 }
             }
-        }
-
-        jl_value_t **d;
-        size_t j, n;
-        if (jl_is_method(oldvalue)) {
-            d = &oldvalue;
-            n = 1;
-        }
-        else {
-            assert(jl_is_array(oldvalue));
-            d = jl_array_ptr_data(oldvalue);
-            n = jl_array_len(oldvalue);
-        }
-        for (j = 0; j < n; j++) {
-            jl_value_t *m = d[j];
-            jl_svec_t *specializations = jl_atomic_load_acquire(&((jl_method_t*)m)->specializations);
-            jl_method_instance_t **data = (jl_method_instance_t**)jl_svec_data(specializations);
-            size_t i, l = jl_svec_len(specializations);
-            for (i = 0; i < l; i++) {
-                jl_method_instance_t *mi = jl_atomic_load_relaxed(&data[i]);
-                if (mi != NULL && mi->backedges && !jl_has_empty_intersection(type, (jl_value_t*)mi->specTypes)) {
-                    invalidate_backedges(mi, max_world, "jl_method_table_insert");
-                    invalidated = 1;
+            if (jl_array_len(oldmi)) {
+                // search mt->cache and leafcache and drop anything that might overlap with the new method
+                // TODO: keep track of just the `mi` for which shadowing was true (to avoid recomputing that here)
+                struct invalidate_mt_env mt_cache_env;
+                mt_cache_env.max_world = max_world;
+                mt_cache_env.shadowed = oldmi;
+                mt_cache_env.newentry = newentry;
+                mt_cache_env.invalidated = 0;
+
+                jl_typemap_visitor(mt->cache, invalidate_mt_cache, (void*)&mt_cache_env);
+                jl_array_t *leafcache = mt->leafcache;
+                size_t i, l = jl_array_len(leafcache);
+                for (i = 1; i < l; i += 2) {
+                    jl_value_t *entry = jl_array_ptr_ref(leafcache, i);
+                    if (entry) {
+                        while (entry != jl_nothing) {
+                            invalidate_mt_cache((jl_typemap_entry_t*)entry, (void*)&mt_cache_env);
+                            entry = (jl_value_t*)((jl_typemap_entry_t*)entry)->next;
+                        }
+                    }
                 }
             }
         }
+        if (invalidated && _jl_debug_method_invalidation) {
+            jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)method);
+            loctag = jl_cstr_to_string("jl_method_table_insert");
+            jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
+        }
+        update_max_args(mt, type);
     }
-    if (invalidated && _jl_debug_method_invalidation) {
-        jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)method);
-        loctag = jl_cstr_to_string("jl_method_table_insert");
-        jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
-    }
-    update_max_args(mt, type);
     JL_UNLOCK(&mt->writelock);
     JL_GC_POP();
 }
diff --git a/test/worlds.jl b/test/worlds.jl
index 30aea939bafaa..19b8857c352e3 100644
--- a/test/worlds.jl
+++ b/test/worlds.jl
@@ -315,15 +315,14 @@ struct Normed35855 <: FixedPoint35855{UInt8}
     i::UInt8
     Normed35855(i::Integer, _) = new(i % UInt8)
 end
-(::Type{X})(x::Real) where X<:FixedPoint35855{T} where T = X(round(T, typemax(T)*x), 0)
-
-@test_broken worlds(mi) == w
+(::Type{X})(x::Real) where {T, X<:FixedPoint35855{T}} = X(round(T, typemax(T)*x), 0)
+@test worlds(mi) == w
 
 mi = instance(convert, (Type{Nothing}, String))
 w = worlds(mi)
 abstract type Colorant35855 end
-Base.convert(::Type{C}, c) where C<:Colorant35855 = false
-@test_broken worlds(mi) == w
+Base.convert(::Type{C}, c) where {C<:Colorant35855} = false
+@test worlds(mi) == w
 
 # invoke_in_world
 f_inworld(x) = "world one; x=$x"
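
For context on the two `@test_broken` to `@test` flips in the test/worlds.jl hunk, here is a condensed, annotated sketch of the second scenario. It is not part of the patch, and it relies on the `instance` and `worlds` helpers defined earlier in `test/worlds.jl` (not shown in this diff). The point it illustrates: with the finer intersection and specificity analysis now done in `jl_method_table_insert`, inserting a `convert` method for an unrelated abstract type no longer invalidates a previously compiled `convert` specialization that the new method never could have been selected for, so that specialization keeps its world-age bounds.

```julia
# Condensed from the test/worlds.jl hunk above. `instance(f, types)` looks up the
# compiled MethodInstance and `worlds(mi)` collects its world-age bounds; both are
# helpers defined earlier in that file and are not shown in this diff.
mi = instance(convert, (Type{Nothing}, String))   # an already-compiled specialization
w = worlds(mi)                                    # record its current world-age bounds

abstract type Colorant35855 end
# The new method's signature intersects the broader convert(::Type, ::Any) family,
# but it was never the selected method for the compiled convert(::Type{Nothing},
# ::String) instance, so the insertion should leave that instance alone.
Base.convert(::Type{C}, c) where {C<:Colorant35855} = false

@test worlds(mi) == w   # was @test_broken before this change
```

Before this patch, `jl_method_table_insert` treated any previously defined method whose signature merely intersected the new one as "shadowed" and invalidated all of its specializations with a non-empty intersection; the re-enabled tests check that the per-specialization morespecific/ambiguity analysis added here avoids that over-invalidation.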