@@ -86,34 +86,119 @@ static int8_t jl_cachearg_offset(jl_methtable_t *mt)

/// ----- Insertion logic for special entries ----- ///

+
+static uint_t speccache_hash(size_t idx, jl_svec_t *data)
+{
+    jl_method_instance_t *ml = (jl_method_instance_t*)jl_svecref(data, idx);
+    jl_value_t *sig = ml->specTypes;
+    if (jl_is_unionall(sig))
+        sig = jl_unwrap_unionall(sig);
+    return ((jl_datatype_t*)sig)->hash;
+}
+
+static int speccache_eq(size_t idx, const void *ty, jl_svec_t *data, uint_t hv)
+{
+    jl_method_instance_t *ml = (jl_method_instance_t*)jl_svecref(data, idx);
+    jl_value_t *sig = ml->specTypes;
+    if (ty == sig)
+        return 1;
+    uint_t h2 = ((jl_datatype_t*)(jl_is_unionall(sig) ? jl_unwrap_unionall(sig) : sig))->hash;
+    if (h2 != hv)
+        return 0;
+    return jl_types_equal(sig, (jl_value_t*)ty);
+}
+
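Note: speccache_hash and speccache_eq are the callback pair handed to the jl_smallintset routines used below. The set stores only small integers (indices into the specializations svec), so the caller supplies functions that map an index back to its MethodInstance and hash or compare its signature; speccache_eq rejects on a cheap hash mismatch before paying for jl_types_equal. A minimal sketch of that contract, assuming a linear-probe table that is never allowed to fill up (the names and layout here are illustrative, not Julia's actual jl_smallintset implementation):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/types.h>

    typedef uint32_t uint_t;
    typedef int (*eq_fn)(size_t idx, const void *key, void *backing, uint_t hv);

    struct intset {
        size_t *slots;   /* 0 = empty; otherwise element index + 1 */
        size_t nslots;   /* power of two, kept from ever filling completely */
    };

    /* Hash the key once, then linear-probe; eq() sees the precomputed hash
     * so it can reject cheaply before doing a full equality test. */
    static ssize_t intset_lookup(struct intset *s, eq_fn eq, const void *key,
                                 void *backing, uint_t hv)
    {
        size_t mask = s->nslots - 1;
        for (size_t probe = hv & mask; ; probe = (probe + 1) & mask) {
            size_t slot = s->slots[probe];
            if (slot == 0)
                return -1;                  /* empty slot reached: key absent */
            if (eq(slot - 1, key, backing, hv))
                return (ssize_t)(slot - 1); /* index into the backing array */
        }
    }

jl_smallintset_lookup is used below in exactly this shape: it hashes `type` once and lets speccache_eq decide matches against the backing svec.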
// get or create the MethodInstance for a specialization
JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams)
{
-    JL_LOCK(&m->writelock);
-    struct jl_typemap_assoc search = {type, 1, 0, NULL, 0, ~(size_t)0};
-    jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->specializations, &search, /*offs*/0, /*subtype*/0);
-    if (sf && jl_is_method_instance(sf->func.value)) {
-        JL_UNLOCK(&m->writelock);
-        return sf->func.linfo;
-    }
-    jl_method_instance_t *mi = jl_get_specialized(m, type, sparams);
-    JL_GC_PUSH1(&mi);
-    // TODO: fuse lookup and insert steps
-    jl_typemap_insert(&m->specializations, (jl_value_t*)m, (jl_tupletype_t*)type,
-            NULL, jl_emptysvec, (jl_value_t*)mi, 0, &tfunc_cache,
-            1, ~(size_t)0);
-    JL_UNLOCK(&m->writelock);
-    JL_GC_POP();
-    return mi;
+    uint_t hv = ((jl_datatype_t*)(jl_is_unionall(type) ? jl_unwrap_unionall(type) : type))->hash;
+    for (int locked = 0; ; locked++) {
+        jl_array_t *speckeyset = jl_atomic_load_acquire(&m->speckeyset);
+        jl_svec_t *specializations = jl_atomic_load_acquire(&m->specializations);
+        size_t i, cl = jl_svec_len(specializations);
+        if (hv) {
+            ssize_t idx = jl_smallintset_lookup(speckeyset, speccache_eq, type, specializations, hv);
+            if (idx != -1) {
+                jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, idx);
+                JL_GC_PROMISE_ROOTED(mi); // clang-sa doesn't understand jl_atomic_load_relaxed
+                if (locked)
+                    JL_UNLOCK(&m->writelock);
+                return mi;
+            }
+        }
+        else {
+            jl_method_instance_t **data = (jl_method_instance_t**)jl_svec_data(specializations);
+            JL_GC_PUSH1(&specializations); // clang-sa doesn't realize this loop uses specializations
+            for (i = cl; i > 0; i--) {
+                jl_method_instance_t *mi = jl_atomic_load_relaxed(&data[i - 1]);
+                JL_GC_PROMISE_ROOTED(mi); // clang-sa doesn't understand jl_atomic_load_relaxed
+                if (mi == NULL)
+                    break;
+                if (jl_types_equal(mi->specTypes, type)) {
+                    if (locked)
+                        JL_UNLOCK(&m->writelock);
+                    JL_GC_POP();
+                    return mi;
+                }
+            }
+            JL_GC_POP();
+        }
+        if (!sparams) // can't insert without knowing this
+            return NULL;
+        if (!locked) {
+            JL_LOCK(&m->writelock);
+        }
+        else {
+            if (hv) {
+                jl_method_instance_t **data = (jl_method_instance_t**)jl_svec_data(specializations);
+                for (i = 0; i < cl; i++) {
+                    jl_method_instance_t *mi = jl_atomic_load_relaxed(&data[i]);
+                    JL_GC_PROMISE_ROOTED(mi); // clang-sa doesn't understand jl_atomic_load_relaxed
+                    if (mi == NULL)
+                        break;
+                    assert(!jl_types_equal(mi->specTypes, type));
+                }
+            }
+            jl_method_instance_t *mi = jl_get_specialized(m, type, sparams);
+            JL_GC_PUSH1(&mi);
+            if (hv ? (i + 1 >= cl || jl_svecref(specializations, i + 1) != NULL) : (i <= 1 || jl_svecref(specializations, i - 2) != NULL)) {
+                size_t ncl = cl < 8 ? 8 : (cl * 3) >> 1;
+                jl_svec_t *nc = jl_alloc_svec_uninit(ncl);
+                if (i > 0)
+                    memcpy((char*)jl_svec_data(nc), jl_svec_data(specializations), sizeof(void*) * i);
+                memset((char*)jl_svec_data(nc) + sizeof(void*) * i, 0, sizeof(void*) * (ncl - cl));
+                if (i < cl)
+                    memcpy((char*)jl_svec_data(nc) + sizeof(void*) * (i + ncl - cl),
+                           (char*)jl_svec_data(specializations) + sizeof(void*) * i,
+                           sizeof(void*) * (cl - i));
+                jl_atomic_store_release(&m->specializations, nc);
+                JL_GC_PROMISE_ROOTED(nc); // clang-sa doesn't understand jl_atomic_store_release
+                jl_gc_wb(m, nc);
+                specializations = nc;
+                if (!hv)
+                    i += ncl - cl;
+            }
+            if (!hv)
+                i -= 1;
+            assert(jl_svecref(specializations, i) == NULL);
+            jl_svecset(specializations, i, mi); // jl_atomic_store_release?
+            if (hv) {
+                // TODO: fuse lookup and insert steps?
+                jl_smallintset_insert(&m->speckeyset, (jl_value_t*)m, speccache_hash, i, specializations);
+            }
+            JL_UNLOCK(&m->writelock);
+            JL_GC_POP();
+            return mi;
+        }
+    }
}
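The rewritten function is a lock-free fast path inside a retry loop: pass 0 scans the published speckeyset/specializations snapshot without the lock; on a miss it takes the lock and the loop rescans (pass 1), since another thread may have inserted the same signature in between; only a miss under the lock allocates and publishes a new MethodInstance. A reduced model of that find-or-insert discipline, assuming C11 atomics and a pthread mutex in place of the JL_LOCK machinery, and a fixed-capacity string table in place of the svec (all names invented for the sketch):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <string.h>

    struct table {
        char **items;          /* fixed capacity; written only under lock */
        _Atomic size_t len;    /* published with release after a slot is set */
        pthread_mutex_t lock;
    };

    static char *find_or_insert(struct table *t, const char *key)
    {
        for (int locked = 0; ; locked++) {
            /* pass 0 runs lock-free; pass 1 repeats the scan under the lock */
            size_t len = atomic_load_explicit(&t->len, memory_order_acquire);
            for (size_t i = 0; i < len; i++) {
                if (strcmp(t->items[i], key) == 0) {
                    if (locked)
                        pthread_mutex_unlock(&t->lock);
                    return t->items[i];
                }
            }
            if (!locked) {
                /* miss: take the lock and rescan, since another thread may
                 * have inserted this key between the two passes */
                pthread_mutex_lock(&t->lock);
                continue;
            }
            t->items[len] = strdup(key);   /* assumes capacity > len */
            atomic_store_explicit(&t->len, len + 1, memory_order_release);
            pthread_mutex_unlock(&t->lock);
            return t->items[len];
        }
    }

The svec itself is filled from both ends: entries whose signature has a usable type hash grow from the front and are indexed by the smallintset, while unhashable signatures (hv == 0) grow from the back and are found by the reverse linear scan, so the NULL gap always sits in the middle. The grow-to-(cl*3)>>1 branch preserves that layout by copying the front prefix, zero-filling the middle, and shifting the tail to the new end.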

JL_DLLEXPORT jl_value_t *jl_specializations_lookup(jl_method_t *m, jl_value_t *type)
{
-    struct jl_typemap_assoc search = {type, 1, 0, NULL, 0, ~(size_t)0};
-    jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->specializations, &search, /*offs*/0, /*subtype*/0);
-    if (!sf)
+    jl_value_t *mi = (jl_value_t*)jl_specializations_get_linfo(m, type, NULL);
+    if (mi == NULL)
        return jl_nothing;
-    return sf->func.value;
+    return mi;
}

JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_value_t *type, size_t world)
@@ -318,18 +403,18 @@ JL_DLLEXPORT jl_code_instance_t *jl_set_method_inferred(
    return codeinst;
}

-static int get_spec_unspec_list(jl_typemap_entry_t *l, void *closure)
-{
-    jl_method_instance_t *mi = l->func.linfo;
-    assert(jl_is_method_instance(mi));
-    if (jl_rettype_inferred(mi, jl_world_counter, jl_world_counter) == jl_nothing)
-        jl_array_ptr_1d_push((jl_array_t*)closure, l->func.value);
-    return 1;
-}
-
static int get_method_unspec_list(jl_typemap_entry_t *def, void *closure)
{
-    jl_typemap_visitor(def->func.method->specializations, get_spec_unspec_list, closure);
+    jl_svec_t *specializations = def->func.method->specializations;
+    size_t i, l = jl_svec_len(specializations);
+    for (i = 0; i < l; i++) {
+        jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i);
+        if (mi) {
+            assert(jl_is_method_instance(mi));
+            if (jl_rettype_inferred(mi, jl_world_counter, jl_world_counter) == jl_nothing)
+                jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)mi);
+        }
+    }
    return 1;
}
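With specializations now a flat svec instead of a typemap tree, the old visitor callback (get_spec_unspec_list threaded through jl_typemap_visitor) collapses into a direct loop. The `if (mi)` test matters: the svec is over-allocated and filled from both ends, so its middle slots are NULL holes that every scan must skip. The same skip-the-holes collection pattern in isolation, with invented names:

    #include <stddef.h>

    /* Collect every non-NULL element of a sparse, fixed-capacity slot array
     * that satisfies a predicate; NULL holes are legal anywhere. */
    static size_t collect_if(void **slots, size_t cap,
                             int (*pred)(void *), void **out)
    {
        size_t n = 0;
        for (size_t i = 0; i < cap; i++) {
            if (slots[i] != NULL && pred(slots[i]))
                out[n++] = slots[i];
        }
        return n;
    }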
@@ -1440,29 +1525,23 @@ static void invalidate_method_instance(jl_method_instance_t *replaced, size_t ma
}

// invalidate cached methods that overlap this definition
-struct invalidate_conflicting_env {
-    struct typemap_intersection_env match;
-    size_t max_world;
-    int invalidated;
-};
-static int invalidate_backedges(jl_typemap_entry_t *oldentry, struct typemap_intersection_env *closure0)
+static int invalidate_backedges(jl_method_instance_t *replaced_linfo, size_t max_world)
{
-    struct invalidate_conflicting_env *closure = container_of(closure0, struct invalidate_conflicting_env, match);
-    jl_method_instance_t *replaced_linfo = oldentry->func.linfo;
    JL_LOCK_NOGC(&replaced_linfo->def.method->writelock);
    jl_array_t *backedges = replaced_linfo->backedges;
+    int invalidated = 0;
    if (backedges) {
        // invalidate callers (if any)
        replaced_linfo->backedges = NULL;
        size_t i, l = jl_array_len(backedges);
        jl_method_instance_t **replaced = (jl_method_instance_t**)jl_array_ptr_data(backedges);
        for (i = 0; i < l; i++) {
-            invalidate_method_instance(replaced[i], closure->max_world, 1);
+            invalidate_method_instance(replaced[i], max_world, 1);
        }
-        closure->invalidated = 1;
+        invalidated = 1;
    }
    JL_UNLOCK_NOGC(&replaced_linfo->def.method->writelock);
-    return 1;
+    return invalidated;
}
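The deleted closure struct existed only to smuggle max_world and the invalidated flag through the typemap visitor's fixed callback signature, with the wrapper recovered via container_of. For reference, container_of is the standard embedded-member idiom; a self-contained sketch (the macro below is the conventional Linux-style definition, shown for illustration rather than copied from Julia's headers):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner { int tag; };
    struct outer { int extra_state; struct inner match; };

    /* The callback only receives the embedded member; pointer arithmetic
     * recovers the enclosing struct that carries the extra state. */
    static int visitor(struct inner *m)
    {
        struct outer *env = container_of(m, struct outer, match);
        return env->extra_state;
    }

Passing the replaced MethodInstance and max_world directly, and returning whether anything was actually invalidated, removes that indirection now that callers iterate the specializations svec themselves.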

// add a backedge from callee to caller
@@ -1595,10 +1674,13 @@ JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *metho
    mt_cache_env.shadowed = (jl_value_t*)method;
    jl_typemap_visitor(mt->cache, invalidate_mt_cache, (void*)&mt_cache_env);
    // Invalidate the backedges
-    struct invalidate_conflicting_env env = {{NULL, NULL, NULL}};
-    env.invalidated = 0;
-    env.max_world = methodentry->max_world;
-    jl_typemap_visitor(methodentry->func.method->specializations, (jl_typemap_visitor_fptr)invalidate_backedges, &env.match);
+    jl_svec_t *specializations = methodentry->func.method->specializations;
+    size_t i, l = jl_svec_len(specializations);
+    for (i = 0; i < l; i++) {
+        jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i);
+        if (mi)
+            invalidate_backedges(mi, methodentry->max_world);
+    }
    JL_UNLOCK(&mt->writelock);
}

@@ -1667,34 +1749,30 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
        // mt->cache = jl_nothing;
        //}

-        jl_datatype_t *unw = (jl_datatype_t*)jl_unwrap_unionall(type);
-        size_t l = jl_svec_len(unw->parameters);
-        jl_value_t *va = NULL;
-        if (l > 0) {
-            va = jl_tparam(unw, l - 1);
-            if (jl_is_vararg_type(va))
-                va = jl_unwrap_vararg(va);
-            else
-                va = NULL;
-        }
-        struct invalidate_conflicting_env env = {{invalidate_backedges, (jl_value_t*)type, va}};
-        env.invalidated = 0;
-        env.max_world = max_world;
-        env.match.env = NULL;
-
+        jl_value_t **d;
+        size_t j, n;
        if (jl_is_method(oldvalue)) {
-            jl_typemap_intersection_visitor(((jl_method_t*)oldvalue)->specializations, 0, &env.match);
+            d = &oldvalue;
+            n = 1;
        }
        else {
            assert(jl_is_array(oldvalue));
-            jl_typemap_entry_t **d = (jl_typemap_entry_t**)jl_array_ptr_data(oldvalue);
-            size_t i, n = jl_array_len(oldvalue);
-            for (i = 0; i < n; i++) {
-                jl_typemap_intersection_visitor(d[i]->func.method->specializations, 0, &env.match);
+            d = jl_array_ptr_data(oldvalue);
+            n = jl_array_len(oldvalue);
+        }
+        for (j = 0; j < n; j++) {
+            jl_value_t *m = d[j];
+            if (jl_is_array(oldvalue))
+                m = ((jl_typemap_entry_t*)m)->func.value;
+            jl_svec_t *specializations = ((jl_method_t*)m)->specializations;
+            size_t i, l = jl_svec_len(specializations);
+            for (i = 0; i < l; i++) {
+                jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i);
+                if (mi != NULL && !jl_has_empty_intersection(type, (jl_value_t*)mi->specTypes))
+                    if (invalidate_backedges(mi, max_world))
+                        invalidated = 1;
            }
        }
-        if (env.invalidated)
-            invalidated = 1;
    }
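The `d = &oldvalue; n = 1;` setup folds the single-method case into the same loop as the array case by treating a pointer to one value as a one-element array; a standalone illustration of the idiom (names invented for the example):

    #include <stddef.h>
    #include <stdio.h>

    static void visit_all(const char **d, size_t n)
    {
        for (size_t j = 0; j < n; j++)
            puts(d[j]);
    }

    static void visit(const char *one, const char **many, size_t nmany)
    {
        const char **d;
        size_t n;
        if (many == NULL) {
            d = &one;   /* a pointer to one element is a length-1 array */
            n = 1;
        }
        else {
            d = many;
            n = nmany;
        }
        visit_all(d, n);
    }

Only specializations whose signature has a non-empty intersection with the new definition's `type` can be shadowed by it, so jl_has_empty_intersection gates the comparatively expensive invalidate_backedges call.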
    if (invalidated && JL_DEBUG_METHOD_INVALIDATION) {
        jl_uv_puts(JL_STDOUT, ">> ", 3);