@@ -93,7 +93,7 @@ impl<T, const PAUSE: bool> BasicSpinMutex<T, PAUSE> {
     /// let mut guard = mutex.lock();
     /// *guard = 42;
     /// ```
-    pub fn lock(&self) -> BasicSpinMutexGuard<T, PAUSE> {
+    pub fn lock(&self) -> BasicSpinMutexGuard<'_, T, PAUSE> {
         while self
             .locked
             .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
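For context on the loop this hunk touches, here is a minimal, self-contained sketch of the same acquire pattern using std atomics. `Spin` is an illustrative stand-in, not the crate's `BasicSpinMutex`, and the comment about `PAUSE` is an assumption about what the const generic gates:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Illustrative stand-in for the lock loop above: spin until the weak CAS
// flips `locked` from false to true. Acquire on success orders the critical
// section after lock acquisition; Relaxed on failure is fine, we just retry.
struct Spin {
    locked: AtomicBool,
}

impl Spin {
    fn lock(&self) {
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop(); // presumably what the PAUSE const generic toggles
        }
    }

    fn unlock(&self) {
        // Release pairs with the Acquire above.
        self.locked.store(false, Ordering::Release);
    }
}
```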
@@ -126,7 +126,7 @@ impl<T, const PAUSE: bool> BasicSpinMutex<T, PAUSE> {
     /// println!("Lock is currently held by another thread");
     /// };
     /// ```
-    pub fn try_lock(&self) -> Option<BasicSpinMutexGuard<T, PAUSE>> {
+    pub fn try_lock(&self) -> Option<BasicSpinMutexGuard<'_, T, PAUSE>> {
         if self
             .locked
             .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
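Note the non-blocking path uses the strong `compare_exchange` rather than the weak variant in `lock`: the strong form cannot fail spuriously, so a single attempt is a reliable contention check. A reduced sketch of that pattern:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// One strong CAS: Ok means we own the lock, Err means someone else does.
// A spurious failure (allowed for compare_exchange_weak) would turn into a
// false "contended" answer here; the strong variant avoids that.
fn try_acquire(locked: &AtomicBool) -> bool {
    locked
        .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_ok()
}
```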
@@ -743,7 +743,7 @@ impl ThreadPool {
     /// // Work executes when _op is dropped
     /// }
     /// ```
-    pub fn for_threads<F>(&mut self, function: F) -> ForThreadsOperation<F>
+    pub fn for_threads<F>(&mut self, function: F) -> ForThreadsOperation<'_, F>
     where
         F: Fn(usize, usize) + Sync,
     {
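All the signature changes in this patch follow the same rule: the returned guard or operation borrows `self`, and `'_` makes that elided lifetime visible in the path, which is what the `elided_lifetimes_in_paths` lint (in the `rust-2018-idioms` group) asks for. A reduced sketch with illustrative stand-in types, not the crate's:

```rust
struct Pool;

// The operation borrows the pool mutably until it is dropped.
struct Operation<'a> {
    _pool: &'a mut Pool,
}

impl Pool {
    // Writing `Operation<'_>` instead of `Operation` changes nothing about
    // the inferred lifetime; it only makes the borrow of `self` explicit in
    // the signature, satisfying `elided_lifetimes_in_paths`.
    fn start(&mut self) -> Operation<'_> {
        Operation { _pool: self }
    }
}
```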
@@ -773,7 +773,7 @@ impl ThreadPool {
     /// std::hint::black_box(result); // Prevent optimization
     /// });
     /// ```
-    pub fn for_n<F>(&mut self, n: usize, function: F) -> ForNOperation<F>
+    pub fn for_n<F>(&mut self, n: usize, function: F) -> ForNOperation<'_, F>
     where
         F: Fn(Prong) + Sync,
     {
@@ -810,7 +810,7 @@ impl ThreadPool {
     /// }
     /// });
     /// ```
-    pub fn for_n_dynamic<F>(&mut self, n: usize, function: F) -> ForNDynamicOperation<F>
+    pub fn for_n_dynamic<F>(&mut self, n: usize, function: F) -> ForNDynamicOperation<'_, F>
     where
         F: Fn(Prong) + Sync,
     {
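The `for_n` / `for_n_dynamic` pair differ in scheduling: static splits versus threads pulling work as they finish. A generic sketch of the dynamic idea, using a shared atomic cursor; this is the standard technique, not necessarily this crate's internals:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Dynamic scheduling sketch: each thread repeatedly claims the next unclaimed
// index, so a long-running task can't stall a statically assigned range.
fn drain_tasks(cursor: &AtomicUsize, n: usize, work: &(impl Fn(usize) + Sync)) {
    loop {
        let i = cursor.fetch_add(1, Ordering::Relaxed);
        if i >= n {
            break;
        }
        work(i);
    }
}
```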
@@ -853,7 +853,7 @@ impl ThreadPool {
     /// prong.thread_index, start_index, start_index + count);
     /// });
     /// ```
-    pub fn for_slices<F>(&mut self, n: usize, function: F) -> ForSlicesOperation<F>
+    pub fn for_slices<F>(&mut self, n: usize, function: F) -> ForSlicesOperation<'_, F>
     where
         F: Fn(Prong, usize) + Sync,
     {
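Per the doc example above, `for_slices` hands each thread a start/count window. The closest safe-Rust analogue is disjoint mutable chunks; a sketch with equal-sized chunks (the pool sizes them fairly with `IndexedSplit` instead), assuming `threads > 0`:

```rust
// Disjoint mutable windows over one buffer: each chunk can go to its own
// thread because the borrows never overlap. Assumes threads > 0.
fn partition<'a>(
    data: &'a mut [u64],
    threads: usize,
) -> impl Iterator<Item = &'a mut [u64]> + 'a {
    let per = data.len().div_ceil(threads).max(1);
    data.chunks_mut(per)
}
```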
@@ -2366,32 +2366,38 @@ impl<T> RoundRobinVec<T> {
                 let threads_in_colocation = pool.count_threads_in(colocation_index);
                 let thread_local_index = pool.locate_thread_in(thread_index, colocation_index);

-                if node_len > current_len {
-                    // Growing: construct new elements in parallel
-                    let new_elements = node_len - current_len;
-                    let split = IndexedSplit::new(new_elements, threads_in_colocation);
-                    let range = split.get(thread_local_index);
-
-                    unsafe {
-                        let ptr = node_vec.as_mut_ptr();
-                        for i in range {
-                            let idx = current_len + i;
-                            core::ptr::write(ptr.add(idx), value.clone());
+                match node_len.cmp(&current_len) {
+                    std::cmp::Ordering::Greater => {
+                        // Growing: construct new elements in parallel
+                        let new_elements = node_len - current_len;
+                        let split = IndexedSplit::new(new_elements, threads_in_colocation);
+                        let range = split.get(thread_local_index);
+
+                        unsafe {
+                            let ptr = node_vec.as_mut_ptr();
+                            for i in range {
+                                let idx = current_len + i;
+                                core::ptr::write(ptr.add(idx), value.clone());
+                            }
                         }
                     }
-                } else if node_len < current_len {
-                    // Shrinking: drop elements in parallel
-                    let elements_to_drop = current_len - node_len;
-                    let split = IndexedSplit::new(elements_to_drop, threads_in_colocation);
-                    let range = split.get(thread_local_index);
-
-                    unsafe {
-                        let ptr = node_vec.as_mut_ptr();
-                        for i in range {
-                            let idx = node_len + i;
-                            core::ptr::drop_in_place(ptr.add(idx));
+                    std::cmp::Ordering::Less => {
+                        // Shrinking: drop elements in parallel
+                        let elements_to_drop = current_len - node_len;
+                        let split = IndexedSplit::new(elements_to_drop, threads_in_colocation);
+                        let range = split.get(thread_local_index);
+
+                        unsafe {
+                            let ptr = node_vec.as_mut_ptr();
+                            for i in range {
+                                let idx = node_len + i;
+                                core::ptr::drop_in_place(ptr.add(idx));
+                            }
                         }
                     }
+                    std::cmp::Ordering::Equal => {
+                        // No change needed
+                    }
                 }
             }
         });
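This refactor is behavior-preserving: the `if` / `else if` on lengths becomes an exhaustive `match` on `Ord::cmp`, which turns the previously implicit no-op case into a visible `Equal` arm (the rewrite clippy's `comparison_chain` lint suggests). In miniature:

```rust
use std::cmp::Ordering;

// Same decision tree as the hunk above, reduced to its shape: every case of
// the comparison is spelled out, so a forgotten branch cannot compile.
fn resize_plan(new_len: usize, current_len: usize) -> &'static str {
    match new_len.cmp(&current_len) {
        Ordering::Greater => "grow: construct the trailing elements",
        Ordering::Less => "shrink: drop the trailing elements",
        Ordering::Equal => "no change needed",
    }
}
```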
@@ -2428,11 +2434,13 @@ impl<T> SafePtr<T> {
     }

     /// Accesses the element at the given index.
+    #[allow(clippy::mut_from_ref)]
     pub fn get_mut_at(&self, index: usize) -> &mut T {
         unsafe { &mut *self.0.add(index) }
     }

     /// Accesses the element.
+    #[allow(clippy::mut_from_ref)]
     pub fn get_mut(&self) -> &mut T {
         unsafe { &mut *self.0 }
     }
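clippy's `mut_from_ref` fires because a `&mut T` derived from `&self` looks like it could alias. The `allow` here asserts, rather than proves, that callers uphold exclusivity; the usual justification for a type like `SafePtr` is that parallel callers touch disjoint indices. A reduced sketch of the flagged shape, with an illustrative `Raw` type:

```rust
// Minimal shape of the pattern the lint flags: &self in, &mut T out.
// This is only sound if no two live calls return overlapping elements,
// which is exactly the discipline the #[allow] is vouching for.
struct Raw<T>(*mut T);

impl<T> Raw<T> {
    #[allow(clippy::mut_from_ref)]
    fn get_mut_at(&self, index: usize) -> &mut T {
        unsafe { &mut *self.0.add(index) }
    }
}
```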
@@ -2778,7 +2786,7 @@
 /// The remaining chunks have size `floor(tasks / threads)`.
 ///
 /// This ensures optimal load balancing across threads with minimal size variance.
-/// See: https://lemire.me/blog/2025/05/22/dividing-an-array-into-fair-sized-chunks/
+/// See: <https://lemire.me/blog/2025/05/22/dividing-an-array-into-fair-sized-chunks/>
 #[derive(Debug, Clone)]
 pub struct IndexedSplit {
     quotient: usize,