@@ -6568,15 +6568,19 @@ namespace ranges {
 
         if (_Count1 <= _Count2 && _Count1 <= _Capacity) { // buffer left range, then move parts
             _Uninitialized_backout<iter_value_t<_It>*> _Backout{
-                _Temp_ptr, _RANGES _Uninitialized_move_unchecked(_First, _Mid, _Temp_ptr, _Temp_ptr + _Count1).out};
+                _Temp_ptr, _RANGES _Uninitialized_move_unchecked(
+                    _First, _Mid, _Temp_ptr, _Temp_ptr + static_cast<ptrdiff_t>(_Count1))
+                    .out};
             const _It _New_mid = _RANGES _Move_unchecked(_STD move(_Mid), _STD move(_Last), _STD move(_First)).out;
             _RANGES _Move_unchecked(_Backout._First, _Backout._Last, _New_mid);
             return _New_mid;
         }
 
         if (_Count2 <= _Capacity) { // buffer right range, then move parts
             _Uninitialized_backout<iter_value_t<_It>*> _Backout{
-                _Temp_ptr, _RANGES _Uninitialized_move_unchecked(_Mid, _Last, _Temp_ptr, _Temp_ptr + _Count2).out};
+                _Temp_ptr, _RANGES _Uninitialized_move_unchecked(
+                    _Mid, _Last, _Temp_ptr, _Temp_ptr + static_cast<ptrdiff_t>(_Count2))
+                    .out};
             _RANGES _Move_backward_common(_First, _STD move(_Mid), _STD move(_Last));
             return _RANGES _Move_unchecked(_Backout._First, _Backout._Last, _STD move(_First)).out;
         }
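For context on the change above: the temp buffer's capacity is a `ptrdiff_t`, while `_Count1`/`_Count2` carry the iterator's difference type, which may be wider, so the new code narrows explicitly before doing pointer arithmetic. A standalone sketch of that pattern with illustrative names (not the library's code):

```cpp
#include <cassert>
#include <cstddef>
#include <iterator>

// Given a scratch buffer that can hold `capacity` elements, compute the end of the
// region that will receive `count` elements. `count` arrives as the iterator's
// difference type; the cast is value-preserving because of the precondition.
template <class It>
typename std::iterator_traits<It>::value_type* buffer_end_for(
    typename std::iterator_traits<It>::value_type* temp_ptr,
    typename std::iterator_traits<It>::difference_type count, std::ptrdiff_t capacity) {
    assert(count >= 0 && count <= capacity); // precondition: the buffer is big enough
    return temp_ptr + static_cast<std::ptrdiff_t>(count);
}
```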
@@ -8856,8 +8860,10 @@ namespace ranges {
             const iter_difference_t<_It> _Half_count_ceil = _Count - _Half_count;
             const _It _Mid = _First + _Half_count_ceil;
             if (_Half_count_ceil <= _Capacity) { // temp buffer big enough, sort each half using buffer
-                _Buffered_merge_sort_common(_First, _Mid, _Half_count_ceil, _Temp_ptr, _Pred, _Proj);
-                _Buffered_merge_sort_common(_Mid, _Last, _Half_count, _Temp_ptr, _Pred, _Proj);
+                _Buffered_merge_sort_common(
+                    _First, _Mid, static_cast<ptrdiff_t>(_Half_count_ceil), _Temp_ptr, _Pred, _Proj);
+                _Buffered_merge_sort_common(
+                    _Mid, _Last, static_cast<ptrdiff_t>(_Half_count), _Temp_ptr, _Pred, _Proj);
             } else { // temp buffer not big enough, divide and conquer
                 _Stable_sort_common_buffered(_First, _Mid, _Half_count_ceil, _Temp_ptr, _Capacity, _Pred, _Proj);
                 _Stable_sort_common_buffered(_Mid, _Last, _Half_count, _Temp_ptr, _Capacity, _Pred, _Proj);
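The enclosing `_Stable_sort_common_buffered` splits the range with the ceiling half on the left and recurses whenever a half does not fit the scratch buffer. A simplified sketch of that control flow using only public algorithms; the function name and the use of `std::stable_sort`/`std::inplace_merge` as stand-ins are assumptions for illustration:

```cpp
#include <algorithm>
#include <cstddef>

template <class It, class Pred>
void stable_sort_buffered_sketch(It first, It last, std::ptrdiff_t capacity, Pred pred) {
    const auto count = last - first;
    if (count <= 1) {
        return;
    }
    const auto half_floor = count >> 1;
    const auto half_ceil  = count - half_floor; // left half gets the extra element
    const It mid          = first + half_ceil;
    if (half_ceil <= capacity) {
        // each half fits the scratch buffer: a buffered merge sort would run here
        std::stable_sort(first, mid, pred);
        std::stable_sort(mid, last, pred);
    } else {
        // buffer too small: divide and conquer
        stable_sort_buffered_sketch(first, mid, capacity, pred);
        stable_sort_buffered_sketch(mid, last, capacity, pred);
    }
    std::inplace_merge(first, mid, last, pred); // stably merge the two sorted halves
}
```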
@@ -8869,24 +8875,24 @@ namespace ranges {
         }
 
         template <class _It, class _Pr, class _Pj>
-        static void _Buffered_merge_sort_common(const _It _First, const _It _Last, const iter_difference_t<_It> _Count,
+        static void _Buffered_merge_sort_common(const _It _First, const _It _Last, const ptrdiff_t _Count,
             iter_value_t<_It>* const _Temp_ptr, _Pr _Pred, _Pj _Proj) {
             // sort using temp buffer for merges
-            // pre: _Count <= capacity of buffer at _Temp_ptr; also allows safe narrowing to ptrdiff_t
+            // pre: _Count <= capacity of buffer at _Temp_ptr
             _STL_INTERNAL_STATIC_ASSERT(random_access_iterator<_It>);
             _STL_INTERNAL_STATIC_ASSERT(sortable<_It, _Pr, _Pj>);
             _STL_INTERNAL_CHECK(_Last - _First == _Count);
 
             _Insertion_sort_isort_max_chunks(_First, _Last, _Count, _Pred, _Proj);
             // merge adjacent pairs of chunks to and from temp buffer
-            if (_Count <= _Isort_max<_It>) {
+            if (_Count <= _ISORT_MAX) {
                 return;
             }
 
             // do the first merge, constructing elements in the temporary buffer
             _Uninitialized_chunked_merge_common(_First, _Last, _Temp_ptr, _Count, _Pred, _Proj);
             _Uninitialized_backout<iter_value_t<_It>*> _Backout{_Temp_ptr, _Temp_ptr + _Count};
-            iter_difference_t<_It> _Chunk_size = _Isort_max<_It>;
+            ptrdiff_t _Chunk_size = _ISORT_MAX;
             for (;;) {
                 // unconditionally merge elements back into the source buffer
                 _Chunk_size <<= 1;
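`_Buffered_merge_sort_common` is a bottom-up merge sort: insertion-sort fixed-size chunks, then merge adjacent chunk pairs while doubling the chunk size, ping-ponging between the range and the temp buffer. A rough standalone sketch of that shape (the 32-element chunk size mirrors the insertion-sort threshold; the vector-based scratch buffer and names are illustrative):

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

template <class T, class Pred>
void bottom_up_merge_sort_sketch(std::vector<T>& v, Pred pred) {
    const std::ptrdiff_t n = static_cast<std::ptrdiff_t>(v.size());
    constexpr std::ptrdiff_t isort_max = 32;
    for (std::ptrdiff_t i = 0; i < n; i += isort_max) { // stably sort each chunk
        std::stable_sort(v.begin() + i, v.begin() + std::min(i + isort_max, n), pred);
    }
    std::vector<T> temp(v.size()); // scratch buffer (the library reuses one preallocated buffer)
    for (std::ptrdiff_t chunk = isort_max; chunk < n; chunk <<= 1) { // merge chunk pairs, doubling
        for (std::ptrdiff_t i = 0; i < n; i += 2 * chunk) {
            const std::ptrdiff_t mid  = std::min(i + chunk, n);
            const std::ptrdiff_t last = std::min(i + 2 * chunk, n);
            std::merge(v.begin() + i, v.begin() + mid, v.begin() + mid, v.begin() + last,
                temp.begin() + i, pred);
        }
        v.swap(temp); // next pass reads what this pass just wrote
    }
}
```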
@@ -8902,14 +8908,13 @@ namespace ranges {
         }
 
         template <class _It, class _Pr, class _Pj>
-        static void _Insertion_sort_isort_max_chunks(
-            _It _First, _It _Last, iter_difference_t<_It> _Count, _Pr _Pred, _Pj _Proj) {
+        static void _Insertion_sort_isort_max_chunks(_It _First, _It _Last, ptrdiff_t _Count, _Pr _Pred, _Pj _Proj) {
             // insertion sort every chunk of distance _Isort_max<_It> in [_First, _Last)
             _STL_INTERNAL_STATIC_ASSERT(random_access_iterator<_It>);
             _STL_INTERNAL_STATIC_ASSERT(sortable<_It, _Pr, _Pj>);
             _STL_INTERNAL_CHECK(_RANGES distance(_First, _Last) == _Count);
 
-            for (; _Isort_max<_It> < _Count; _Count -= _Isort_max<_It>) { // sort chunks
+            for (; _ISORT_MAX < _Count; _Count -= _ISORT_MAX) { // sort chunks
                 _First = _RANGES _Insertion_sort_common(_First, _First + _Isort_max<_It>, _Pred, _Proj);
             }
 
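The chunk pre-sorting step depends on insertion sort being stable. For reference, a compact stable insertion sort over a random-access range written with public algorithms (an illustration, not the library's `_Insertion_sort_common`):

```cpp
#include <algorithm>
#include <iterator>

template <class It, class Pred>
void insertion_sort_sketch(It first, It last, Pred pred) {
    for (It i = first; i != last; ++i) {
        // upper_bound keeps equal elements in their original order, so the sort is stable
        std::rotate(std::upper_bound(first, i, *i, pred), i, std::next(i));
    }
}
```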
@@ -8918,8 +8923,8 @@ namespace ranges {
         }
 
         template <class _It, class _Pr, class _Pj>
-        static void _Uninitialized_chunked_merge_common(_It _First, const _It _Last, iter_value_t<_It>* const _Dest,
-            iter_difference_t<_It> _Count, _Pr _Pred, _Pj _Proj) {
+        static void _Uninitialized_chunked_merge_common(
+            _It _First, const _It _Last, iter_value_t<_It>* const _Dest, ptrdiff_t _Count, _Pr _Pred, _Pj _Proj) {
             // move to uninitialized merging adjacent chunks of distance _Isort_max<_It>
             _STL_INTERNAL_STATIC_ASSERT(random_access_iterator<_It>);
             _STL_INTERNAL_STATIC_ASSERT(sortable<_It, _Pr, _Pj>);
@@ -8928,14 +8933,14 @@ namespace ranges {
 
             _Uninitialized_backout<iter_value_t<_It>*> _Backout{_Dest};
             const auto _Backout_end = _Dest + _Count;
-            while (_Isort_max<_It> < _Count) {
-                _Count -= _Isort_max<_It>;
-                const auto _Chunk2 = (_STD min)(_Isort_max<_It>, _Count);
+            while (_ISORT_MAX < _Count) {
+                _Count -= _ISORT_MAX;
+                const auto _Chunk2 = (_STD min)(static_cast<ptrdiff_t>(_ISORT_MAX), _Count);
                 _Count -= _Chunk2;
 
                 auto _Mid1 = _First + _Isort_max<_It>;
-                auto _Last1 = _Mid1 + _Chunk2;
-                auto _Last2 = _Backout._Last + _Isort_max<_It> + _Chunk2;
+                auto _Last1 = _Mid1 + static_cast<iter_difference_t<_It>>(_Chunk2);
+                auto _Last2 = _Backout._Last + _ISORT_MAX + _Chunk2;
                 _Backout._Last = _Uninitialized_merge_move(
                     _STD move(_First), _STD move(_Mid1), _Last1, _Backout._Last, _Last2, _Pred, _Proj);
                 _First = _STD move(_Last1);
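`_Uninitialized_chunked_merge_common` does the first merge pass by move-constructing elements straight into the raw temp buffer, which is why `_Uninitialized_backout` guards the destination: elements constructed so far must be destroyed if a comparison throws. A minimal sketch of one such merge-into-raw-memory step built from public facilities; the function name and guard strategy here are assumptions:

```cpp
#include <memory>
#include <utility>

// Move-merge the sorted ranges [first1, last1) and [first2, last2) into the
// uninitialized storage at dest; on an exception, destroy everything already
// constructed before rethrowing, then report one past the last constructed element.
template <class It, class T, class Pred>
T* uninitialized_merge_move_sketch(It first1, It last1, It first2, It last2, T* dest, Pred pred) {
    T* constructed_last = dest;
    try {
        while (first1 != last1 && first2 != last2) {
            if (pred(*first2, *first1)) { // take from the right only when strictly smaller: stable
                std::construct_at(constructed_last, std::move(*first2));
                ++first2;
            } else {
                std::construct_at(constructed_last, std::move(*first1));
                ++first1;
            }
            ++constructed_last;
        }
        for (; first1 != last1; ++first1, ++constructed_last) {
            std::construct_at(constructed_last, std::move(*first1));
        }
        for (; first2 != last2; ++first2, ++constructed_last) {
            std::construct_at(constructed_last, std::move(*first2));
        }
        return constructed_last;
    } catch (...) {
        std::destroy(dest, constructed_last); // backout: destroy what was constructed
        throw;
    }
}
```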
@@ -9015,8 +9020,8 @@ namespace ranges {
         }
 
         template <class _It1, class _It2, class _Pr, class _Pj>
-        static void _Chunked_merge_common(_It1 _First, const _It1 _Last, _It2 _Dest,
-            const iter_difference_t<_It1> _Chunk_size, iter_difference_t<_It1> _Count, _Pr _Pred, _Pj _Proj) {
+        static void _Chunked_merge_common(_It1 _First, const _It1 _Last, _It2 _Dest, const ptrdiff_t _Chunk_size,
+            ptrdiff_t _Count, _Pr _Pred, _Pj _Proj) {
             // move merging adjacent chunks of distance _Chunk_size
             _STL_INTERNAL_STATIC_ASSERT(random_access_iterator<_It1>);
             _STL_INTERNAL_STATIC_ASSERT(sortable<_It1, _Pr, _Pj>);
@@ -9029,8 +9034,8 @@ namespace ranges {
                 const auto _Right_chunk_size = (_STD min)(_Chunk_size, _Count);
                 _Count -= _Right_chunk_size;
 
-                auto _Mid1 = _First + _Chunk_size;
-                auto _Last1 = _Mid1 + _Right_chunk_size;
+                auto _Mid1 = _First + static_cast<iter_difference_t<_It1>>(_Chunk_size);
+                auto _Last1 = _Mid1 + static_cast<iter_difference_t<_It1>>(_Right_chunk_size);
                 _Dest = _Merge_move_common(_STD move(_First), _STD move(_Mid1), _Last1, _Dest, _Pred, _Proj);
                 _First = _STD move(_Last1);
             }
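All of these helpers sit behind `std::ranges::stable_sort`. A small usage example of the public entry point with arbitrary sample data:

```cpp
#include <algorithm>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Employee {
    std::string name;
    int department;
};

int main() {
    std::vector<Employee> staff{{"Ada", 2}, {"Grace", 1}, {"Edsger", 2}, {"Barbara", 1}};

    // Sort by department; employees within a department keep their original relative order.
    std::ranges::stable_sort(staff, std::ranges::less{}, &Employee::department);

    for (const auto& e : staff) {
        std::cout << e.department << ' ' << e.name << '\n';
    }
}
```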