From ff43366dc24cbd7e6ee72489e6a05df5b1f17dab Mon Sep 17 00:00:00 2001
From: Scott McMurray
Date: Tue, 9 Dec 2025 23:25:05 -0800
Subject: [PATCH 1/2] Update `wrapping_sh[lr]` docs and examples

---
 library/core/src/num/int_macros.rs  | 28 ++++++++++++++++++++++++----
 library/core/src/num/uint_macros.rs | 28 ++++++++++++++++++++++++----
 2 files changed, 48 insertions(+), 8 deletions(-)

diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 99662768a29f2..c260cd63f7c46 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -2288,6 +2288,13 @@ macro_rules! int_impl {
         /// Panic-free bitwise shift-left; yields `self << mask(rhs)`, where `mask` removes
         /// any high-order bits of `rhs` that would cause the shift to exceed the bitwidth of the type.
         ///
+        /// Beware that, unlike most other `wrapping_*` methods on integers, this
+        /// does *not* give the same result as doing the shift in infinite precision
+        /// then truncating as needed. The behaviour matches what shift instructions
+        /// do on many processors, and is what the `<<` operator does when overflow
+        /// checks are disabled, but numerically it's weird. Consider, instead,
+        /// using [`Self::unbounded_shl`] which has nicer behaviour.
+        ///
         /// Note that this is *not* the same as a rotate-left; the RHS of a wrapping shift-left is restricted to
         /// the range of the type, rather than the bits shifted out of the LHS being returned to the other end.
         /// The primitive integer types all implement a [`rotate_left`](Self::rotate_left) function,
@@ -2296,8 +2303,11 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        #[doc = concat!("assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(7), -128);")]
-        #[doc = concat!("assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(128), -1);")]
+        #[doc = concat!("assert_eq!((-1_", stringify!($SelfT), ").wrapping_shl(7), -128);")]
+        #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shl(", stringify!($BITS), "), 42);")]
+        #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shl(1).wrapping_shl(", stringify!($BITS_MINUS_ONE), "), 0);")]
+        #[doc = concat!("assert_eq!((-1_", stringify!($SelfT), ").wrapping_shl(128), -1);")]
+        #[doc = concat!("assert_eq!(5_", stringify!($SelfT), ".wrapping_shl(1025), 10);")]
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
@@ -2315,6 +2325,13 @@ macro_rules! int_impl {
         /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`, where `mask`
         /// removes any high-order bits of `rhs` that would cause the shift to exceed the bitwidth of the type.
         ///
+        /// Beware that, unlike most other `wrapping_*` methods on integers, this
+        /// does *not* give the same result as doing the shift in infinite precision
+        /// then truncating as needed. The behaviour matches what shift instructions
+        /// do on many processors, and is what the `>>` operator does when overflow
+        /// checks are disabled, but numerically it's weird. Consider, instead,
+        /// using [`Self::unbounded_shr`] which has nicer behaviour.
+        ///
         /// Note that this is *not* the same as a rotate-right; the RHS of a wrapping shift-right is restricted
         /// to the range of the type, rather than the bits shifted out of the LHS being returned to the other
         /// end. The primitive integer types all implement a [`rotate_right`](Self::rotate_right) function,
@@ -2323,8 +2340,11 @@ macro_rules! int_impl {
         /// # Examples
         ///
         /// ```
-        #[doc = concat!("assert_eq!((-128", stringify!($SelfT), ").wrapping_shr(7), -1);")]
-        /// assert_eq!((-128i16).wrapping_shr(64), -128);
+        #[doc = concat!("assert_eq!((-128_", stringify!($SelfT), ").wrapping_shr(7), -1);")]
+        #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shr(", stringify!($BITS), "), 42);")]
+        #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shr(1).wrapping_shr(", stringify!($BITS_MINUS_ONE), "), 0);")]
+        /// assert_eq!((-128_i16).wrapping_shr(64), -128);
+        #[doc = concat!("assert_eq!(10_", stringify!($SelfT), ".wrapping_shr(1025), 5);")]
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index c8224e92b17e4..89f330a063ace 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -2593,6 +2593,13 @@ macro_rules! uint_impl {
         /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
         /// where `mask` removes any high-order bits of `rhs` that
         /// would cause the shift to exceed the bitwidth of the type.
         ///
+        /// Beware that, unlike most other `wrapping_*` methods on integers, this
+        /// does *not* give the same result as doing the shift in infinite precision
+        /// then truncating as needed. The behaviour matches what shift instructions
+        /// do on many processors, and is what the `<<` operator does when overflow
+        /// checks are disabled, but numerically it's weird. Consider, instead,
+        /// using [`Self::unbounded_shl`] which has nicer behaviour.
+        ///
         /// Note that this is *not* the same as a rotate-left; the
         /// RHS of a wrapping shift-left is restricted to the range
         /// of the type, rather than the bits shifted out of the LHS
         ///
@@ -2603,8 +2610,11 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_shl(7), 128);")]
-        #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".wrapping_shl(128), 1);")]
+        #[doc = concat!("assert_eq!(1_", stringify!($SelfT), ".wrapping_shl(7), 128);")]
+        #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shl(", stringify!($BITS), "), 42);")]
+        #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shl(1).wrapping_shl(", stringify!($BITS_MINUS_ONE), "), 0);")]
+        #[doc = concat!("assert_eq!(1_", stringify!($SelfT), ".wrapping_shl(128), 1);")]
+        #[doc = concat!("assert_eq!(5_", stringify!($SelfT), ".wrapping_shl(1025), 10);")]
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
@@ -2623,6 +2633,13 @@ macro_rules! uint_impl {
         /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
         /// where `mask` removes any high-order bits of `rhs` that
         /// would cause the shift to exceed the bitwidth of the type.
         ///
+        /// Beware that, unlike most other `wrapping_*` methods on integers, this
+        /// does *not* give the same result as doing the shift in infinite precision
+        /// then truncating as needed. The behaviour matches what shift instructions
+        /// do on many processors, and is what the `>>` operator does when overflow
+        /// checks are disabled, but numerically it's weird. Consider, instead,
+        /// using [`Self::unbounded_shr`] which has nicer behaviour.
+        ///
         /// Note that this is *not* the same as a rotate-right; the
         /// RHS of a wrapping shift-right is restricted to the range
         /// of the type, rather than the bits shifted out of the LHS
         ///
@@ -2633,8 +2650,11 @@ macro_rules! uint_impl {
         /// # Examples
         ///
         /// ```
-        #[doc = concat!("assert_eq!(128", stringify!($SelfT), ".wrapping_shr(7), 1);")]
-        #[doc = concat!("assert_eq!(128", stringify!($SelfT), ".wrapping_shr(128), 128);")]
+        #[doc = concat!("assert_eq!(128_", stringify!($SelfT), ".wrapping_shr(7), 1);")]
+        #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shr(", stringify!($BITS), "), 42);")]
+        #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shr(1).wrapping_shr(", stringify!($BITS_MINUS_ONE), "), 0);")]
+        #[doc = concat!("assert_eq!(128_", stringify!($SelfT), ".wrapping_shr(128), 128);")]
+        #[doc = concat!("assert_eq!(10_", stringify!($SelfT), ".wrapping_shr(1025), 5);")]
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
         #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]

From f9b830c10239f5ff2c3a6b2b7f8183a4a855e9fe Mon Sep 17 00:00:00 2001
From: Scott McMurray
Date: Wed, 10 Dec 2025 18:59:52 -0800
Subject: [PATCH 2/2] Add more basic shift examples

---
 library/core/src/num/uint_macros.rs | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 89f330a063ace..9e69dec9547f6 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -2611,6 +2611,10 @@ macro_rules! uint_impl {
         ///
         /// ```
         #[doc = concat!("assert_eq!(1_", stringify!($SelfT), ".wrapping_shl(7), 128);")]
+        #[doc = concat!("assert_eq!(0b101_", stringify!($SelfT), ".wrapping_shl(0), 0b101);")]
+        #[doc = concat!("assert_eq!(0b101_", stringify!($SelfT), ".wrapping_shl(1), 0b1010);")]
+        #[doc = concat!("assert_eq!(0b101_", stringify!($SelfT), ".wrapping_shl(2), 0b10100);")]
+        #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_shl(2), ", stringify!($SelfT), "::MAX - 3);")]
         #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shl(", stringify!($BITS), "), 42);")]
         #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shl(1).wrapping_shl(", stringify!($BITS_MINUS_ONE), "), 0);")]
         #[doc = concat!("assert_eq!(1_", stringify!($SelfT), ".wrapping_shl(128), 1);")]
@@ -2655,6 +2655,10 @@ macro_rules! uint_impl {
         ///
         /// ```
         #[doc = concat!("assert_eq!(128_", stringify!($SelfT), ".wrapping_shr(7), 1);")]
+        #[doc = concat!("assert_eq!(0b1010_", stringify!($SelfT), ".wrapping_shr(0), 0b1010);")]
+        #[doc = concat!("assert_eq!(0b1010_", stringify!($SelfT), ".wrapping_shr(1), 0b101);")]
+        #[doc = concat!("assert_eq!(0b1010_", stringify!($SelfT), ".wrapping_shr(2), 0b10);")]
+        #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_shr(1), ", stringify!($SignedT), "::MAX.cast_unsigned());")]
         #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shr(", stringify!($BITS), "), 42);")]
         #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_shr(1).wrapping_shr(", stringify!($BITS_MINUS_ONE), "), 0);")]
         #[doc = concat!("assert_eq!(128_", stringify!($SelfT), ".wrapping_shr(128), 128);")]
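Reviewer note (not part of the patch): the masking behaviour these doc changes describe is easy to sanity-check locally. Below is a small standalone sketch, with values of my own choosing that mirror the doc examples, using `u8` as the concrete type; it assumes a toolchain where `unbounded_shl`/`unbounded_shr` (the methods the new paragraphs point readers toward) are available.

```rust
fn main() {
    // `wrapping_shl` masks the shift amount to the type's bit width, so on a
    // `u8` a shift of 1025 acts like a shift of 1025 % 8 == 1...
    assert_eq!(5_u8.wrapping_shl(1025), 10);

    // ...and a shift by exactly the bit width acts like no shift at all.
    assert_eq!(42_u8.wrapping_shl(8), 42);

    // Splitting the same total shift across two calls does discard every bit,
    // so the result is *not* "shift in infinite precision, then truncate".
    assert_eq!(42_u8.wrapping_shl(1).wrapping_shl(7), 0);

    // `unbounded_shl` has the infinite-precision-then-truncate behaviour:
    // any shift of 8 or more bits on a `u8` yields 0.
    assert_eq!(42_u8.unbounded_shl(8), 0);
    assert_eq!(5_u8.unbounded_shl(1025), 0);

    // The same masking applies to right shifts.
    assert_eq!(10_u8.wrapping_shr(1025), 5);
    assert_eq!(10_u8.unbounded_shr(1025), 0);

    println!("all assertions passed");
}
```

The chained-shift case is the one that tends to surprise people: shifting by 1 and then by 7 zeroes the value, while a single wrapping shift of 8 leaves it untouched, which is exactly the difference the new "Beware" paragraphs call out.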