Commit 46efde1

Use uppercase for const generic parameters (rust-lang#1035)
1 parent 81e50a3 commit 46efde1

7 files changed: +336 −337 lines changed

crates/core_arch/src/lib.rs

+1 −2
@@ -53,8 +53,7 @@
     clippy::shadow_reuse,
     clippy::cognitive_complexity,
     clippy::similar_names,
-    clippy::many_single_char_names,
-    non_upper_case_globals
+    clippy::many_single_char_names
 )]
 #![cfg_attr(test, allow(unused_imports))]
 #![no_std]

crates/core_arch/src/macros.rs

+3 −3
@@ -2,10 +2,10 @@
 
 // Helper struct used to trigger const eval errors when the const generic immediate value `imm` is
 // out of `bits`-bit range.
-pub(crate) struct ValidateConstImm<const imm: i32, const bits: i32>;
-impl<const imm: i32, const bits: i32> ValidateConstImm<imm, bits> {
+pub(crate) struct ValidateConstImm<const IMM: i32, const BITS: i32>;
+impl<const IMM: i32, const BITS: i32> ValidateConstImm<IMM, BITS> {
     pub(crate) const VALID: () = {
-        let _ = 1 / ((imm >= 0 && imm < (1 << bits)) as usize);
+        let _ = 1 / ((IMM >= 0 && IMM < (1 << BITS)) as usize);
     };
 }
 
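The helper above turns an out-of-range immediate into a divide-by-zero const-eval error, so an invalid shift count is rejected at compile time rather than at run time. A minimal self-contained sketch of the same trick (the `shift_left` wrapper and the direct reference to `VALID` are illustrative assumptions; in stdarch that reference is generated by macros such as `static_assert_imm8!`):

struct ValidateConstImm<const IMM: i32, const BITS: i32>;

impl<const IMM: i32, const BITS: i32> ValidateConstImm<IMM, BITS> {
    // Evaluates to `()` when IMM fits in BITS bits, and to a
    // division-by-zero const-eval error when it does not.
    const VALID: () = {
        let _ = 1 / ((IMM >= 0 && IMM < (1 << BITS)) as usize);
    };
}

fn shift_left<const IMM8: i32>(x: u32) -> u32 {
    // Referencing VALID forces the range check for this instantiation.
    let _ = ValidateConstImm::<IMM8, 8>::VALID;
    x << IMM8
}

fn main() {
    assert_eq!(shift_left::<3>(1), 8); // 3 is a valid 8-bit immediate
    // shift_left::<300>(1);           // 300 >= 256: fails const evaluation
}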

crates/core_arch/src/x86/avx2.rs

+40 −40
@@ -2923,46 +2923,46 @@ pub unsafe fn _mm256_sll_epi64(a: __m256i, count: __m128i) -> __m256i {
     transmute(psllq(a.as_i64x4(), count.as_i64x2()))
 }
 
-/// Shifts packed 16-bit integers in `a` left by `imm8` while
+/// Shifts packed 16-bit integers in `a` left by `IMM8` while
 /// shifting in zeros, return the results;
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsllw, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi16<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(pslliw(a.as_i16x16(), imm8))
+pub unsafe fn _mm256_slli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(pslliw(a.as_i16x16(), IMM8))
 }
 
-/// Shifts packed 32-bit integers in `a` left by `imm8` while
+/// Shifts packed 32-bit integers in `a` left by `IMM8` while
 /// shifting in zeros, return the results;
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpslld, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpslld, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi32<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psllid(a.as_i32x8(), imm8))
+pub unsafe fn _mm256_slli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psllid(a.as_i32x8(), IMM8))
 }
 
-/// Shifts packed 64-bit integers in `a` left by `imm8` while
+/// Shifts packed 64-bit integers in `a` left by `IMM8` while
 /// shifting in zeros, return the results;
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi64)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsllq, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_slli_epi64<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(pslliq(a.as_i64x4(), imm8))
+pub unsafe fn _mm256_slli_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(pslliq(a.as_i64x4(), IMM8))
 }
 
 /// Shifts 128-bit lanes in `a` left by `imm8` bytes while shifting in zeros.
@@ -3077,32 +3077,32 @@ pub unsafe fn _mm256_sra_epi32(a: __m256i, count: __m128i) -> __m256i {
     transmute(psrad(a.as_i32x8(), count.as_i32x4()))
 }
 
-/// Shifts packed 16-bit integers in `a` right by `imm8` while
+/// Shifts packed 16-bit integers in `a` right by `IMM8` while
 /// shifting in sign bits.
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsraw, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srai_epi16<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psraiw(a.as_i16x16(), imm8))
+pub unsafe fn _mm256_srai_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psraiw(a.as_i16x16(), IMM8))
 }
 
-/// Shifts packed 32-bit integers in `a` right by `imm8` while
+/// Shifts packed 32-bit integers in `a` right by `IMM8` while
 /// shifting in sign bits.
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrad, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srai_epi32<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psraid(a.as_i32x8(), imm8))
+pub unsafe fn _mm256_srai_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psraid(a.as_i32x8(), IMM8))
 }
 
 /// Shifts packed 32-bit integers in `a` right by the amount specified by the
@@ -3201,46 +3201,46 @@ pub unsafe fn _mm256_srl_epi64(a: __m256i, count: __m128i) -> __m256i {
     transmute(psrlq(a.as_i64x4(), count.as_i64x2()))
 }
 
-/// Shifts packed 16-bit integers in `a` right by `imm8` while shifting in
+/// Shifts packed 16-bit integers in `a` right by `IMM8` while shifting in
 /// zeros
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi16)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrlw, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi16<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psrliw(a.as_i16x16(), imm8))
+pub unsafe fn _mm256_srli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psrliw(a.as_i16x16(), IMM8))
 }
 
-/// Shifts packed 32-bit integers in `a` right by `imm8` while shifting in
+/// Shifts packed 32-bit integers in `a` right by `IMM8` while shifting in
 /// zeros
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi32)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrld, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi32<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psrlid(a.as_i32x8(), imm8))
+pub unsafe fn _mm256_srli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psrlid(a.as_i32x8(), IMM8))
 }
 
-/// Shifts packed 64-bit integers in `a` right by `imm8` while shifting in
+/// Shifts packed 64-bit integers in `a` right by `IMM8` while shifting in
 /// zeros
 ///
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi64)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpsrlq, imm8 = 7))]
+#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 7))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_srli_epi64<const imm8: i32>(a: __m256i) -> __m256i {
-    static_assert_imm8!(imm8);
-    transmute(psrliq(a.as_i64x4(), imm8))
+pub unsafe fn _mm256_srli_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    transmute(psrliq(a.as_i64x4(), IMM8))
 }
 
 /// Shifts packed 32-bit integers in `a` right by the amount specified by
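For callers, the renamed parameters are still supplied as const generic arguments. A small usage sketch, assuming an x86_64 target and guarding the unsafe calls with runtime AVX2 detection (the lane values are only illustrative):

#[cfg(target_arch = "x86_64")]
fn avx2_shift_demo() {
    if is_x86_feature_detected!("avx2") {
        use std::arch::x86_64::{
            __m256i, _mm256_set1_epi32, _mm256_slli_epi32, _mm256_srai_epi32,
            _mm256_storeu_si256,
        };
        unsafe {
            let a = _mm256_set1_epi32(-4);
            // Logical left shift by the const generic IMM8 = 3: -4 << 3 == -32.
            let shifted = _mm256_slli_epi32::<3>(a);
            // Arithmetic right shift by IMM8 = 1 keeps the sign bit: -32 >> 1 == -16.
            let back = _mm256_srai_epi32::<1>(shifted);

            let mut out = [0i32; 8];
            _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, back);
            assert_eq!(out, [-16; 8]);
        }
    }
}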

crates/core_arch/src/x86/sse.rs

+17 −17
@@ -992,7 +992,7 @@ pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 {
 }
 
 /// Shuffles packed single-precision (32-bit) floating-point elements in `a` and
-/// `b` using `mask`.
+/// `b` using `MASK`.
 ///
 /// The lower half of result takes values from `a` and the higher half from
 /// `b`. Mask is split to 2 control bits each to index the element from inputs.
@@ -1006,19 +1006,19 @@ pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 {
 /// does not cause a problem in C, however Rust's commitment to strong typing does not allow this.
 #[inline]
 #[target_feature(enable = "sse")]
-#[cfg_attr(test, assert_instr(shufps, mask = 3))]
+#[cfg_attr(test, assert_instr(shufps, MASK = 3))]
 #[rustc_legacy_const_generics(2)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm_shuffle_ps<const mask: i32>(a: __m128, b: __m128) -> __m128 {
-    static_assert_imm8!(mask);
+pub unsafe fn _mm_shuffle_ps<const MASK: i32>(a: __m128, b: __m128) -> __m128 {
+    static_assert_imm8!(MASK);
     simd_shuffle4(
         a,
         b,
         [
-            mask as u32 & 0b11,
-            (mask as u32 >> 2) & 0b11,
-            ((mask as u32 >> 4) & 0b11) + 4,
-            ((mask as u32 >> 6) & 0b11) + 4,
+            MASK as u32 & 0b11,
+            (MASK as u32 >> 2) & 0b11,
+            ((MASK as u32 >> 4) & 0b11) + 4,
+            ((MASK as u32 >> 6) & 0b11) + 4,
         ],
     )
 }
@@ -1701,9 +1701,9 @@ pub const _MM_HINT_ET0: i32 = 7;
 #[stable(feature = "simd_x86", since = "1.27.0")]
 pub const _MM_HINT_ET1: i32 = 6;
 
-/// Fetch the cache line that contains address `p` using the given `strategy`.
+/// Fetch the cache line that contains address `p` using the given `STRATEGY`.
 ///
-/// The `strategy` must be one of:
+/// The `STRATEGY` must be one of:
 ///
 /// * [`_MM_HINT_T0`](constant._MM_HINT_T0.html): Fetch into all levels of the
 ///   cache hierarchy.
@@ -1745,16 +1745,16 @@ pub const _MM_HINT_ET1: i32 = 6;
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_prefetch)
 #[inline]
 #[target_feature(enable = "sse")]
-#[cfg_attr(test, assert_instr(prefetcht0, strategy = _MM_HINT_T0))]
-#[cfg_attr(test, assert_instr(prefetcht1, strategy = _MM_HINT_T1))]
-#[cfg_attr(test, assert_instr(prefetcht2, strategy = _MM_HINT_T2))]
-#[cfg_attr(test, assert_instr(prefetchnta, strategy = _MM_HINT_NTA))]
+#[cfg_attr(test, assert_instr(prefetcht0, STRATEGY = _MM_HINT_T0))]
+#[cfg_attr(test, assert_instr(prefetcht1, STRATEGY = _MM_HINT_T1))]
+#[cfg_attr(test, assert_instr(prefetcht2, STRATEGY = _MM_HINT_T2))]
+#[cfg_attr(test, assert_instr(prefetchnta, STRATEGY = _MM_HINT_NTA))]
 #[rustc_legacy_const_generics(1)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm_prefetch<const strategy: i32>(p: *const i8) {
+pub unsafe fn _mm_prefetch<const STRATEGY: i32>(p: *const i8) {
     // We use the `llvm.prefetch` instrinsic with `cache type` = 1 (data cache).
-    // `locality` and `rw` are based on our `strategy`.
-    prefetch(p, (strategy >> 2) & 1, strategy & 3, 1);
+    // `locality` and `rw` are based on our `STRATEGY`.
+    prefetch(p, (STRATEGY >> 2) & 1, STRATEGY & 3, 1);
 }
 
 /// Returns vector of type __m128 with undefined elements.
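Likewise, the shuffle mask and the prefetch strategy are const generic arguments on the caller side. A short sketch, assuming an x86_64 target (SSE is part of the x86_64 baseline); note the braces required when the const argument is an expression or a named constant rather than a literal:

#[cfg(target_arch = "x86_64")]
fn sse_demo() {
    use std::arch::x86_64::{
        _mm_prefetch, _mm_set_ps, _mm_shuffle_ps, _mm_storeu_ps, _MM_HINT_T0, _MM_SHUFFLE,
    };
    unsafe {
        // _mm_set_ps lists lanes from high to low: a = [1.0, 2.0, 3.0, 4.0].
        let a = _mm_set_ps(4.0, 3.0, 2.0, 1.0);
        let b = _mm_set_ps(8.0, 7.0, 6.0, 5.0);

        // MASK = _MM_SHUFFLE(3, 2, 1, 0) picks lanes 0 and 1 from `a`
        // and lanes 2 and 3 from `b`.
        let c = _mm_shuffle_ps::<{ _MM_SHUFFLE(3, 2, 1, 0) }>(a, b);

        let mut out = [0.0f32; 4];
        _mm_storeu_ps(out.as_mut_ptr(), c);
        assert_eq!(out, [1.0, 2.0, 7.0, 8.0]);

        // The prefetch STRATEGY is a const generic as well.
        _mm_prefetch::<{ _MM_HINT_T0 }>(out.as_ptr() as *const i8);
    }
}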
