Skip to main content

MInt

Struct MInt 

Source
#[repr(transparent)]
pub struct MInt<M>
where M: MIntBase,
{ x: M::Inner, _marker: PhantomData<fn() -> M>, }

Fields§

§x: M::Inner§_marker: PhantomData<fn() -> M>

Implementations§

Source§

impl<M> MInt<M>
where M: MIntConvert<u32>,

Source

pub fn sqrt(self) -> Option<Self>

Examples found in repository:
crates/competitive/src/math/formal_power_series/mod.rs (line 100)
99    fn sqrt_coefficient(&self) -> Option<Self> {
100        self.sqrt()
101    }
More examples
Hide additional examples
crates/library_checker/src/number_theory/sqrt_mod.rs (line 12)
6pub fn sqrt_mod(reader: impl Read, mut writer: impl Write) {
7    let s = read_all_unchecked(reader);
8    let mut scanner = Scanner::new(&s);
9    scan!(scanner, q, yp: [(u32, u32)]);
10    for (y, p) in yp.take(q) {
11        DynMIntU32::set_mod(p);
12        if let Some(x) = DynMIntU32::from(y).sqrt() {
13            writeln!(writer, "{}", x).ok();
14        } else {
15            writeln!(writer, "-1").ok();
16        }
17    }
18}
Source§

impl<M> MInt<M>
where M: MIntConvert,

Source

pub fn new(x: M::Inner) -> Self

Examples found in repository:
crates/library_checker/src/number_theory/sum_of_totient_function.rs (line 19)
12pub fn sum_of_totient_function(reader: impl Read, mut writer: impl Write) {
13    let s = read_all_unchecked(reader);
14    let mut scanner = Scanner::new(&s);
15    scan!(scanner, n: u64);
16    let mut s = 1;
17    let mut pp = 0;
18    let mut pc = 0;
19    let inv2 = M::new(2).inv();
20    let qa = QuotientArray::from_fn(n, |i| [M::from(i), M::from(i) * M::from(i + 1) * inv2])
21        .map(|[x, y]| [x - M::one(), y - M::one()])
22        .lucy_dp::<ArrayOperation<AdditiveOperation<_>, 2>>(|[x, y], p| [x, y * M::from(p)])
23        .map(|[x, y]| y - x)
24        .min_25_sieve::<AddMulOperation<_>>(|p, c| {
25            if pp != p || pc > c {
26                pp = p;
27                pc = 1;
28                s = p - 1;
29            }
30            while pc < c {
31                pc += 1;
32                s *= p;
33            }
34            M::from(s)
35        });
36    writeln!(writer, "{}", qa[n]).ok();
37}
More examples
Hide additional examples
crates/competitive/src/math/number_theoretic_transform.rs (line 991)
990    fn inverse_transform(f: Self::F, len: usize) -> Self::T {
991        let t1 = MInt::<N2>::new(N1::get_mod()).inv();
992        let m1 = MInt::<M>::from(N1::get_mod());
993        let m1_3 = MInt::<N3>::new(N1::get_mod());
994        let t2 = (m1_3 * MInt::<N3>::new(N2::get_mod())).inv();
995        let m2 = m1 * MInt::<M>::from(N2::get_mod());
996        Convolve::<N1>::inverse_transform(f.0, len)
997            .into_iter()
998            .zip(Convolve::<N2>::inverse_transform(f.1, len))
999            .zip(Convolve::<N3>::inverse_transform(f.2, len))
1000            .map(|((c1, c2), c3)| {
1001                let d1 = c1.inner();
1002                let d2 = ((c2 - MInt::<N2>::from(d1)) * t1).inner();
1003                let x = MInt::<N3>::new(d1) + MInt::<N3>::new(d2) * m1_3;
1004                let d3 = ((c3 - x) * t2).inner();
1005                MInt::<M>::from(d1) + MInt::<M>::from(d2) * m1 + MInt::<M>::from(d3) * m2
1006            })
1007            .collect()
1008    }
1009    fn multiply(f: &mut Self::F, g: &Self::F) {
1010        assert_eq!(f.0.len(), g.0.len());
1011        assert_eq!(f.1.len(), g.1.len());
1012        assert_eq!(f.2.len(), g.2.len());
1013        for (f, g) in f.0.iter_mut().zip(g.0.iter()) {
1014            *f *= *g;
1015        }
1016        for (f, g) in f.1.iter_mut().zip(g.1.iter()) {
1017            *f *= *g;
1018        }
1019        for (f, g) in f.2.iter_mut().zip(g.2.iter()) {
1020            *f *= *g;
1021        }
1022    }
1023    fn convolve(a: Self::T, b: Self::T) -> Self::T {
1024        if Self::length(&a).max(Self::length(&b)) <= 300 {
1025            return convolve_karatsuba(&a, &b);
1026        }
1027        if Self::length(&a).min(Self::length(&b)) <= 60 {
1028            return convolve_naive(&a, &b);
1029        }
1030        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
1031        let mut a = Self::transform(a, len);
1032        let b = Self::transform(b, len);
1033        Self::multiply(&mut a, &b);
1034        Self::inverse_transform(a, len)
1035    }
1036}
1037
1038impl<N1, N2, N3> ConvolveSteps for Convolve<(u64, (N1, N2, N3))>
1039where
1040    N1: Montgomery32NttModulus,
1041    N2: Montgomery32NttModulus,
1042    N3: Montgomery32NttModulus,
1043{
1044    type T = Vec<u64>;
1045    type F = (MVec<N1>, MVec<N2>, MVec<N3>);
1046
1047    fn length(t: &Self::T) -> usize {
1048        t.len()
1049    }
1050
1051    fn transform(t: Self::T, len: usize) -> Self::F {
1052        let npot = len.max(1).next_power_of_two();
1053        let mut f = (
1054            MVec::<N1>::with_capacity(npot),
1055            MVec::<N2>::with_capacity(npot),
1056            MVec::<N3>::with_capacity(npot),
1057        );
1058        for t in t {
1059            f.0.push(t.into());
1060            f.1.push(t.into());
1061            f.2.push(t.into());
1062        }
1063        f.0.resize_with(npot, Zero::zero);
1064        f.1.resize_with(npot, Zero::zero);
1065        f.2.resize_with(npot, Zero::zero);
1066        ntt(&mut f.0);
1067        ntt(&mut f.1);
1068        ntt(&mut f.2);
1069        f
1070    }
1071
1072    fn inverse_transform(f: Self::F, len: usize) -> Self::T {
1073        let t1 = MInt::<N2>::new(N1::get_mod()).inv();
1074        let m1 = N1::get_mod() as u64;
1075        let m1_3 = MInt::<N3>::new(N1::get_mod());
1076        let t2 = (m1_3 * MInt::<N3>::new(N2::get_mod())).inv();
1077        let m2 = m1 * N2::get_mod() as u64;
1078        Convolve::<N1>::inverse_transform(f.0, len)
1079            .into_iter()
1080            .zip(Convolve::<N2>::inverse_transform(f.1, len))
1081            .zip(Convolve::<N3>::inverse_transform(f.2, len))
1082            .map(|((c1, c2), c3)| {
1083                let d1 = c1.inner();
1084                let d2 = ((c2 - MInt::<N2>::from(d1)) * t1).inner();
1085                let x = MInt::<N3>::new(d1) + MInt::<N3>::new(d2) * m1_3;
1086                let d3 = ((c3 - x) * t2).inner();
1087                d1 as u64 + d2 as u64 * m1 + d3 as u64 * m2
1088            })
1089            .collect()
1090    }
1091
1092    fn multiply(f: &mut Self::F, g: &Self::F) {
1093        assert_eq!(f.0.len(), g.0.len());
1094        assert_eq!(f.1.len(), g.1.len());
1095        assert_eq!(f.2.len(), g.2.len());
1096        for (f, g) in f.0.iter_mut().zip(g.0.iter()) {
1097            *f *= *g;
1098        }
1099        for (f, g) in f.1.iter_mut().zip(g.1.iter()) {
1100            *f *= *g;
1101        }
1102        for (f, g) in f.2.iter_mut().zip(g.2.iter()) {
1103            *f *= *g;
1104        }
1105    }
1106
1107    fn convolve(a: Self::T, b: Self::T) -> Self::T {
1108        if Self::length(&a).max(Self::length(&b)) <= 300 {
1109            return convolve_karatsuba(&a, &b);
1110        }
1111        if Self::length(&a).min(Self::length(&b)) <= 60 {
1112            return convolve_naive(&a, &b);
1113        }
1114        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
1115        let mut a = Self::transform(a, len);
1116        let b = Self::transform(b, len);
1117        Self::multiply(&mut a, &b);
1118        Self::inverse_transform(a, len)
1119    }
1120}
1121
1122pub trait NttReuse: ConvolveSteps {
1123    const MULTIPLE: bool = true;
1124
1125    /// F(a) → F(a + [0] * a.len())
1126    fn ntt_doubling(f: Self::F) -> Self::F;
1127
1128    /// F(a(x)), F(b(x)) → even(F(a(x) * b(-x)))
1129    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F;
1130
1131    /// F(a(x)), F(b(x)) → odd(F(a(x) * b(-x)))
1132    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F;
1133}
1134
1135thread_local!(
1136    static BIT_REVERSE: UnsafeCell<Vec<Vec<usize>>> = const { UnsafeCell::new(vec![]) };
1137);
1138
1139impl<M> NttReuse for Convolve<M>
1140where
1141    M: Montgomery32NttModulus,
1142{
1143    const MULTIPLE: bool = false;
1144
1145    fn ntt_doubling(mut f: Self::F) -> Self::F {
1146        let n = f.len();
1147        let k = n.trailing_zeros() as usize;
1148        let mut a = Self::inverse_transform(f.clone(), n);
1149        let mut rot = MInt::<M>::one();
1150        let zeta = MInt::<M>::new_unchecked(M::INFO.root[k + 1]);
1151        for a in a.iter_mut() {
1152            *a *= rot;
1153            rot *= zeta;
1154        }
1155        f.extend(Self::transform(a, n));
1156        f
1157    }
1158
1159    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1160        assert_eq!(f.len(), g.len());
1161        assert!(f.len().is_power_of_two());
1162        assert!(f.len() >= 2);
1163        let inv2 = MInt::<M>::from(2).inv();
1164        let n = f.len() / 2;
1165        (0..n)
1166            .map(|i| (f[i << 1] * g[i << 1 | 1] + f[i << 1 | 1] * g[i << 1]) * inv2)
1167            .collect()
1168    }
1169
1170    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1171        assert_eq!(f.len(), g.len());
1172        assert!(f.len().is_power_of_two());
1173        assert!(f.len() >= 2);
1174        let mut inv2 = MInt::<M>::from(2).inv();
1175        let n = f.len() / 2;
1176        let k = f.len().trailing_zeros() as usize;
1177        let mut h = vec![MInt::<M>::zero(); n];
1178        let w = MInt::<M>::new_unchecked(M::INFO.inv_root[k]);
1179        BIT_REVERSE.with(|br| {
1180            let br = unsafe { &mut *br.get() };
1181            if br.len() < k {
1182                br.resize_with(k, Default::default);
1183            }
1184            let k = k - 1;
1185            if br[k].is_empty() {
1186                let mut v = vec![0; 1 << k];
1187                for i in 0..1 << k {
1188                    v[i] = (v[i >> 1] >> 1) | ((i & 1) << (k.saturating_sub(1)));
1189                }
1190                br[k] = v;
1191            }
1192            for &i in &br[k] {
1193                h[i] = (f[i << 1] * g[i << 1 | 1] - f[i << 1 | 1] * g[i << 1]) * inv2;
1194                inv2 *= w;
1195            }
1196        });
1197        h
1198    }
1199}
1200
1201impl<M, N1, N2, N3> NttReuse for Convolve<(M, (N1, N2, N3))>
1202where
1203    M: MIntConvert + MIntConvert<u32>,
1204    N1: Montgomery32NttModulus,
1205    N2: Montgomery32NttModulus,
1206    N3: Montgomery32NttModulus,
1207{
1208    fn ntt_doubling(f: Self::F) -> Self::F {
1209        (
1210            Convolve::<N1>::ntt_doubling(f.0),
1211            Convolve::<N2>::ntt_doubling(f.1),
1212            Convolve::<N3>::ntt_doubling(f.2),
1213        )
1214    }
1215
1216    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1217        fn even_mul_normal_neg_corrected<M>(f: &[MInt<M>], g: &[MInt<M>], m: u32) -> Vec<MInt<M>>
1218        where
1219            M: Montgomery32NttModulus,
1220        {
1221            let n = f.len();
1222            assert_eq!(f.len(), g.len());
1223            assert!(f.len().is_power_of_two());
1224            assert!(f.len() >= 2);
1225            let inv2 = MInt::<M>::from(2).inv();
1226            let u = MInt::<M>::new(m) * MInt::<M>::from(n as u32);
1227            let n = f.len() / 2;
1228            (0..n)
1229                .map(|i| {
1230                    (f[i << 1]
1231                        * if i == 0 {
1232                            g[i << 1 | 1] + u
1233                        } else {
1234                            g[i << 1 | 1]
1235                        }
1236                        + f[i << 1 | 1] * g[i << 1])
1237                        * inv2
1238                })
1239                .collect()
1240        }
1241
1242        let m = M::mod_into();
1243        (
1244            even_mul_normal_neg_corrected(&f.0, &g.0, m),
1245            even_mul_normal_neg_corrected(&f.1, &g.1, m),
1246            even_mul_normal_neg_corrected(&f.2, &g.2, m),
1247        )
1248    }
1249
1250    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1251        fn odd_mul_normal_neg_corrected<M>(f: &[MInt<M>], g: &[MInt<M>], m: u32) -> Vec<MInt<M>>
1252        where
1253            M: Montgomery32NttModulus,
1254        {
1255            assert_eq!(f.len(), g.len());
1256            assert!(f.len().is_power_of_two());
1257            assert!(f.len() >= 2);
1258            let mut inv2 = MInt::<M>::from(2).inv();
1259            let u = MInt::<M>::new(m) * MInt::<M>::from(f.len() as u32);
1260            let n = f.len() / 2;
1261            let k = f.len().trailing_zeros() as usize;
1262            let mut h = vec![MInt::<M>::zero(); n];
1263            let w = MInt::<M>::new_unchecked(M::INFO.inv_root[k]);
1264            BIT_REVERSE.with(|br| {
1265                let br = unsafe { &mut *br.get() };
1266                if br.len() < k {
1267                    br.resize_with(k, Default::default);
1268                }
1269                let k = k - 1;
1270                if br[k].is_empty() {
1271                    let mut v = vec![0; 1 << k];
1272                    for i in 0..1 << k {
1273                        v[i] = (v[i >> 1] >> 1) | ((i & 1) << (k.saturating_sub(1)));
1274                    }
1275                    br[k] = v;
1276                }
1277                for &i in &br[k] {
1278                    h[i] = (f[i << 1]
1279                        * if i == 0 {
1280                            g[i << 1 | 1] + u
1281                        } else {
1282                            g[i << 1 | 1]
1283                        }
1284                        - f[i << 1 | 1] * g[i << 1])
1285                        * inv2;
1286                    inv2 *= w;
1287                }
1288            });
1289            h
1290        }
Source§

impl<M> MInt<M>
where M: MIntBase,

Source

pub const fn new_unchecked(x: M::Inner) -> Self

Examples found in repository:
crates/competitive/src/num/mint/mint_base.rs (line 60)
59    pub fn new(x: M::Inner) -> Self {
60        Self::new_unchecked(<M as MIntConvert<M::Inner>>::from(x))
61    }
62}
63impl<M> MInt<M>
64where
65    M: MIntBase,
66{
67    #[inline]
68    pub const fn new_unchecked(x: M::Inner) -> Self {
69        Self {
70            x,
71            _marker: PhantomData,
72        }
73    }
74    #[inline]
75    pub fn get_mod() -> M::Inner {
76        M::get_mod()
77    }
78    #[inline]
79    pub fn pow(self, y: usize) -> Self {
80        Self::new_unchecked(M::mod_pow(self.x, y))
81    }
82    #[inline]
83    pub fn inv(self) -> Self {
84        Self::new_unchecked(M::mod_inv(self.x))
85    }
86    #[inline]
87    pub fn inner(self) -> M::Inner {
88        M::mod_inner(self.x)
89    }
90}
91
92impl<M> Clone for MInt<M>
93where
94    M: MIntBase,
95{
96    #[inline]
97    fn clone(&self) -> Self {
98        *self
99    }
100}
101impl<M> Copy for MInt<M> where M: MIntBase {}
102impl<M> Debug for MInt<M>
103where
104    M: MIntBase,
105{
106    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
107        Debug::fmt(&self.inner(), f)
108    }
109}
110impl<M> Default for MInt<M>
111where
112    M: MIntBase,
113{
114    #[inline]
115    fn default() -> Self {
116        <Self as Zero>::zero()
117    }
118}
119impl<M> PartialEq for MInt<M>
120where
121    M: MIntBase,
122{
123    #[inline]
124    fn eq(&self, other: &Self) -> bool {
125        PartialEq::eq(&self.x, &other.x)
126    }
127}
128impl<M> Eq for MInt<M> where M: MIntBase {}
129impl<M> Hash for MInt<M>
130where
131    M: MIntBase,
132{
133    #[inline]
134    fn hash<H: Hasher>(&self, state: &mut H) {
135        Hash::hash(&self.x, state)
136    }
137}
138macro_rules! impl_mint_from {
139    ($($t:ty),*) => {
140        $(impl<M> From<$t> for MInt<M>
141        where
142            M: MIntConvert<$t>,
143        {
144            #[inline]
145            fn from(x: $t) -> Self {
146                Self::new_unchecked(<M as MIntConvert<$t>>::from(x))
147            }
148        }
149        impl<M> From<MInt<M>> for $t
150        where
151            M: MIntConvert<$t>,
152        {
153            #[inline]
154            fn from(x: MInt<M>) -> $t {
155                <M as MIntConvert<$t>>::into(x.x)
156            }
157        })*
158    };
159}
160impl_mint_from!(
161    u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize
162);
163impl<M> Zero for MInt<M>
164where
165    M: MIntBase,
166{
167    #[inline]
168    fn zero() -> Self {
169        Self::new_unchecked(M::mod_zero())
170    }
171}
172impl<M> One for MInt<M>
173where
174    M: MIntBase,
175{
176    #[inline]
177    fn one() -> Self {
178        Self::new_unchecked(M::mod_one())
179    }
180}
181
182impl<M> Add for MInt<M>
183where
184    M: MIntBase,
185{
186    type Output = Self;
187    #[inline]
188    fn add(self, rhs: Self) -> Self::Output {
189        Self::new_unchecked(M::mod_add(self.x, rhs.x))
190    }
191}
192impl<M> Sub for MInt<M>
193where
194    M: MIntBase,
195{
196    type Output = Self;
197    #[inline]
198    fn sub(self, rhs: Self) -> Self::Output {
199        Self::new_unchecked(M::mod_sub(self.x, rhs.x))
200    }
201}
202impl<M> Mul for MInt<M>
203where
204    M: MIntBase,
205{
206    type Output = Self;
207    #[inline]
208    fn mul(self, rhs: Self) -> Self::Output {
209        Self::new_unchecked(M::mod_mul(self.x, rhs.x))
210    }
211}
212impl<M> Div for MInt<M>
213where
214    M: MIntBase,
215{
216    type Output = Self;
217    #[inline]
218    fn div(self, rhs: Self) -> Self::Output {
219        Self::new_unchecked(M::mod_div(self.x, rhs.x))
220    }
221}
222impl<M> Neg for MInt<M>
223where
224    M: MIntBase,
225{
226    type Output = Self;
227    #[inline]
228    fn neg(self) -> Self::Output {
229        Self::new_unchecked(M::mod_neg(self.x))
230    }
231}
232impl<M> Sum for MInt<M>
233where
234    M: MIntBase,
235{
236    #[inline]
237    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
238        iter.fold(<Self as Zero>::zero(), Add::add)
239    }
240}
241impl<M> Product for MInt<M>
242where
243    M: MIntBase,
244{
245    #[inline]
246    fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
247        iter.fold(<Self as One>::one(), Mul::mul)
248    }
249}
250impl<'a, M: 'a> Sum<&'a MInt<M>> for MInt<M>
251where
252    M: MIntBase,
253{
254    #[inline]
255    fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
256        iter.fold(<Self as Zero>::zero(), Add::add)
257    }
258}
259impl<'a, M: 'a> Product<&'a MInt<M>> for MInt<M>
260where
261    M: MIntBase,
262{
263    #[inline]
264    fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
265        iter.fold(<Self as One>::one(), Mul::mul)
266    }
267}
268impl<M> Display for MInt<M>
269where
270    M: MIntBase<Inner: Display>,
271{
272    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
273        write!(f, "{}", self.inner())
274    }
275}
276impl<M> FromStr for MInt<M>
277where
278    M: MIntConvert + MIntBase<Inner: FromStr>,
279{
280    type Err = <M::Inner as FromStr>::Err;
281    #[inline]
282    fn from_str(s: &str) -> Result<Self, Self::Err> {
283        s.parse::<M::Inner>().map(Self::new)
284    }
285}
286impl<M> IterScan for MInt<M>
287where
288    M: MIntConvert + MIntBase<Inner: FromStr>,
289{
290    type Output = Self;
291    #[inline]
292    fn scan<'a, I: Iterator<Item = &'a str>>(iter: &mut I) -> Option<Self::Output> {
293        iter.next()?.parse::<MInt<M>>().ok()
294    }
295}
296impl<M> SerdeByteStr for MInt<M>
297where
298    M: MIntBase<Inner: SerdeByteStr>,
299{
300    fn serialize(&self, buf: &mut Vec<u8>) {
301        self.inner().serialize(buf)
302    }
303
304    fn deserialize<I>(iter: &mut I) -> Self
305    where
306        I: Iterator<Item = u8>,
307    {
308        Self::new_unchecked(M::Inner::deserialize(iter))
309    }
More examples
Hide additional examples
crates/competitive/src/num/mint/mod.rs (line 34)
33        fn rand(&self, rng: &mut Xorshift) -> MInt<M> {
34            MInt::<M>::new_unchecked(rng.random(..M::get_mod()))
35        }
crates/competitive/src/math/number_theoretic_transform.rs (line 155)
149fn ntt_scalar<M>(a: &mut [MInt<M>])
150where
151    M: Montgomery32NttModulus,
152{
153    let n = a.len();
154    let mut v = n / 2;
155    let imag = MInt::<M>::new_unchecked(M::INFO.root[2]);
156    while v > 1 {
157        let mut w1 = MInt::<M>::one();
158        for (s, a) in a.chunks_exact_mut(v << 1).enumerate() {
159            let (l, r) = a.split_at_mut(v);
160            let (ll, lr) = l.split_at_mut(v >> 1);
161            let (rl, rr) = r.split_at_mut(v >> 1);
162            let w2 = w1 * w1;
163            let w3 = w1 * w2;
164            for (((x0, x1), x2), x3) in ll.iter_mut().zip(lr).zip(rl).zip(rr) {
165                let a0 = *x0;
166                let a1 = *x1 * w1;
167                let a2 = *x2 * w2;
168                let a3 = *x3 * w3;
169                let a0pa2 = a0 + a2;
170                let a0na2 = a0 - a2;
171                let a1pa3 = a1 + a3;
172                let a1na3imag = (a1 - a3) * imag;
173                *x0 = a0pa2 + a1pa3;
174                *x1 = a0pa2 - a1pa3;
175                *x2 = a0na2 + a1na3imag;
176                *x3 = a0na2 - a1na3imag;
177            }
178            w1 *= MInt::<M>::new_unchecked(M::INFO.rate3[s.trailing_ones() as usize]);
179        }
180        v >>= 2;
181    }
182    if v == 1 {
183        let mut w1 = MInt::<M>::one();
184        for (s, a) in a.chunks_exact_mut(2).enumerate() {
185            unsafe {
186                let (l, r) = a.split_at_mut(1);
187                let x0 = l.get_unchecked_mut(0);
188                let x1 = r.get_unchecked_mut(0);
189                let a0 = *x0;
190                let a1 = *x1 * w1;
191                *x0 = a0 + a1;
192                *x1 = a0 - a1;
193            }
194            w1 *= MInt::<M>::new_unchecked(M::INFO.rate2[s.trailing_ones() as usize]);
195        }
196    }
197}
198
199fn intt_scalar<M>(a: &mut [MInt<M>])
200where
201    M: Montgomery32NttModulus,
202{
203    let n = a.len();
204    let mut v = 1;
205    if n.trailing_zeros() & 1 == 1 {
206        let mut w1 = MInt::<M>::one();
207        for (s, a) in a.chunks_exact_mut(2).enumerate() {
208            unsafe {
209                let (l, r) = a.split_at_mut(1);
210                let x0 = l.get_unchecked_mut(0);
211                let x1 = r.get_unchecked_mut(0);
212                let a0 = *x0;
213                let a1 = *x1;
214                *x0 = a0 + a1;
215                *x1 = (a0 - a1) * w1;
216            }
217            w1 *= MInt::<M>::new_unchecked(M::INFO.inv_rate2[s.trailing_ones() as usize]);
218        }
219        v <<= 1;
220    }
221    let iimag = MInt::<M>::new_unchecked(M::INFO.inv_root[2]);
222    while v < n {
223        let mut w1 = MInt::<M>::one();
224        for (s, a) in a.chunks_exact_mut(v << 2).enumerate() {
225            let (l, r) = a.split_at_mut(v << 1);
226            let (ll, lr) = l.split_at_mut(v);
227            let (rl, rr) = r.split_at_mut(v);
228            let w2 = w1 * w1;
229            let w3 = w1 * w2;
230            for (((x0, x1), x2), x3) in ll.iter_mut().zip(lr).zip(rl).zip(rr) {
231                let a0 = *x0;
232                let a1 = *x1;
233                let a2 = *x2;
234                let a3 = *x3;
235                let a0pa1 = a0 + a1;
236                let a0na1 = a0 - a1;
237                let a2pa3 = a2 + a3;
238                let a2na3iimag = (a2 - a3) * iimag;
239                *x0 = a0pa1 + a2pa3;
240                *x1 = (a0na1 + a2na3iimag) * w1;
241                *x2 = (a0pa1 - a2pa3) * w2;
242                *x3 = (a0na1 - a2na3iimag) * w3;
243            }
244            w1 *= MInt::<M>::new_unchecked(M::INFO.inv_rate3[s.trailing_ones() as usize]);
245        }
246        v <<= 2;
247    }
248}
249
250fn ntt<M>(a: &mut [MInt<M>])
251where
252    M: Montgomery32NttModulus,
253{
254    #[cfg(target_arch = "x86_64")]
255    {
256        match simd_backend() {
257            SimdBackend::Avx512 => unsafe { ntt_simd::ntt_avx512::<M>(a) },
258            SimdBackend::Avx2 => unsafe { ntt_simd::ntt_avx2::<M>(a) },
259            SimdBackend::Scalar => ntt_scalar(a),
260        }
261    }
262    #[cfg(not(target_arch = "x86_64"))]
263    {
264        ntt_scalar(a);
265    }
266}
267
268fn intt<M>(a: &mut [MInt<M>])
269where
270    M: Montgomery32NttModulus,
271{
272    #[cfg(target_arch = "x86_64")]
273    {
274        match simd_backend() {
275            SimdBackend::Avx512 => unsafe { ntt_simd::intt_avx512::<M>(a) },
276            SimdBackend::Avx2 => unsafe { ntt_simd::intt_avx2::<M>(a) },
277            SimdBackend::Scalar => intt_scalar(a),
278        }
279    }
280    #[cfg(not(target_arch = "x86_64"))]
281    {
282        intt_scalar(a);
283    }
284}
285
286#[cfg(target_arch = "x86_64")]
287#[allow(unsafe_op_in_unsafe_fn)] // SIMD intrinsics and raw pointers are confined here
288mod ntt_simd {
289    use super::*;
290    use std::arch::x86_64::*;
291
292    const LAZY_THRESHOLD: u32 = 1 << 30;
293
294    #[target_feature(enable = "avx2")]
295    unsafe fn normalize_avx2<M>(a: &mut [u32])
296    where
297        M: Montgomery32NttModulus,
298    {
299        let mod_vec = _mm256_set1_epi32(M::MOD as i32);
300        let sign = _mm256_set1_epi32(0x8000_0000u32 as i32);
301        let mut i = 0;
302        while i + 8 <= a.len() {
303            let x = _mm256_loadu_si256(a.as_ptr().add(i) as *const __m256i);
304            let x_x = _mm256_xor_si256(x, sign);
305            let m_x = _mm256_xor_si256(mod_vec, sign);
306            let gt = _mm256_cmpgt_epi32(x_x, m_x);
307            let eq = _mm256_cmpeq_epi32(x, mod_vec);
308            let mask = _mm256_or_si256(gt, eq);
309            let sub = _mm256_and_si256(mod_vec, mask);
310            let y = _mm256_sub_epi32(x, sub);
311            _mm256_storeu_si256(a.as_mut_ptr().add(i) as *mut __m256i, y);
312            i += 8;
313        }
314        while i < a.len() {
315            let x = a[i];
316            a[i] = if x >= M::MOD { x - M::MOD } else { x };
317            i += 1;
318        }
319    }
320
321    #[target_feature(enable = "avx512f,avx512dq,avx512cd,avx512bw,avx512vl")]
322    unsafe fn normalize_avx512<M>(a: &mut [u32])
323    where
324        M: Montgomery32NttModulus,
325    {
326        let mod_vec = _mm512_set1_epi32(M::MOD as i32);
327        let mut i = 0;
328        while i + 16 <= a.len() {
329            let x = _mm512_loadu_si512(a.as_ptr().add(i) as *const __m512i);
330            let mask = !_mm512_cmp_epu32_mask(x, mod_vec, _MM_CMPINT_LT);
331            let y = _mm512_mask_sub_epi32(x, mask, x, mod_vec);
332            _mm512_storeu_si512(a.as_mut_ptr().add(i) as *mut __m512i, y);
333            i += 16;
334        }
335        while i < a.len() {
336            let x = a[i];
337            a[i] = if x >= M::MOD { x - M::MOD } else { x };
338            i += 1;
339        }
340    }
341
342    unsafe fn add_vec_avx2<M>(
343        a: __m256i,
344        b: __m256i,
345        mod_vec: __m256i,
346        mod2_vec: __m256i,
347        sign: __m256i,
348    ) -> __m256i
349    where
350        M: Montgomery32NttModulus,
351    {
352        if M::MOD < LAZY_THRESHOLD {
353            simd32::montgomery_add_256(a, b, mod2_vec, sign)
354        } else {
355            simd32::add_mod_256(a, b, mod_vec, sign)
356        }
357    }
358
359    unsafe fn sub_vec_avx2<M>(
360        a: __m256i,
361        b: __m256i,
362        mod_vec: __m256i,
363        mod2_vec: __m256i,
364        sign: __m256i,
365    ) -> __m256i
366    where
367        M: Montgomery32NttModulus,
368    {
369        if M::MOD < LAZY_THRESHOLD {
370            simd32::montgomery_sub_256(a, b, mod2_vec, sign)
371        } else {
372            simd32::sub_mod_256(a, b, mod_vec, sign)
373        }
374    }
375
376    unsafe fn mul_vec_avx2<M>(
377        a: __m256i,
378        b: __m256i,
379        r_vec: __m256i,
380        mod_vec: __m256i,
381        sign: __m256i,
382    ) -> __m256i
383    where
384        M: Montgomery32NttModulus,
385    {
386        if M::MOD < LAZY_THRESHOLD {
387            simd32::montgomery_mul_256(a, b, r_vec, mod_vec)
388        } else {
389            simd32::montgomery_mul_256_canon(a, b, r_vec, mod_vec, sign)
390        }
391    }
392
393    unsafe fn add_vec_avx512<M>(
394        a: __m512i,
395        b: __m512i,
396        mod_vec: __m512i,
397        mod2_vec: __m512i,
398    ) -> __m512i
399    where
400        M: Montgomery32NttModulus,
401    {
402        if M::MOD < LAZY_THRESHOLD {
403            simd32::montgomery_add_512(a, b, mod2_vec)
404        } else {
405            simd32::add_mod_512(a, b, mod_vec)
406        }
407    }
408
409    unsafe fn sub_vec_avx512<M>(
410        a: __m512i,
411        b: __m512i,
412        mod_vec: __m512i,
413        mod2_vec: __m512i,
414    ) -> __m512i
415    where
416        M: Montgomery32NttModulus,
417    {
418        if M::MOD < LAZY_THRESHOLD {
419            simd32::montgomery_sub_512(a, b, mod2_vec)
420        } else {
421            simd32::sub_mod_512(a, b, mod_vec)
422        }
423    }
424
425    unsafe fn mul_vec_avx512<M>(a: __m512i, b: __m512i, r_vec: __m512i, mod_vec: __m512i) -> __m512i
426    where
427        M: Montgomery32NttModulus,
428    {
429        if M::MOD < LAZY_THRESHOLD {
430            simd32::montgomery_mul_512(a, b, r_vec, mod_vec)
431        } else {
432            simd32::montgomery_mul_512_canon(a, b, r_vec, mod_vec)
433        }
434    }
435
436    #[target_feature(enable = "avx2")]
437    pub(super) unsafe fn ntt_avx2<M>(a: &mut [MInt<M>])
438    where
439        M: Montgomery32NttModulus,
440    {
441        let n = a.len();
442        if n <= 1 {
443            return;
444        }
445        let ptr = a.as_mut_ptr() as *mut u32;
446        let a = std::slice::from_raw_parts_mut(ptr, n);
447        let mod_vec = _mm256_set1_epi32(M::MOD as i32);
448        let mod2_vec = _mm256_set1_epi32(M::MOD.wrapping_add(M::MOD) as i32);
449        let r_vec = _mm256_set1_epi32(M::R.wrapping_neg() as i32);
450        let sign = _mm256_set1_epi32(0x8000_0000u32 as i32);
451        let imag = M::INFO.root[2];
452        let imag_vec = _mm256_set1_epi32(imag as i32);
453
454        let mut v = n / 2;
455        while v > 1 {
456            let half = v >> 1;
457            let mut w1 = M::N1;
458            for (s, block) in a.chunks_exact_mut(v << 1).enumerate() {
459                let base = block.as_mut_ptr();
460                let ll = base;
461                let lr = base.add(half);
462                let rl = base.add(v);
463                let rr = base.add(v + half);
464
465                let w2 = M::mod_mul(w1, w1);
466                let w3 = M::mod_mul(w2, w1);
467                let w1v = _mm256_set1_epi32(w1 as i32);
468                let w2v = _mm256_set1_epi32(w2 as i32);
469                let w3v = _mm256_set1_epi32(w3 as i32);
470
471                let mut i = 0;
472                while i + 8 <= half {
473                    let x0 = _mm256_loadu_si256(ll.add(i) as *const __m256i);
474                    let x1 = _mm256_loadu_si256(lr.add(i) as *const __m256i);
475                    let x2 = _mm256_loadu_si256(rl.add(i) as *const __m256i);
476                    let x3 = _mm256_loadu_si256(rr.add(i) as *const __m256i);
477
478                    let a1 = mul_vec_avx2::<M>(x1, w1v, r_vec, mod_vec, sign);
479                    let a2 = mul_vec_avx2::<M>(x2, w2v, r_vec, mod_vec, sign);
480                    let a3 = mul_vec_avx2::<M>(x3, w3v, r_vec, mod_vec, sign);
481
482                    let a0pa2 = add_vec_avx2::<M>(x0, a2, mod_vec, mod2_vec, sign);
483                    let a0na2 = sub_vec_avx2::<M>(x0, a2, mod_vec, mod2_vec, sign);
484                    let a1pa3 = add_vec_avx2::<M>(a1, a3, mod_vec, mod2_vec, sign);
485                    let a1na3 = sub_vec_avx2::<M>(a1, a3, mod_vec, mod2_vec, sign);
486                    let a1na3imag = mul_vec_avx2::<M>(a1na3, imag_vec, r_vec, mod_vec, sign);
487
488                    let y0 = add_vec_avx2::<M>(a0pa2, a1pa3, mod_vec, mod2_vec, sign);
489                    let y1 = sub_vec_avx2::<M>(a0pa2, a1pa3, mod_vec, mod2_vec, sign);
490                    let y2 = add_vec_avx2::<M>(a0na2, a1na3imag, mod_vec, mod2_vec, sign);
491                    let y3 = sub_vec_avx2::<M>(a0na2, a1na3imag, mod_vec, mod2_vec, sign);
492
493                    _mm256_storeu_si256(ll.add(i) as *mut __m256i, y0);
494                    _mm256_storeu_si256(lr.add(i) as *mut __m256i, y1);
495                    _mm256_storeu_si256(rl.add(i) as *mut __m256i, y2);
496                    _mm256_storeu_si256(rr.add(i) as *mut __m256i, y3);
497                    i += 8;
498                }
499                while i < half {
500                    let a0 = *ll.add(i);
501                    let a1 = M::mod_mul(*lr.add(i), w1);
502                    let a2 = M::mod_mul(*rl.add(i), w2);
503                    let a3 = M::mod_mul(*rr.add(i), w3);
504                    let a0pa2 = M::mod_add(a0, a2);
505                    let a0na2 = M::mod_sub(a0, a2);
506                    let a1pa3 = M::mod_add(a1, a3);
507                    let a1na3 = M::mod_sub(a1, a3);
508                    let a1na3imag = M::mod_mul(a1na3, imag);
509                    *ll.add(i) = M::mod_add(a0pa2, a1pa3);
510                    *lr.add(i) = M::mod_sub(a0pa2, a1pa3);
511                    *rl.add(i) = M::mod_add(a0na2, a1na3imag);
512                    *rr.add(i) = M::mod_sub(a0na2, a1na3imag);
513                    i += 1;
514                }
515                w1 = M::mod_mul(w1, M::INFO.rate3[s.trailing_ones() as usize]);
516            }
517            v >>= 2;
518        }
519        if v == 1 {
520            let mut w1 = M::N1;
521            for (s, block) in a.chunks_exact_mut(2).enumerate() {
522                let a0 = *block.get_unchecked(0);
523                let a1 = M::mod_mul(*block.get_unchecked(1), w1);
524                *block.get_unchecked_mut(0) = M::mod_add(a0, a1);
525                *block.get_unchecked_mut(1) = M::mod_sub(a0, a1);
526                w1 = M::mod_mul(w1, M::INFO.rate2[s.trailing_ones() as usize]);
527            }
528        }
529        normalize_avx2::<M>(a);
530    }
531
    /// In-place inverse NTT over `a` (AVX2 path).
    ///
    /// Structure mirrors the forward transform in reverse: one radix-2 stage
    /// first when `log2(n)` is odd, then radix-4 stages with the butterfly
    /// stride `v` growing by a factor of 4 each pass. Twiddles advance via the
    /// precomputed `inv_rate2`/`inv_rate3` tables indexed by the trailing-one
    /// count of the block index. No `1/n` scaling happens here (callers such
    /// as `ConvolveSteps::inverse_transform` multiply by the inverse size);
    /// the final `normalize_avx2` only reduces lazily-kept values back into
    /// canonical range.
    ///
    /// # Safety
    /// Caller must ensure AVX2 is available. The stage bookkeeping assumes
    /// `a.len()` is a power of two -- TODO confirm against callers.
    #[target_feature(enable = "avx2")]
    pub(super) unsafe fn intt_avx2<M>(a: &mut [MInt<M>])
    where
        M: Montgomery32NttModulus,
    {
        let n = a.len();
        if n <= 1 {
            return;
        }
        // `MInt<M>` is #[repr(transparent)] over its inner integer, so the
        // slice can be reinterpreted as raw u32 words for the SIMD loads
        // (relies on `M::Inner` being u32 for Montgomery32 moduli).
        let ptr = a.as_mut_ptr() as *mut u32;
        let a = std::slice::from_raw_parts_mut(ptr, n);
        // Broadcast constants: MOD, 2*MOD (lazy-reduction bound), the negated
        // Montgomery constant for the multiply helper, and a sign-bit word
        // consumed by the add/sub/mul helpers (presumably to emulate unsigned
        // compares with signed instructions -- confirm in the helpers).
        let mod_vec = _mm256_set1_epi32(M::MOD as i32);
        let mod2_vec = _mm256_set1_epi32(M::MOD.wrapping_add(M::MOD) as i32);
        let r_vec = _mm256_set1_epi32(M::R.wrapping_neg() as i32);
        let sign = _mm256_set1_epi32(0x8000_0000u32 as i32);
        // Inverse "imaginary unit": inv_root[2], inverse of the 4th root of unity.
        let iimag = M::INFO.inv_root[2];
        let iimag_vec = _mm256_set1_epi32(iimag as i32);

        let mut v = 1;
        // When log2(n) is odd, do one scalar radix-2 pass first so the
        // remaining stages pair up cleanly as radix-4.
        if n.trailing_zeros() & 1 == 1 {
            let mut w1 = M::N1;
            for (s, block) in a.chunks_exact_mut(2).enumerate() {
                let a0 = *block.get_unchecked(0);
                let a1 = *block.get_unchecked(1);
                *block.get_unchecked_mut(0) = M::mod_add(a0, a1);
                *block.get_unchecked_mut(1) = M::mod_mul(M::mod_sub(a0, a1), w1);
                w1 = M::mod_mul(w1, M::INFO.inv_rate2[s.trailing_ones() as usize]);
            }
            v <<= 1;
        }
        // Radix-4 stages; each block of 4*v elements is split into four lanes
        // (ll, lr, rl, rr) at stride v and combined with a 4-point butterfly.
        while v < n {
            let mut w1 = M::N1;
            for (s, block) in a.chunks_exact_mut(v << 2).enumerate() {
                let base = block.as_mut_ptr();
                let ll = base;
                let lr = base.add(v);
                let rl = base.add(v << 1);
                let rr = base.add(v * 3);

                let w2 = M::mod_mul(w1, w1);
                let w3 = M::mod_mul(w2, w1);
                let w1v = _mm256_set1_epi32(w1 as i32);
                let w2v = _mm256_set1_epi32(w2 as i32);
                let w3v = _mm256_set1_epi32(w3 as i32);

                // Vector main loop: 8 butterflies per iteration.
                let mut i = 0;
                while i + 8 <= v {
                    let x0 = _mm256_loadu_si256(ll.add(i) as *const __m256i);
                    let x1 = _mm256_loadu_si256(lr.add(i) as *const __m256i);
                    let x2 = _mm256_loadu_si256(rl.add(i) as *const __m256i);
                    let x3 = _mm256_loadu_si256(rr.add(i) as *const __m256i);

                    let a0pa1 = add_vec_avx2::<M>(x0, x1, mod_vec, mod2_vec, sign);
                    let a0na1 = sub_vec_avx2::<M>(x0, x1, mod_vec, mod2_vec, sign);
                    let a2pa3 = add_vec_avx2::<M>(x2, x3, mod_vec, mod2_vec, sign);
                    let a2na3 = sub_vec_avx2::<M>(x2, x3, mod_vec, mod2_vec, sign);
                    let a2na3iimag = mul_vec_avx2::<M>(a2na3, iimag_vec, r_vec, mod_vec, sign);

                    let y0 = add_vec_avx2::<M>(a0pa1, a2pa3, mod_vec, mod2_vec, sign);
                    let y1 = add_vec_avx2::<M>(a0na1, a2na3iimag, mod_vec, mod2_vec, sign);
                    let y2 = sub_vec_avx2::<M>(a0pa1, a2pa3, mod_vec, mod2_vec, sign);
                    let y3 = sub_vec_avx2::<M>(a0na1, a2na3iimag, mod_vec, mod2_vec, sign);

                    // Twiddles are applied to the outputs here (inverse
                    // direction), unlike the forward transform which twists
                    // the inputs.
                    let y1 = mul_vec_avx2::<M>(y1, w1v, r_vec, mod_vec, sign);
                    let y2 = mul_vec_avx2::<M>(y2, w2v, r_vec, mod_vec, sign);
                    let y3 = mul_vec_avx2::<M>(y3, w3v, r_vec, mod_vec, sign);

                    _mm256_storeu_si256(ll.add(i) as *mut __m256i, y0);
                    _mm256_storeu_si256(lr.add(i) as *mut __m256i, y1);
                    _mm256_storeu_si256(rl.add(i) as *mut __m256i, y2);
                    _mm256_storeu_si256(rr.add(i) as *mut __m256i, y3);
                    i += 8;
                }
                // Scalar tail for strides smaller than the vector width.
                while i < v {
                    let a0 = *ll.add(i);
                    let a1 = *lr.add(i);
                    let a2 = *rl.add(i);
                    let a3 = *rr.add(i);
                    let a0pa1 = M::mod_add(a0, a1);
                    let a0na1 = M::mod_sub(a0, a1);
                    let a2pa3 = M::mod_add(a2, a3);
                    let a2na3iimag = M::mod_mul(M::mod_sub(a2, a3), iimag);
                    *ll.add(i) = M::mod_add(a0pa1, a2pa3);
                    *lr.add(i) = M::mod_mul(M::mod_add(a0na1, a2na3iimag), w1);
                    *rl.add(i) = M::mod_mul(M::mod_sub(a0pa1, a2pa3), w2);
                    *rr.add(i) = M::mod_mul(M::mod_sub(a0na1, a2na3iimag), w3);
                    i += 1;
                }
                w1 = M::mod_mul(w1, M::INFO.inv_rate3[s.trailing_ones() as usize]);
            }
            v <<= 2;
        }
        // Fold lazily-accumulated values back into canonical range.
        normalize_avx2::<M>(a);
    }
626
    /// In-place forward NTT over `a` (AVX-512 path, 16 lanes per iteration).
    ///
    /// Radix-4 butterfly stages with the stride `v` shrinking from `n/2` by a
    /// factor of 4 per pass; when `log2(n)` is odd the loop bottoms out at
    /// `v == 1` and a final scalar radix-2 stage finishes the transform.
    /// Twiddles advance through the precomputed `rate2`/`rate3` tables indexed
    /// by the trailing-one count of the block index.
    ///
    /// # Safety
    /// Caller must ensure the listed AVX-512 feature set is available. The
    /// stage bookkeeping assumes `a.len()` is a power of two -- TODO confirm
    /// against callers.
    #[target_feature(enable = "avx512f,avx512dq,avx512cd,avx512bw,avx512vl")]
    pub(super) unsafe fn ntt_avx512<M>(a: &mut [MInt<M>])
    where
        M: Montgomery32NttModulus,
    {
        let n = a.len();
        if n <= 1 {
            return;
        }
        // `MInt<M>` is #[repr(transparent)] over its inner integer, so view
        // the slice as raw u32 words (relies on `M::Inner` being u32).
        let ptr = a.as_mut_ptr() as *mut u32;
        let a = std::slice::from_raw_parts_mut(ptr, n);
        // Broadcast constants: MOD, 2*MOD (lazy-reduction bound), and the
        // negated Montgomery constant for the multiply helper.
        let mod_vec = _mm512_set1_epi32(M::MOD as i32);
        let mod2_vec = _mm512_set1_epi32(M::MOD.wrapping_add(M::MOD) as i32);
        let r_vec = _mm512_set1_epi32(M::R.wrapping_neg() as i32);
        // "Imaginary unit": root[2], a 4th root of unity.
        let imag = M::INFO.root[2];
        let imag_vec = _mm512_set1_epi32(imag as i32);

        let mut v = n / 2;
        while v > 1 {
            let half = v >> 1;
            let mut w1 = M::N1;
            for (s, block) in a.chunks_exact_mut(v << 1).enumerate() {
                // Four lanes of the radix-4 butterfly at stride `half`.
                let base = block.as_mut_ptr();
                let ll = base;
                let lr = base.add(half);
                let rl = base.add(v);
                let rr = base.add(v + half);
                let w2 = M::mod_mul(w1, w1);
                let w3 = M::mod_mul(w2, w1);
                let w1v = _mm512_set1_epi32(w1 as i32);
                let w2v = _mm512_set1_epi32(w2 as i32);
                let w3v = _mm512_set1_epi32(w3 as i32);

                // Vector main loop: 16 butterflies per iteration.
                let mut i = 0;
                while i + 16 <= half {
                    let x0 = _mm512_loadu_si512(ll.add(i) as *const __m512i);
                    let x1 = _mm512_loadu_si512(lr.add(i) as *const __m512i);
                    let x2 = _mm512_loadu_si512(rl.add(i) as *const __m512i);
                    let x3 = _mm512_loadu_si512(rr.add(i) as *const __m512i);

                    // Forward direction: twist the inputs by w^1, w^2, w^3
                    // before combining.
                    let a1 = mul_vec_avx512::<M>(x1, w1v, r_vec, mod_vec);
                    let a2 = mul_vec_avx512::<M>(x2, w2v, r_vec, mod_vec);
                    let a3 = mul_vec_avx512::<M>(x3, w3v, r_vec, mod_vec);

                    let a0pa2 = add_vec_avx512::<M>(x0, a2, mod_vec, mod2_vec);
                    let a0na2 = sub_vec_avx512::<M>(x0, a2, mod_vec, mod2_vec);
                    let a1pa3 = add_vec_avx512::<M>(a1, a3, mod_vec, mod2_vec);
                    let a1na3 = sub_vec_avx512::<M>(a1, a3, mod_vec, mod2_vec);
                    let a1na3imag = mul_vec_avx512::<M>(a1na3, imag_vec, r_vec, mod_vec);

                    let y0 = add_vec_avx512::<M>(a0pa2, a1pa3, mod_vec, mod2_vec);
                    let y1 = sub_vec_avx512::<M>(a0pa2, a1pa3, mod_vec, mod2_vec);
                    let y2 = add_vec_avx512::<M>(a0na2, a1na3imag, mod_vec, mod2_vec);
                    let y3 = sub_vec_avx512::<M>(a0na2, a1na3imag, mod_vec, mod2_vec);

                    _mm512_storeu_si512(ll.add(i) as *mut __m512i, y0);
                    _mm512_storeu_si512(lr.add(i) as *mut __m512i, y1);
                    _mm512_storeu_si512(rl.add(i) as *mut __m512i, y2);
                    _mm512_storeu_si512(rr.add(i) as *mut __m512i, y3);
                    i += 16;
                }
                // Scalar tail for strides smaller than the vector width.
                while i < half {
                    let a0 = *ll.add(i);
                    let a1 = M::mod_mul(*lr.add(i), w1);
                    let a2 = M::mod_mul(*rl.add(i), w2);
                    let a3 = M::mod_mul(*rr.add(i), w3);
                    let a0pa2 = M::mod_add(a0, a2);
                    let a0na2 = M::mod_sub(a0, a2);
                    let a1pa3 = M::mod_add(a1, a3);
                    let a1na3 = M::mod_sub(a1, a3);
                    let a1na3imag = M::mod_mul(a1na3, imag);
                    *ll.add(i) = M::mod_add(a0pa2, a1pa3);
                    *lr.add(i) = M::mod_sub(a0pa2, a1pa3);
                    *rl.add(i) = M::mod_add(a0na2, a1na3imag);
                    *rr.add(i) = M::mod_sub(a0na2, a1na3imag);
                    i += 1;
                }
                w1 = M::mod_mul(w1, M::INFO.rate3[s.trailing_ones() as usize]);
            }
            v >>= 2;
        }
        // Leftover radix-2 stage when log2(n) is odd.
        if v == 1 {
            let mut w1 = M::N1;
            for (s, block) in a.chunks_exact_mut(2).enumerate() {
                let a0 = *block.get_unchecked(0);
                let a1 = M::mod_mul(*block.get_unchecked(1), w1);
                *block.get_unchecked_mut(0) = M::mod_add(a0, a1);
                *block.get_unchecked_mut(1) = M::mod_sub(a0, a1);
                w1 = M::mod_mul(w1, M::INFO.rate2[s.trailing_ones() as usize]);
            }
        }
        // Fold lazily-accumulated values back into canonical range.
        normalize_avx512::<M>(a);
    }
720
    /// In-place inverse NTT over `a` (AVX-512 path, 16 lanes per iteration).
    ///
    /// Mirrors `ntt_avx512` in reverse: an optional radix-2 stage first when
    /// `log2(n)` is odd, then radix-4 stages with the stride `v` growing by a
    /// factor of 4 per pass. No `1/n` scaling is performed here (callers such
    /// as `ConvolveSteps::inverse_transform` apply it); `normalize_avx512`
    /// only reduces lazily-kept values into canonical range.
    ///
    /// # Safety
    /// Caller must ensure the listed AVX-512 feature set is available. The
    /// stage bookkeeping assumes `a.len()` is a power of two -- TODO confirm
    /// against callers.
    #[target_feature(enable = "avx512f,avx512dq,avx512cd,avx512bw,avx512vl")]
    pub(super) unsafe fn intt_avx512<M>(a: &mut [MInt<M>])
    where
        M: Montgomery32NttModulus,
    {
        let n = a.len();
        if n <= 1 {
            return;
        }
        // `MInt<M>` is #[repr(transparent)] over its inner integer, so view
        // the slice as raw u32 words (relies on `M::Inner` being u32).
        let ptr = a.as_mut_ptr() as *mut u32;
        let a = std::slice::from_raw_parts_mut(ptr, n);
        // Broadcast constants: MOD, 2*MOD (lazy-reduction bound), and the
        // negated Montgomery constant for the multiply helper.
        let mod_vec = _mm512_set1_epi32(M::MOD as i32);
        let mod2_vec = _mm512_set1_epi32(M::MOD.wrapping_add(M::MOD) as i32);
        let r_vec = _mm512_set1_epi32(M::R.wrapping_neg() as i32);
        // Inverse "imaginary unit": inv_root[2], inverse 4th root of unity.
        let iimag = M::INFO.inv_root[2];
        let iimag_vec = _mm512_set1_epi32(iimag as i32);

        let mut v = 1;
        // When log2(n) is odd, one scalar radix-2 pass first so the remaining
        // stages pair up as radix-4.
        if n.trailing_zeros() & 1 == 1 {
            let mut w1 = M::N1;
            for (s, block) in a.chunks_exact_mut(2).enumerate() {
                let a0 = *block.get_unchecked(0);
                let a1 = *block.get_unchecked(1);
                *block.get_unchecked_mut(0) = M::mod_add(a0, a1);
                *block.get_unchecked_mut(1) = M::mod_mul(M::mod_sub(a0, a1), w1);
                w1 = M::mod_mul(w1, M::INFO.inv_rate2[s.trailing_ones() as usize]);
            }
            v <<= 1;
        }
        // Radix-4 stages; four lanes per block at stride `v`.
        while v < n {
            let mut w1 = M::N1;
            for (s, block) in a.chunks_exact_mut(v << 2).enumerate() {
                let base = block.as_mut_ptr();
                let ll = base;
                let lr = base.add(v);
                let rl = base.add(v << 1);
                let rr = base.add(v * 3);
                let w2 = M::mod_mul(w1, w1);
                let w3 = M::mod_mul(w2, w1);
                let w1v = _mm512_set1_epi32(w1 as i32);
                let w2v = _mm512_set1_epi32(w2 as i32);
                let w3v = _mm512_set1_epi32(w3 as i32);

                // Vector main loop: 16 butterflies per iteration.
                let mut i = 0;
                while i + 16 <= v {
                    let x0 = _mm512_loadu_si512(ll.add(i) as *const __m512i);
                    let x1 = _mm512_loadu_si512(lr.add(i) as *const __m512i);
                    let x2 = _mm512_loadu_si512(rl.add(i) as *const __m512i);
                    let x3 = _mm512_loadu_si512(rr.add(i) as *const __m512i);

                    let a0pa1 = add_vec_avx512::<M>(x0, x1, mod_vec, mod2_vec);
                    let a0na1 = sub_vec_avx512::<M>(x0, x1, mod_vec, mod2_vec);
                    let a2pa3 = add_vec_avx512::<M>(x2, x3, mod_vec, mod2_vec);
                    let a2na3 = sub_vec_avx512::<M>(x2, x3, mod_vec, mod2_vec);
                    let a2na3iimag = mul_vec_avx512::<M>(a2na3, iimag_vec, r_vec, mod_vec);

                    let y0 = add_vec_avx512::<M>(a0pa1, a2pa3, mod_vec, mod2_vec);
                    let y1 = add_vec_avx512::<M>(a0na1, a2na3iimag, mod_vec, mod2_vec);
                    let y2 = sub_vec_avx512::<M>(a0pa1, a2pa3, mod_vec, mod2_vec);
                    let y3 = sub_vec_avx512::<M>(a0na1, a2na3iimag, mod_vec, mod2_vec);

                    // Inverse direction: twiddles are applied to the outputs,
                    // unlike the forward transform which twists the inputs.
                    let y1 = mul_vec_avx512::<M>(y1, w1v, r_vec, mod_vec);
                    let y2 = mul_vec_avx512::<M>(y2, w2v, r_vec, mod_vec);
                    let y3 = mul_vec_avx512::<M>(y3, w3v, r_vec, mod_vec);

                    _mm512_storeu_si512(ll.add(i) as *mut __m512i, y0);
                    _mm512_storeu_si512(lr.add(i) as *mut __m512i, y1);
                    _mm512_storeu_si512(rl.add(i) as *mut __m512i, y2);
                    _mm512_storeu_si512(rr.add(i) as *mut __m512i, y3);
                    i += 16;
                }
                // Scalar tail for strides smaller than the vector width.
                while i < v {
                    let a0 = *ll.add(i);
                    let a1 = *lr.add(i);
                    let a2 = *rl.add(i);
                    let a3 = *rr.add(i);
                    let a0pa1 = M::mod_add(a0, a1);
                    let a0na1 = M::mod_sub(a0, a1);
                    let a2pa3 = M::mod_add(a2, a3);
                    let a2na3iimag = M::mod_mul(M::mod_sub(a2, a3), iimag);
                    *ll.add(i) = M::mod_add(a0pa1, a2pa3);
                    *lr.add(i) = M::mod_mul(M::mod_add(a0na1, a2na3iimag), w1);
                    *rl.add(i) = M::mod_mul(M::mod_sub(a0pa1, a2pa3), w2);
                    *rr.add(i) = M::mod_mul(M::mod_sub(a0na1, a2na3iimag), w3);
                    i += 1;
                }
                w1 = M::mod_mul(w1, M::INFO.inv_rate3[s.trailing_ones() as usize]);
            }
            v <<= 2;
        }
        // Fold lazily-accumulated values back into canonical range.
        normalize_avx512::<M>(a);
    }
813}
814
815fn convolve_naive<T>(a: &[T], b: &[T]) -> Vec<T>
816where
817    T: Copy + Zero + AddAssign<T> + Mul<Output = T>,
818{
819    if a.is_empty() && b.is_empty() {
820        return Vec::new();
821    }
822    let len = a.len() + b.len() - 1;
823    let mut c = vec![T::zero(); len];
824    if a.len() < b.len() {
825        for (i, &b) in b.iter().enumerate() {
826            for (a, c) in a.iter().zip(&mut c[i..]) {
827                *c += *a * b;
828            }
829        }
830    } else {
831        for (i, &a) in a.iter().enumerate() {
832            for (b, c) in b.iter().zip(&mut c[i..]) {
833                *c += *b * a;
834            }
835        }
836    }
837    c
838}
839
840fn convolve_karatsuba<T>(a: &[T], b: &[T]) -> Vec<T>
841where
842    T: Copy + Zero + AddAssign<T> + SubAssign<T> + Mul<Output = T>,
843{
844    if a.len().min(b.len()) <= 30 {
845        return convolve_naive(a, b);
846    }
847    let m = a.len().max(b.len()).div_ceil(2);
848    let (a0, a1) = if a.len() <= m {
849        (a, &[][..])
850    } else {
851        a.split_at(m)
852    };
853    let (b0, b1) = if b.len() <= m {
854        (b, &[][..])
855    } else {
856        b.split_at(m)
857    };
858    let f00 = convolve_karatsuba(a0, b0);
859    let f11 = convolve_karatsuba(a1, b1);
860    let mut a0a1 = a0.to_vec();
861    for (a0a1, &a1) in a0a1.iter_mut().zip(a1) {
862        *a0a1 += a1;
863    }
864    let mut b0b1 = b0.to_vec();
865    for (b0b1, &b1) in b0b1.iter_mut().zip(b1) {
866        *b0b1 += b1;
867    }
868    let mut f01 = convolve_karatsuba(&a0a1, &b0b1);
869    for (f01, &f00) in f01.iter_mut().zip(&f00) {
870        *f01 -= f00;
871    }
872    for (f01, &f11) in f01.iter_mut().zip(&f11) {
873        *f01 -= f11;
874    }
875    let mut c = vec![T::zero(); a.len() + b.len() - 1];
876    for (c, &f00) in c.iter_mut().zip(&f00) {
877        *c += f00;
878    }
879    for (c, &f01) in c[m..].iter_mut().zip(&f01) {
880        *c += f01;
881    }
882    for (c, &f11) in c[m << 1..].iter_mut().zip(&f11) {
883        *c += f11;
884    }
885    c
886}
887
/// Single-modulus NTT convolution for an NTT-friendly Montgomery modulus.
impl<M> ConvolveSteps for Convolve<M>
where
    M: Montgomery32NttModulus,
{
    // Time domain: coefficient vector.
    type T = Vec<MInt<M>>;
    // Frequency domain: pointwise values after `ntt`.
    type F = Vec<MInt<M>>;
    fn length(t: &Self::T) -> usize {
        t.len()
    }
    /// Zero-pads to the next power of two >= `len` and runs the forward NTT.
    fn transform(mut t: Self::T, len: usize) -> Self::F {
        t.resize_with(len.max(1).next_power_of_two(), Zero::zero);
        ntt(&mut t);
        t
    }
    /// Inverse NTT, truncation to `len`, then scaling by the inverse of the
    /// transform size (the inverse NTT itself does not divide by n).
    fn inverse_transform(mut f: Self::F, len: usize) -> Self::T {
        intt(&mut f);
        f.truncate(len);
        // The transform size was next_power_of_two(len) by contract with
        // `transform`, so recompute the same value for the scaling factor.
        let inv = MInt::from(len.max(1).next_power_of_two() as u32).inv();
        for f in f.iter_mut() {
            *f *= inv;
        }
        f
    }
    /// Pointwise product of two equal-length frequency-domain vectors.
    fn multiply(f: &mut Self::F, g: &Self::F) {
        assert_eq!(f.len(), g.len());
        for (f, g) in f.iter_mut().zip(g.iter()) {
            *f *= *g;
        }
    }
    /// Full convolution with size-dependent strategy selection.
    fn convolve(mut a: Self::T, mut b: Self::T) -> Self::T {
        // Small inputs: direct methods beat the NTT's constant factor.
        if Self::length(&a).max(Self::length(&b)) <= 100 {
            return convolve_karatsuba(&a, &b);
        }
        if Self::length(&a).min(Self::length(&b)) <= 60 {
            return convolve_naive(&a, &b);
        }
        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
        let size = len.max(1).next_power_of_two();
        // When `len` barely exceeds size/2, peel the top coefficient off each
        // operand and add its cross terms by hand; the remaining product then
        // fits into an NTT of half the size.
        if len <= size / 2 + 2 {
            let xa = a.pop().unwrap();
            let xb = b.pop().unwrap();
            let mut c = vec![MInt::<M>::zero(); len];
            // Top coefficient xa*xb sits at degree len - 1.
            *c.last_mut().unwrap() = xa * xb;
            // Cross terms a[i]*xb start at offset b.len() (b's peeled degree),
            // and symmetrically for b[i]*xa.
            for (a, c) in a.iter().zip(&mut c[b.len()..]) {
                *c += *a * xb;
            }
            for (b, c) in b.iter().zip(&mut c[a.len()..]) {
                *c += *b * xa;
            }
            // Product of the two truncated operands, added from degree 0.
            let d = Self::convolve(a, b);
            for (d, c) in d.into_iter().zip(&mut c) {
                *c += d;
            }
            return c;
        }
        // Squaring needs only one forward transform.
        let same = a == b;
        let mut a = Self::transform(a, len);
        if same {
            for a in a.iter_mut() {
                *a *= *a;
            }
        } else {
            let b = Self::transform(b, len);
            Self::multiply(&mut a, &b);
        }
        Self::inverse_transform(a, len)
    }
}
956
/// Shorthand for a coefficient vector of modular integers over modulus `M`.
type MVec<M> = Vec<MInt<M>>;
/// Convolution over an arbitrary modulus `M`: convolve under three
/// NTT-friendly moduli `N1`, `N2`, `N3` and recombine the residues with
/// Garner's algorithm (mixed-radix CRT). Valid as long as the true
/// coefficients are below `N1 * N2 * N3` -- TODO confirm intended bound.
impl<M, N1, N2, N3> ConvolveSteps for Convolve<(M, (N1, N2, N3))>
where
    M: MIntConvert + MIntConvert<u32>,
    N1: Montgomery32NttModulus,
    N2: Montgomery32NttModulus,
    N3: Montgomery32NttModulus,
{
    type T = MVec<M>;
    // One frequency-domain vector per NTT modulus.
    type F = (MVec<N1>, MVec<N2>, MVec<N3>);
    fn length(t: &Self::T) -> usize {
        t.len()
    }
    /// Reduces every coefficient into each of the three moduli, zero-pads to
    /// the next power of two, and forward-transforms all three copies.
    fn transform(t: Self::T, len: usize) -> Self::F {
        let npot = len.max(1).next_power_of_two();
        let mut f = (
            MVec::<N1>::with_capacity(npot),
            MVec::<N2>::with_capacity(npot),
            MVec::<N3>::with_capacity(npot),
        );
        for t in t {
            f.0.push(<M as MIntConvert<u32>>::into(t.inner()).into());
            f.1.push(<M as MIntConvert<u32>>::into(t.inner()).into());
            f.2.push(<M as MIntConvert<u32>>::into(t.inner()).into());
        }
        f.0.resize_with(npot, Zero::zero);
        f.1.resize_with(npot, Zero::zero);
        f.2.resize_with(npot, Zero::zero);
        ntt(&mut f.0);
        ntt(&mut f.1);
        ntt(&mut f.2);
        f
    }
    /// Inverse-transforms each residue vector and rebuilds the value in `M`
    /// from the mixed-radix digits: x = d1 + d2*N1 + d3*N1*N2.
    fn inverse_transform(f: Self::F, len: usize) -> Self::T {
        // t1 = N1^-1 (mod N2); t2 = (N1*N2)^-1 (mod N3).
        let t1 = MInt::<N2>::new(N1::get_mod()).inv();
        let m1 = MInt::<M>::from(N1::get_mod());
        let m1_3 = MInt::<N3>::new(N1::get_mod());
        let t2 = (m1_3 * MInt::<N3>::new(N2::get_mod())).inv();
        let m2 = m1 * MInt::<M>::from(N2::get_mod());
        Convolve::<N1>::inverse_transform(f.0, len)
            .into_iter()
            .zip(Convolve::<N2>::inverse_transform(f.1, len))
            .zip(Convolve::<N3>::inverse_transform(f.2, len))
            .map(|((c1, c2), c3)| {
                // Garner digits: d1 from N1, then lift through N2 and N3.
                let d1 = c1.inner();
                let d2 = ((c2 - MInt::<N2>::from(d1)) * t1).inner();
                let x = MInt::<N3>::new(d1) + MInt::<N3>::new(d2) * m1_3;
                let d3 = ((c3 - x) * t2).inner();
                MInt::<M>::from(d1) + MInt::<M>::from(d2) * m1 + MInt::<M>::from(d3) * m2
            })
            .collect()
    }
    /// Pointwise product, applied per modulus.
    fn multiply(f: &mut Self::F, g: &Self::F) {
        assert_eq!(f.0.len(), g.0.len());
        assert_eq!(f.1.len(), g.1.len());
        assert_eq!(f.2.len(), g.2.len());
        for (f, g) in f.0.iter_mut().zip(g.0.iter()) {
            *f *= *g;
        }
        for (f, g) in f.1.iter_mut().zip(g.1.iter()) {
            *f *= *g;
        }
        for (f, g) in f.2.iter_mut().zip(g.2.iter()) {
            *f *= *g;
        }
    }
    /// Full convolution; direct methods for small inputs (the threshold is
    /// higher than the single-modulus case since three NTTs must amortize).
    fn convolve(a: Self::T, b: Self::T) -> Self::T {
        if Self::length(&a).max(Self::length(&b)) <= 300 {
            return convolve_karatsuba(&a, &b);
        }
        if Self::length(&a).min(Self::length(&b)) <= 60 {
            return convolve_naive(&a, &b);
        }
        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
        let mut a = Self::transform(a, len);
        let b = Self::transform(b, len);
        Self::multiply(&mut a, &b);
        Self::inverse_transform(a, len)
    }
}
1037
/// Convolution of `u64` sequences via three NTT moduli and Garner
/// recombination, returning plain `u64` values.
impl<N1, N2, N3> ConvolveSteps for Convolve<(u64, (N1, N2, N3))>
where
    N1: Montgomery32NttModulus,
    N2: Montgomery32NttModulus,
    N3: Montgomery32NttModulus,
{
    type T = Vec<u64>;
    // One frequency-domain vector per NTT modulus.
    type F = (MVec<N1>, MVec<N2>, MVec<N3>);

    fn length(t: &Self::T) -> usize {
        t.len()
    }

    /// Reduces every input into each modulus, zero-pads to the next power of
    /// two, and forward-transforms all three copies.
    fn transform(t: Self::T, len: usize) -> Self::F {
        let npot = len.max(1).next_power_of_two();
        let mut f = (
            MVec::<N1>::with_capacity(npot),
            MVec::<N2>::with_capacity(npot),
            MVec::<N3>::with_capacity(npot),
        );
        for t in t {
            f.0.push(t.into());
            f.1.push(t.into());
            f.2.push(t.into());
        }
        f.0.resize_with(npot, Zero::zero);
        f.1.resize_with(npot, Zero::zero);
        f.2.resize_with(npot, Zero::zero);
        ntt(&mut f.0);
        ntt(&mut f.1);
        ntt(&mut f.2);
        f
    }

    /// Inverse-transforms each residue vector and rebuilds the value as
    /// d1 + d2*N1 + d3*N1*N2 in plain `u64` arithmetic.
    fn inverse_transform(f: Self::F, len: usize) -> Self::T {
        // t1 = N1^-1 (mod N2); t2 = (N1*N2)^-1 (mod N3).
        let t1 = MInt::<N2>::new(N1::get_mod()).inv();
        let m1 = N1::get_mod() as u64;
        let m1_3 = MInt::<N3>::new(N1::get_mod());
        let t2 = (m1_3 * MInt::<N3>::new(N2::get_mod())).inv();
        let m2 = m1 * N2::get_mod() as u64;
        Convolve::<N1>::inverse_transform(f.0, len)
            .into_iter()
            .zip(Convolve::<N2>::inverse_transform(f.1, len))
            .zip(Convolve::<N3>::inverse_transform(f.2, len))
            .map(|((c1, c2), c3)| {
                // Garner digits: d1 from N1, then lift through N2 and N3.
                let d1 = c1.inner();
                let d2 = ((c2 - MInt::<N2>::from(d1)) * t1).inner();
                let x = MInt::<N3>::new(d1) + MInt::<N3>::new(d2) * m1_3;
                let d3 = ((c3 - x) * t2).inner();
                // NOTE(review): plain (non-wrapping) u64 arithmetic; assumes
                // each true coefficient fits in u64, otherwise `d3 as u64 * m2`
                // can overflow (panicking in debug builds) -- TODO confirm
                // the intended contract.
                d1 as u64 + d2 as u64 * m1 + d3 as u64 * m2
            })
            .collect()
    }

    /// Pointwise product, applied per modulus.
    fn multiply(f: &mut Self::F, g: &Self::F) {
        assert_eq!(f.0.len(), g.0.len());
        assert_eq!(f.1.len(), g.1.len());
        assert_eq!(f.2.len(), g.2.len());
        for (f, g) in f.0.iter_mut().zip(g.0.iter()) {
            *f *= *g;
        }
        for (f, g) in f.1.iter_mut().zip(g.1.iter()) {
            *f *= *g;
        }
        for (f, g) in f.2.iter_mut().zip(g.2.iter()) {
            *f *= *g;
        }
    }

    /// Full convolution; direct methods for small inputs.
    fn convolve(a: Self::T, b: Self::T) -> Self::T {
        if Self::length(&a).max(Self::length(&b)) <= 300 {
            return convolve_karatsuba(&a, &b);
        }
        if Self::length(&a).min(Self::length(&b)) <= 60 {
            return convolve_naive(&a, &b);
        }
        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
        let mut a = Self::transform(a, len);
        let b = Self::transform(b, len);
        Self::multiply(&mut a, &b);
        Self::inverse_transform(a, len)
    }
}
1121
/// Extra primitives that operate directly on already-transformed
/// (frequency-domain) data, letting callers skip redundant inverse/forward
/// transform pairs.
pub trait NttReuse: ConvolveSteps {
    /// `true` when `Self::F` bundles several residue transforms (the
    /// multi-modulus CRT impls); the single-modulus impl overrides to `false`.
    const MULTIPLE: bool = true;

    /// F(a) → F(a + [0] * a.len())
    fn ntt_doubling(f: Self::F) -> Self::F;

    /// F(a(x)), F(b(x)) → even(F(a(x) * b(-x)))
    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F;

    /// F(a(x)), F(b(x)) → odd(F(a(x) * b(-x)))
    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F;
}
1134
// Per-thread cache of bit-reversal permutation tables, filled lazily by
// `odd_mul_normal_neg` (one entry per log2 of the half-length). UnsafeCell is
// relied on being sound because the data is thread-local and the raw `&mut`
// taken by callers is not held across reentrant access -- TODO confirm no
// reentrancy is possible through `MInt` arithmetic.
thread_local!(
    static BIT_REVERSE: UnsafeCell<Vec<Vec<usize>>> = const { UnsafeCell::new(vec![]) };
);
1138
impl<M> NttReuse for Convolve<M>
where
    M: Montgomery32NttModulus,
{
    // Single modulus: `Self::F` is one transform, not a CRT bundle.
    const MULTIPLE: bool = false;

    /// Extends F(a) of length n to F(a ++ zeros) of length 2n: the new half
    /// is the transform of a with coefficient i twisted by zeta^i, where
    /// zeta is a root of order 2^(k+1) = 2n.
    fn ntt_doubling(mut f: Self::F) -> Self::F {
        let n = f.len();
        let k = n.trailing_zeros() as usize;
        // Recover the coefficients (clones `f`: the original values remain
        // the first half of the doubled transform).
        let mut a = Self::inverse_transform(f.clone(), n);
        let mut rot = MInt::<M>::one();
        let zeta = MInt::<M>::new_unchecked(M::INFO.root[k + 1]);
        for a in a.iter_mut() {
            *a *= rot;
            rot *= zeta;
        }
        f.extend(Self::transform(a, n));
        f
    }

    /// Even-indexed half of F(a(x) * b(-x)), computed without leaving the
    /// frequency domain. Entries 2i and 2i|1 are paired evaluation points
    /// x and -x -- presumably, per the butterfly output order of `ntt`;
    /// confirm against the transform.
    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
        assert_eq!(f.len(), g.len());
        assert!(f.len().is_power_of_two());
        assert!(f.len() >= 2);
        let inv2 = MInt::<M>::from(2).inv();
        let n = f.len() / 2;
        (0..n)
            .map(|i| (f[i << 1] * g[i << 1 | 1] + f[i << 1 | 1] * g[i << 1]) * inv2)
            .collect()
    }

    /// Odd-indexed half of F(a(x) * b(-x)); like the even case but with a
    /// difference instead of a sum and an extra per-entry inverse-root factor.
    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
        assert_eq!(f.len(), g.len());
        assert!(f.len().is_power_of_two());
        assert!(f.len() >= 2);
        // Running factor: starts at 1/2 and gains one power of the inverse
        // root per visited index; indices are visited in bit-reversed order
        // so the accumulated power lines up with the output ordering.
        let mut inv2 = MInt::<M>::from(2).inv();
        let n = f.len() / 2;
        let k = f.len().trailing_zeros() as usize;
        let mut h = vec![MInt::<M>::zero(); n];
        let w = MInt::<M>::new_unchecked(M::INFO.inv_root[k]);
        BIT_REVERSE.with(|br| {
            // Sole access to the thread-local cache in this scope; nothing in
            // the loop body touches BIT_REVERSE again.
            let br = unsafe { &mut *br.get() };
            if br.len() < k {
                br.resize_with(k, Default::default);
            }
            let k = k - 1;
            // Lazily build the bit-reversal permutation of 0..2^k.
            if br[k].is_empty() {
                let mut v = vec![0; 1 << k];
                for i in 0..1 << k {
                    v[i] = (v[i >> 1] >> 1) | ((i & 1) << (k.saturating_sub(1)));
                }
                br[k] = v;
            }
            for &i in &br[k] {
                h[i] = (f[i << 1] * g[i << 1 | 1] - f[i << 1 | 1] * g[i << 1]) * inv2;
                inv2 *= w;
            }
        });
        h
    }
}
1200
1201impl<M, N1, N2, N3> NttReuse for Convolve<(M, (N1, N2, N3))>
1202where
1203    M: MIntConvert + MIntConvert<u32>,
1204    N1: Montgomery32NttModulus,
1205    N2: Montgomery32NttModulus,
1206    N3: Montgomery32NttModulus,
1207{
1208    fn ntt_doubling(f: Self::F) -> Self::F {
1209        (
1210            Convolve::<N1>::ntt_doubling(f.0),
1211            Convolve::<N2>::ntt_doubling(f.1),
1212            Convolve::<N3>::ntt_doubling(f.2),
1213        )
1214    }
1215
1216    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1217        fn even_mul_normal_neg_corrected<M>(f: &[MInt<M>], g: &[MInt<M>], m: u32) -> Vec<MInt<M>>
1218        where
1219            M: Montgomery32NttModulus,
1220        {
1221            let n = f.len();
1222            assert_eq!(f.len(), g.len());
1223            assert!(f.len().is_power_of_two());
1224            assert!(f.len() >= 2);
1225            let inv2 = MInt::<M>::from(2).inv();
1226            let u = MInt::<M>::new(m) * MInt::<M>::from(n as u32);
1227            let n = f.len() / 2;
1228            (0..n)
1229                .map(|i| {
1230                    (f[i << 1]
1231                        * if i == 0 {
1232                            g[i << 1 | 1] + u
1233                        } else {
1234                            g[i << 1 | 1]
1235                        }
1236                        + f[i << 1 | 1] * g[i << 1])
1237                        * inv2
1238                })
1239                .collect()
1240        }
1241
1242        let m = M::mod_into();
1243        (
1244            even_mul_normal_neg_corrected(&f.0, &g.0, m),
1245            even_mul_normal_neg_corrected(&f.1, &g.1, m),
1246            even_mul_normal_neg_corrected(&f.2, &g.2, m),
1247        )
1248    }
1249
1250    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1251        fn odd_mul_normal_neg_corrected<M>(f: &[MInt<M>], g: &[MInt<M>], m: u32) -> Vec<MInt<M>>
1252        where
1253            M: Montgomery32NttModulus,
1254        {
1255            assert_eq!(f.len(), g.len());
1256            assert!(f.len().is_power_of_two());
1257            assert!(f.len() >= 2);
1258            let mut inv2 = MInt::<M>::from(2).inv();
1259            let u = MInt::<M>::new(m) * MInt::<M>::from(f.len() as u32);
1260            let n = f.len() / 2;
1261            let k = f.len().trailing_zeros() as usize;
1262            let mut h = vec![MInt::<M>::zero(); n];
1263            let w = MInt::<M>::new_unchecked(M::INFO.inv_root[k]);
1264            BIT_REVERSE.with(|br| {
1265                let br = unsafe { &mut *br.get() };
1266                if br.len() < k {
1267                    br.resize_with(k, Default::default);
1268                }
1269                let k = k - 1;
1270                if br[k].is_empty() {
1271                    let mut v = vec![0; 1 << k];
1272                    for i in 0..1 << k {
1273                        v[i] = (v[i >> 1] >> 1) | ((i & 1) << (k.saturating_sub(1)));
1274                    }
1275                    br[k] = v;
1276                }
1277                for &i in &br[k] {
1278                    h[i] = (f[i << 1]
1279                        * if i == 0 {
1280                            g[i << 1 | 1] + u
1281                        } else {
1282                            g[i << 1 | 1]
1283                        }
1284                        - f[i << 1 | 1] * g[i << 1])
1285                        * inv2;
1286                    inv2 *= w;
1287                }
1288            });
1289            h
1290        }
Source

pub fn get_mod() -> M::Inner

Source

pub fn pow(self, y: usize) -> Self

Examples found in repository?
crates/competitive/src/math/formal_power_series/mod.rs (line 78)
77    fn pow(self, exp: usize) -> Self {
78        Self::pow(self, exp)
79    }
More examples
Hide additional examples
crates/competitive/src/algorithm/chromatic_number.rs (line 40)
36    pub fn k_colorable(&self, k: usize) -> bool {
37        !self
38            .ind
39            .iter()
40            .map(|d| d.pow(k))
41            .enumerate()
42            .map(|(s, d)| if s.count_ones() % 2 == 0 { d } else { -d })
43            .sum::<MInt<M>>()
44            .is_zero()
45    }
Source

pub fn inv(self) -> Self

Examples found in repository?
crates/competitive/src/math/number_theoretic_transform.rs (line 905)
902    fn inverse_transform(mut f: Self::F, len: usize) -> Self::T {
903        intt(&mut f);
904        f.truncate(len);
905        let inv = MInt::from(len.max(1).next_power_of_two() as u32).inv();
906        for f in f.iter_mut() {
907            *f *= inv;
908        }
909        f
910    }
911    fn multiply(f: &mut Self::F, g: &Self::F) {
912        assert_eq!(f.len(), g.len());
913        for (f, g) in f.iter_mut().zip(g.iter()) {
914            *f *= *g;
915        }
916    }
917    fn convolve(mut a: Self::T, mut b: Self::T) -> Self::T {
918        if Self::length(&a).max(Self::length(&b)) <= 100 {
919            return convolve_karatsuba(&a, &b);
920        }
921        if Self::length(&a).min(Self::length(&b)) <= 60 {
922            return convolve_naive(&a, &b);
923        }
924        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
925        let size = len.max(1).next_power_of_two();
926        if len <= size / 2 + 2 {
927            let xa = a.pop().unwrap();
928            let xb = b.pop().unwrap();
929            let mut c = vec![MInt::<M>::zero(); len];
930            *c.last_mut().unwrap() = xa * xb;
931            for (a, c) in a.iter().zip(&mut c[b.len()..]) {
932                *c += *a * xb;
933            }
934            for (b, c) in b.iter().zip(&mut c[a.len()..]) {
935                *c += *b * xa;
936            }
937            let d = Self::convolve(a, b);
938            for (d, c) in d.into_iter().zip(&mut c) {
939                *c += d;
940            }
941            return c;
942        }
943        let same = a == b;
944        let mut a = Self::transform(a, len);
945        if same {
946            for a in a.iter_mut() {
947                *a *= *a;
948            }
949        } else {
950            let b = Self::transform(b, len);
951            Self::multiply(&mut a, &b);
952        }
953        Self::inverse_transform(a, len)
954    }
955}
956
957type MVec<M> = Vec<MInt<M>>;
958impl<M, N1, N2, N3> ConvolveSteps for Convolve<(M, (N1, N2, N3))>
959where
960    M: MIntConvert + MIntConvert<u32>,
961    N1: Montgomery32NttModulus,
962    N2: Montgomery32NttModulus,
963    N3: Montgomery32NttModulus,
964{
965    type T = MVec<M>;
966    type F = (MVec<N1>, MVec<N2>, MVec<N3>);
967    fn length(t: &Self::T) -> usize {
968        t.len()
969    }
970    fn transform(t: Self::T, len: usize) -> Self::F {
971        let npot = len.max(1).next_power_of_two();
972        let mut f = (
973            MVec::<N1>::with_capacity(npot),
974            MVec::<N2>::with_capacity(npot),
975            MVec::<N3>::with_capacity(npot),
976        );
977        for t in t {
978            f.0.push(<M as MIntConvert<u32>>::into(t.inner()).into());
979            f.1.push(<M as MIntConvert<u32>>::into(t.inner()).into());
980            f.2.push(<M as MIntConvert<u32>>::into(t.inner()).into());
981        }
982        f.0.resize_with(npot, Zero::zero);
983        f.1.resize_with(npot, Zero::zero);
984        f.2.resize_with(npot, Zero::zero);
985        ntt(&mut f.0);
986        ntt(&mut f.1);
987        ntt(&mut f.2);
988        f
989    }
990    fn inverse_transform(f: Self::F, len: usize) -> Self::T {
991        let t1 = MInt::<N2>::new(N1::get_mod()).inv();
992        let m1 = MInt::<M>::from(N1::get_mod());
993        let m1_3 = MInt::<N3>::new(N1::get_mod());
994        let t2 = (m1_3 * MInt::<N3>::new(N2::get_mod())).inv();
995        let m2 = m1 * MInt::<M>::from(N2::get_mod());
996        Convolve::<N1>::inverse_transform(f.0, len)
997            .into_iter()
998            .zip(Convolve::<N2>::inverse_transform(f.1, len))
999            .zip(Convolve::<N3>::inverse_transform(f.2, len))
1000            .map(|((c1, c2), c3)| {
1001                let d1 = c1.inner();
1002                let d2 = ((c2 - MInt::<N2>::from(d1)) * t1).inner();
1003                let x = MInt::<N3>::new(d1) + MInt::<N3>::new(d2) * m1_3;
1004                let d3 = ((c3 - x) * t2).inner();
1005                MInt::<M>::from(d1) + MInt::<M>::from(d2) * m1 + MInt::<M>::from(d3) * m2
1006            })
1007            .collect()
1008    }
1009    fn multiply(f: &mut Self::F, g: &Self::F) {
1010        assert_eq!(f.0.len(), g.0.len());
1011        assert_eq!(f.1.len(), g.1.len());
1012        assert_eq!(f.2.len(), g.2.len());
1013        for (f, g) in f.0.iter_mut().zip(g.0.iter()) {
1014            *f *= *g;
1015        }
1016        for (f, g) in f.1.iter_mut().zip(g.1.iter()) {
1017            *f *= *g;
1018        }
1019        for (f, g) in f.2.iter_mut().zip(g.2.iter()) {
1020            *f *= *g;
1021        }
1022    }
1023    fn convolve(a: Self::T, b: Self::T) -> Self::T {
1024        if Self::length(&a).max(Self::length(&b)) <= 300 {
1025            return convolve_karatsuba(&a, &b);
1026        }
1027        if Self::length(&a).min(Self::length(&b)) <= 60 {
1028            return convolve_naive(&a, &b);
1029        }
1030        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
1031        let mut a = Self::transform(a, len);
1032        let b = Self::transform(b, len);
1033        Self::multiply(&mut a, &b);
1034        Self::inverse_transform(a, len)
1035    }
1036}
1037
1038impl<N1, N2, N3> ConvolveSteps for Convolve<(u64, (N1, N2, N3))>
1039where
1040    N1: Montgomery32NttModulus,
1041    N2: Montgomery32NttModulus,
1042    N3: Montgomery32NttModulus,
1043{
1044    type T = Vec<u64>;
1045    type F = (MVec<N1>, MVec<N2>, MVec<N3>);
1046
1047    fn length(t: &Self::T) -> usize {
1048        t.len()
1049    }
1050
1051    fn transform(t: Self::T, len: usize) -> Self::F {
1052        let npot = len.max(1).next_power_of_two();
1053        let mut f = (
1054            MVec::<N1>::with_capacity(npot),
1055            MVec::<N2>::with_capacity(npot),
1056            MVec::<N3>::with_capacity(npot),
1057        );
1058        for t in t {
1059            f.0.push(t.into());
1060            f.1.push(t.into());
1061            f.2.push(t.into());
1062        }
1063        f.0.resize_with(npot, Zero::zero);
1064        f.1.resize_with(npot, Zero::zero);
1065        f.2.resize_with(npot, Zero::zero);
1066        ntt(&mut f.0);
1067        ntt(&mut f.1);
1068        ntt(&mut f.2);
1069        f
1070    }
1071
1072    fn inverse_transform(f: Self::F, len: usize) -> Self::T {
1073        let t1 = MInt::<N2>::new(N1::get_mod()).inv();
1074        let m1 = N1::get_mod() as u64;
1075        let m1_3 = MInt::<N3>::new(N1::get_mod());
1076        let t2 = (m1_3 * MInt::<N3>::new(N2::get_mod())).inv();
1077        let m2 = m1 * N2::get_mod() as u64;
1078        Convolve::<N1>::inverse_transform(f.0, len)
1079            .into_iter()
1080            .zip(Convolve::<N2>::inverse_transform(f.1, len))
1081            .zip(Convolve::<N3>::inverse_transform(f.2, len))
1082            .map(|((c1, c2), c3)| {
1083                let d1 = c1.inner();
1084                let d2 = ((c2 - MInt::<N2>::from(d1)) * t1).inner();
1085                let x = MInt::<N3>::new(d1) + MInt::<N3>::new(d2) * m1_3;
1086                let d3 = ((c3 - x) * t2).inner();
1087                d1 as u64 + d2 as u64 * m1 + d3 as u64 * m2
1088            })
1089            .collect()
1090    }
1091
1092    fn multiply(f: &mut Self::F, g: &Self::F) {
1093        assert_eq!(f.0.len(), g.0.len());
1094        assert_eq!(f.1.len(), g.1.len());
1095        assert_eq!(f.2.len(), g.2.len());
1096        for (f, g) in f.0.iter_mut().zip(g.0.iter()) {
1097            *f *= *g;
1098        }
1099        for (f, g) in f.1.iter_mut().zip(g.1.iter()) {
1100            *f *= *g;
1101        }
1102        for (f, g) in f.2.iter_mut().zip(g.2.iter()) {
1103            *f *= *g;
1104        }
1105    }
1106
1107    fn convolve(a: Self::T, b: Self::T) -> Self::T {
1108        if Self::length(&a).max(Self::length(&b)) <= 300 {
1109            return convolve_karatsuba(&a, &b);
1110        }
1111        if Self::length(&a).min(Self::length(&b)) <= 60 {
1112            return convolve_naive(&a, &b);
1113        }
1114        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
1115        let mut a = Self::transform(a, len);
1116        let b = Self::transform(b, len);
1117        Self::multiply(&mut a, &b);
1118        Self::inverse_transform(a, len)
1119    }
1120}
1121
1122pub trait NttReuse: ConvolveSteps {
1123    const MULTIPLE: bool = true;
1124
1125    /// F(a) → F(a + [0] * a.len())
1126    fn ntt_doubling(f: Self::F) -> Self::F;
1127
1128    /// F(a(x)), F(b(x)) → even(F(a(x) * b(-x)))
1129    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F;
1130
1131    /// F(a(x)), F(b(x)) → odd(F(a(x) * b(-x)))
1132    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F;
1133}
1134
1135thread_local!(
1136    static BIT_REVERSE: UnsafeCell<Vec<Vec<usize>>> = const { UnsafeCell::new(vec![]) };
1137);
1138
1139impl<M> NttReuse for Convolve<M>
1140where
1141    M: Montgomery32NttModulus,
1142{
1143    const MULTIPLE: bool = false;
1144
1145    fn ntt_doubling(mut f: Self::F) -> Self::F {
1146        let n = f.len();
1147        let k = n.trailing_zeros() as usize;
1148        let mut a = Self::inverse_transform(f.clone(), n);
1149        let mut rot = MInt::<M>::one();
1150        let zeta = MInt::<M>::new_unchecked(M::INFO.root[k + 1]);
1151        for a in a.iter_mut() {
1152            *a *= rot;
1153            rot *= zeta;
1154        }
1155        f.extend(Self::transform(a, n));
1156        f
1157    }
1158
1159    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1160        assert_eq!(f.len(), g.len());
1161        assert!(f.len().is_power_of_two());
1162        assert!(f.len() >= 2);
1163        let inv2 = MInt::<M>::from(2).inv();
1164        let n = f.len() / 2;
1165        (0..n)
1166            .map(|i| (f[i << 1] * g[i << 1 | 1] + f[i << 1 | 1] * g[i << 1]) * inv2)
1167            .collect()
1168    }
1169
1170    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1171        assert_eq!(f.len(), g.len());
1172        assert!(f.len().is_power_of_two());
1173        assert!(f.len() >= 2);
1174        let mut inv2 = MInt::<M>::from(2).inv();
1175        let n = f.len() / 2;
1176        let k = f.len().trailing_zeros() as usize;
1177        let mut h = vec![MInt::<M>::zero(); n];
1178        let w = MInt::<M>::new_unchecked(M::INFO.inv_root[k]);
1179        BIT_REVERSE.with(|br| {
1180            let br = unsafe { &mut *br.get() };
1181            if br.len() < k {
1182                br.resize_with(k, Default::default);
1183            }
1184            let k = k - 1;
1185            if br[k].is_empty() {
1186                let mut v = vec![0; 1 << k];
1187                for i in 0..1 << k {
1188                    v[i] = (v[i >> 1] >> 1) | ((i & 1) << (k.saturating_sub(1)));
1189                }
1190                br[k] = v;
1191            }
1192            for &i in &br[k] {
1193                h[i] = (f[i << 1] * g[i << 1 | 1] - f[i << 1 | 1] * g[i << 1]) * inv2;
1194                inv2 *= w;
1195            }
1196        });
1197        h
1198    }
1199}
1200
1201impl<M, N1, N2, N3> NttReuse for Convolve<(M, (N1, N2, N3))>
1202where
1203    M: MIntConvert + MIntConvert<u32>,
1204    N1: Montgomery32NttModulus,
1205    N2: Montgomery32NttModulus,
1206    N3: Montgomery32NttModulus,
1207{
1208    fn ntt_doubling(f: Self::F) -> Self::F {
1209        (
1210            Convolve::<N1>::ntt_doubling(f.0),
1211            Convolve::<N2>::ntt_doubling(f.1),
1212            Convolve::<N3>::ntt_doubling(f.2),
1213        )
1214    }
1215
1216    fn even_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1217        fn even_mul_normal_neg_corrected<M>(f: &[MInt<M>], g: &[MInt<M>], m: u32) -> Vec<MInt<M>>
1218        where
1219            M: Montgomery32NttModulus,
1220        {
1221            let n = f.len();
1222            assert_eq!(f.len(), g.len());
1223            assert!(f.len().is_power_of_two());
1224            assert!(f.len() >= 2);
1225            let inv2 = MInt::<M>::from(2).inv();
1226            let u = MInt::<M>::new(m) * MInt::<M>::from(n as u32);
1227            let n = f.len() / 2;
1228            (0..n)
1229                .map(|i| {
1230                    (f[i << 1]
1231                        * if i == 0 {
1232                            g[i << 1 | 1] + u
1233                        } else {
1234                            g[i << 1 | 1]
1235                        }
1236                        + f[i << 1 | 1] * g[i << 1])
1237                        * inv2
1238                })
1239                .collect()
1240        }
1241
1242        let m = M::mod_into();
1243        (
1244            even_mul_normal_neg_corrected(&f.0, &g.0, m),
1245            even_mul_normal_neg_corrected(&f.1, &g.1, m),
1246            even_mul_normal_neg_corrected(&f.2, &g.2, m),
1247        )
1248    }
1249
1250    fn odd_mul_normal_neg(f: &Self::F, g: &Self::F) -> Self::F {
1251        fn odd_mul_normal_neg_corrected<M>(f: &[MInt<M>], g: &[MInt<M>], m: u32) -> Vec<MInt<M>>
1252        where
1253            M: Montgomery32NttModulus,
1254        {
1255            assert_eq!(f.len(), g.len());
1256            assert!(f.len().is_power_of_two());
1257            assert!(f.len() >= 2);
1258            let mut inv2 = MInt::<M>::from(2).inv();
1259            let u = MInt::<M>::new(m) * MInt::<M>::from(f.len() as u32);
1260            let n = f.len() / 2;
1261            let k = f.len().trailing_zeros() as usize;
1262            let mut h = vec![MInt::<M>::zero(); n];
1263            let w = MInt::<M>::new_unchecked(M::INFO.inv_root[k]);
1264            BIT_REVERSE.with(|br| {
1265                let br = unsafe { &mut *br.get() };
1266                if br.len() < k {
1267                    br.resize_with(k, Default::default);
1268                }
1269                let k = k - 1;
1270                if br[k].is_empty() {
1271                    let mut v = vec![0; 1 << k];
1272                    for i in 0..1 << k {
1273                        v[i] = (v[i >> 1] >> 1) | ((i & 1) << (k.saturating_sub(1)));
1274                    }
1275                    br[k] = v;
1276                }
1277                for &i in &br[k] {
1278                    h[i] = (f[i << 1]
1279                        * if i == 0 {
1280                            g[i << 1 | 1] + u
1281                        } else {
1282                            g[i << 1 | 1]
1283                        }
1284                        - f[i << 1 | 1] * g[i << 1])
1285                        * inv2;
1286                    inv2 *= w;
1287                }
1288            });
1289            h
1290        }
More examples
Hide additional examples
crates/competitive/src/math/factorial.rs (line 22)
16    pub fn new(max_n: usize) -> Self {
17        let mut fact = vec![MInt::one(); max_n + 1];
18        let mut inv_fact = vec![MInt::one(); max_n + 1];
19        for i in 2..=max_n {
20            fact[i] = fact[i - 1] * MInt::from(i);
21        }
22        inv_fact[max_n] = fact[max_n].inv();
23        for i in (3..=max_n).rev() {
24            inv_fact[i - 1] = inv_fact[i] * MInt::from(i);
25        }
26        Self { fact, inv_fact }
27    }
crates/competitive/src/math/black_box_matrix.rs (line 290)
279    fn black_box_linear_equation(&self, mut b: Vec<MInt<M>>) -> Option<Vec<MInt<M>>>
280    where
281        M: MIntConvert<u64>,
282    {
283        assert_eq!(self.shape().0, self.shape().1);
284        assert_eq!(self.shape().1, b.len());
285        let n = self.shape().0;
286        let p = self.minimal_polynomial();
287        if p.is_empty() || p[0].is_zero() {
288            return None;
289        }
290        let p0_inv = p[0].inv();
291        let mut x = vec![MInt::zero(); n];
292        for p in p.into_iter().skip(1) {
293            let p = -p * p0_inv;
294            for i in 0..n {
295                x[i] += p * b[i];
296            }
297            b = self.apply(&b);
298        }
299        Some(x)
300    }
crates/library_checker/src/number_theory/sum_of_totient_function.rs (line 19)
12pub fn sum_of_totient_function(reader: impl Read, mut writer: impl Write) {
13    let s = read_all_unchecked(reader);
14    let mut scanner = Scanner::new(&s);
15    scan!(scanner, n: u64);
16    let mut s = 1;
17    let mut pp = 0;
18    let mut pc = 0;
19    let inv2 = M::new(2).inv();
20    let qa = QuotientArray::from_fn(n, |i| [M::from(i), M::from(i) * M::from(i + 1) * inv2])
21        .map(|[x, y]| [x - M::one(), y - M::one()])
22        .lucy_dp::<ArrayOperation<AdditiveOperation<_>, 2>>(|[x, y], p| [x, y * M::from(p)])
23        .map(|[x, y]| y - x)
24        .min_25_sieve::<AddMulOperation<_>>(|p, c| {
25            if pp != p || pc > c {
26                pp = p;
27                pc = 1;
28                s = p - 1;
29            }
30            while pc < c {
31                pc += 1;
32                s *= p;
33            }
34            M::from(s)
35        });
36    writeln!(writer, "{}", qa[n]).ok();
37}
crates/competitive/src/math/lagrange_interpolation.rs (line 78)
50pub fn lagrange_interpolation_polynomial<M>(x: &[MInt<M>], y: &[MInt<M>]) -> Vec<MInt<M>>
51where
52    M: MIntBase,
53{
54    let n = x.len() - 1;
55    let mut dp = vec![MInt::zero(); n + 2];
56    let mut ndp = vec![MInt::zero(); n + 2];
57    dp[0] = -x[0];
58    dp[1] = MInt::one();
59    for x in x.iter().skip(1) {
60        for j in 0..=n + 1 {
61            ndp[j] = -dp[j] * x + if j >= 1 { dp[j - 1] } else { MInt::zero() };
62        }
63        std::mem::swap(&mut dp, &mut ndp);
64    }
65    let mut res = vec![MInt::zero(); n + 1];
66    for i in 0..=n {
67        let t = y[i]
68            / (0..=n)
69                .map(|j| if i != j { x[i] - x[j] } else { MInt::one() })
70                .product::<MInt<M>>();
71        if t.is_zero() {
72            continue;
73        } else if x[i].is_zero() {
74            for j in 0..=n {
75                res[j] += dp[j + 1] * t;
76            }
77        } else {
78            let xinv = x[i].inv();
79            let mut pre = MInt::zero();
80            for j in 0..=n {
81                let d = -(dp[j] - pre) * xinv;
82                res[j] += d * t;
83                pre = d;
84            }
85        }
86    }
87    res
88}
crates/competitive/src/math/mint_matrix.rs (line 55)
41    fn determinant_linear_non_singular(mut self, mut other: Self) -> Option<Vec<MInt<M>>>
42    where
43        M: MIntBase,
44    {
45        let n = self.data.len();
46        let mut f = MInt::one();
47        for d in 0..n {
48            let i = other.data.iter().position(|other| !other[d].is_zero())?;
49            if i != d {
50                self.data.swap(i, d);
51                other.data.swap(i, d);
52                f = -f;
53            }
54            f *= other[d][d];
55            let r = other[d][d].inv();
56            for j in 0..n {
57                self[d][j] *= r;
58                other[d][j] *= r;
59            }
60            assert!(other[d][d].is_one());
61            for i in d + 1..n {
62                let a = other[i][d];
63                for k in 0..n {
64                    self[i][k] = self[i][k] - a * self[d][k];
65                    other[i][k] = other[i][k] - a * other[d][k];
66                }
67            }
68            for j in d + 1..n {
69                let a = other[d][j];
70                for k in 0..n {
71                    self[k][j] = self[k][j] - a * self[k][d];
72                    other[k][j] = other[k][j] - a * other[k][d];
73                }
74            }
75        }
76        for s in self.data.iter_mut() {
77            for s in s.iter_mut() {
78                *s = -*s;
79            }
80        }
81        let mut p = self.characteristic_polynomial();
82        for p in p.iter_mut() {
83            *p *= f;
84        }
85        Some(p)
86    }
Source

pub fn inner(self) -> M::Inner

Examples found in repository?
crates/competitive/src/num/mint/mint_base.rs (line 107)
106    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
107        Debug::fmt(&self.inner(), f)
108    }
109}
110impl<M> Default for MInt<M>
111where
112    M: MIntBase,
113{
114    #[inline]
115    fn default() -> Self {
116        <Self as Zero>::zero()
117    }
118}
119impl<M> PartialEq for MInt<M>
120where
121    M: MIntBase,
122{
123    #[inline]
124    fn eq(&self, other: &Self) -> bool {
125        PartialEq::eq(&self.x, &other.x)
126    }
127}
128impl<M> Eq for MInt<M> where M: MIntBase {}
129impl<M> Hash for MInt<M>
130where
131    M: MIntBase,
132{
133    #[inline]
134    fn hash<H: Hasher>(&self, state: &mut H) {
135        Hash::hash(&self.x, state)
136    }
137}
138macro_rules! impl_mint_from {
139    ($($t:ty),*) => {
140        $(impl<M> From<$t> for MInt<M>
141        where
142            M: MIntConvert<$t>,
143        {
144            #[inline]
145            fn from(x: $t) -> Self {
146                Self::new_unchecked(<M as MIntConvert<$t>>::from(x))
147            }
148        }
149        impl<M> From<MInt<M>> for $t
150        where
151            M: MIntConvert<$t>,
152        {
153            #[inline]
154            fn from(x: MInt<M>) -> $t {
155                <M as MIntConvert<$t>>::into(x.x)
156            }
157        })*
158    };
159}
160impl_mint_from!(
161    u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize
162);
163impl<M> Zero for MInt<M>
164where
165    M: MIntBase,
166{
167    #[inline]
168    fn zero() -> Self {
169        Self::new_unchecked(M::mod_zero())
170    }
171}
172impl<M> One for MInt<M>
173where
174    M: MIntBase,
175{
176    #[inline]
177    fn one() -> Self {
178        Self::new_unchecked(M::mod_one())
179    }
180}
181
182impl<M> Add for MInt<M>
183where
184    M: MIntBase,
185{
186    type Output = Self;
187    #[inline]
188    fn add(self, rhs: Self) -> Self::Output {
189        Self::new_unchecked(M::mod_add(self.x, rhs.x))
190    }
191}
192impl<M> Sub for MInt<M>
193where
194    M: MIntBase,
195{
196    type Output = Self;
197    #[inline]
198    fn sub(self, rhs: Self) -> Self::Output {
199        Self::new_unchecked(M::mod_sub(self.x, rhs.x))
200    }
201}
202impl<M> Mul for MInt<M>
203where
204    M: MIntBase,
205{
206    type Output = Self;
207    #[inline]
208    fn mul(self, rhs: Self) -> Self::Output {
209        Self::new_unchecked(M::mod_mul(self.x, rhs.x))
210    }
211}
212impl<M> Div for MInt<M>
213where
214    M: MIntBase,
215{
216    type Output = Self;
217    #[inline]
218    fn div(self, rhs: Self) -> Self::Output {
219        Self::new_unchecked(M::mod_div(self.x, rhs.x))
220    }
221}
222impl<M> Neg for MInt<M>
223where
224    M: MIntBase,
225{
226    type Output = Self;
227    #[inline]
228    fn neg(self) -> Self::Output {
229        Self::new_unchecked(M::mod_neg(self.x))
230    }
231}
232impl<M> Sum for MInt<M>
233where
234    M: MIntBase,
235{
236    #[inline]
237    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
238        iter.fold(<Self as Zero>::zero(), Add::add)
239    }
240}
241impl<M> Product for MInt<M>
242where
243    M: MIntBase,
244{
245    #[inline]
246    fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
247        iter.fold(<Self as One>::one(), Mul::mul)
248    }
249}
250impl<'a, M: 'a> Sum<&'a MInt<M>> for MInt<M>
251where
252    M: MIntBase,
253{
254    #[inline]
255    fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
256        iter.fold(<Self as Zero>::zero(), Add::add)
257    }
258}
259impl<'a, M: 'a> Product<&'a MInt<M>> for MInt<M>
260where
261    M: MIntBase,
262{
263    #[inline]
264    fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
265        iter.fold(<Self as One>::one(), Mul::mul)
266    }
267}
268impl<M> Display for MInt<M>
269where
270    M: MIntBase<Inner: Display>,
271{
272    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
273        write!(f, "{}", self.inner())
274    }
275}
276impl<M> FromStr for MInt<M>
277where
278    M: MIntConvert + MIntBase<Inner: FromStr>,
279{
280    type Err = <M::Inner as FromStr>::Err;
281    #[inline]
282    fn from_str(s: &str) -> Result<Self, Self::Err> {
283        s.parse::<M::Inner>().map(Self::new)
284    }
285}
286impl<M> IterScan for MInt<M>
287where
288    M: MIntConvert + MIntBase<Inner: FromStr>,
289{
290    type Output = Self;
291    #[inline]
292    fn scan<'a, I: Iterator<Item = &'a str>>(iter: &mut I) -> Option<Self::Output> {
293        iter.next()?.parse::<MInt<M>>().ok()
294    }
295}
296impl<M> SerdeByteStr for MInt<M>
297where
298    M: MIntBase<Inner: SerdeByteStr>,
299{
300    fn serialize(&self, buf: &mut Vec<u8>) {
301        self.inner().serialize(buf)
302    }
More examples
Hide additional examples
crates/competitive/src/math/number_theoretic_transform.rs (line 978)
970    fn transform(t: Self::T, len: usize) -> Self::F {
971        let npot = len.max(1).next_power_of_two();
972        let mut f = (
973            MVec::<N1>::with_capacity(npot),
974            MVec::<N2>::with_capacity(npot),
975            MVec::<N3>::with_capacity(npot),
976        );
977        for t in t {
978            f.0.push(<M as MIntConvert<u32>>::into(t.inner()).into());
979            f.1.push(<M as MIntConvert<u32>>::into(t.inner()).into());
980            f.2.push(<M as MIntConvert<u32>>::into(t.inner()).into());
981        }
982        f.0.resize_with(npot, Zero::zero);
983        f.1.resize_with(npot, Zero::zero);
984        f.2.resize_with(npot, Zero::zero);
985        ntt(&mut f.0);
986        ntt(&mut f.1);
987        ntt(&mut f.2);
988        f
989    }
990    fn inverse_transform(f: Self::F, len: usize) -> Self::T {
991        let t1 = MInt::<N2>::new(N1::get_mod()).inv();
992        let m1 = MInt::<M>::from(N1::get_mod());
993        let m1_3 = MInt::<N3>::new(N1::get_mod());
994        let t2 = (m1_3 * MInt::<N3>::new(N2::get_mod())).inv();
995        let m2 = m1 * MInt::<M>::from(N2::get_mod());
996        Convolve::<N1>::inverse_transform(f.0, len)
997            .into_iter()
998            .zip(Convolve::<N2>::inverse_transform(f.1, len))
999            .zip(Convolve::<N3>::inverse_transform(f.2, len))
1000            .map(|((c1, c2), c3)| {
1001                let d1 = c1.inner();
1002                let d2 = ((c2 - MInt::<N2>::from(d1)) * t1).inner();
1003                let x = MInt::<N3>::new(d1) + MInt::<N3>::new(d2) * m1_3;
1004                let d3 = ((c3 - x) * t2).inner();
1005                MInt::<M>::from(d1) + MInt::<M>::from(d2) * m1 + MInt::<M>::from(d3) * m2
1006            })
1007            .collect()
1008    }
1009    fn multiply(f: &mut Self::F, g: &Self::F) {
1010        assert_eq!(f.0.len(), g.0.len());
1011        assert_eq!(f.1.len(), g.1.len());
1012        assert_eq!(f.2.len(), g.2.len());
1013        for (f, g) in f.0.iter_mut().zip(g.0.iter()) {
1014            *f *= *g;
1015        }
1016        for (f, g) in f.1.iter_mut().zip(g.1.iter()) {
1017            *f *= *g;
1018        }
1019        for (f, g) in f.2.iter_mut().zip(g.2.iter()) {
1020            *f *= *g;
1021        }
1022    }
1023    fn convolve(a: Self::T, b: Self::T) -> Self::T {
1024        if Self::length(&a).max(Self::length(&b)) <= 300 {
1025            return convolve_karatsuba(&a, &b);
1026        }
1027        if Self::length(&a).min(Self::length(&b)) <= 60 {
1028            return convolve_naive(&a, &b);
1029        }
1030        let len = (Self::length(&a) + Self::length(&b)).saturating_sub(1);
1031        let mut a = Self::transform(a, len);
1032        let b = Self::transform(b, len);
1033        Self::multiply(&mut a, &b);
1034        Self::inverse_transform(a, len)
1035    }
1036}
1037
1038impl<N1, N2, N3> ConvolveSteps for Convolve<(u64, (N1, N2, N3))>
1039where
1040    N1: Montgomery32NttModulus,
1041    N2: Montgomery32NttModulus,
1042    N3: Montgomery32NttModulus,
1043{
1044    type T = Vec<u64>;
1045    type F = (MVec<N1>, MVec<N2>, MVec<N3>);
1046
1047    fn length(t: &Self::T) -> usize {
1048        t.len()
1049    }
1050
1051    fn transform(t: Self::T, len: usize) -> Self::F {
1052        let npot = len.max(1).next_power_of_two();
1053        let mut f = (
1054            MVec::<N1>::with_capacity(npot),
1055            MVec::<N2>::with_capacity(npot),
1056            MVec::<N3>::with_capacity(npot),
1057        );
1058        for t in t {
1059            f.0.push(t.into());
1060            f.1.push(t.into());
1061            f.2.push(t.into());
1062        }
1063        f.0.resize_with(npot, Zero::zero);
1064        f.1.resize_with(npot, Zero::zero);
1065        f.2.resize_with(npot, Zero::zero);
1066        ntt(&mut f.0);
1067        ntt(&mut f.1);
1068        ntt(&mut f.2);
1069        f
1070    }
1071
1072    fn inverse_transform(f: Self::F, len: usize) -> Self::T {
1073        let t1 = MInt::<N2>::new(N1::get_mod()).inv();
1074        let m1 = N1::get_mod() as u64;
1075        let m1_3 = MInt::<N3>::new(N1::get_mod());
1076        let t2 = (m1_3 * MInt::<N3>::new(N2::get_mod())).inv();
1077        let m2 = m1 * N2::get_mod() as u64;
1078        Convolve::<N1>::inverse_transform(f.0, len)
1079            .into_iter()
1080            .zip(Convolve::<N2>::inverse_transform(f.1, len))
1081            .zip(Convolve::<N3>::inverse_transform(f.2, len))
1082            .map(|((c1, c2), c3)| {
1083                let d1 = c1.inner();
1084                let d2 = ((c2 - MInt::<N2>::from(d1)) * t1).inner();
1085                let x = MInt::<N3>::new(d1) + MInt::<N3>::new(d2) * m1_3;
1086                let d3 = ((c3 - x) * t2).inner();
1087                d1 as u64 + d2 as u64 * m1 + d3 as u64 * m2
1088            })
1089            .collect()
1090    }
Source§

impl MInt<DynModuloU32>

Source

pub fn set_mod(m: u32)

Examples found in repository?
crates/library_checker/src/number_theory/sqrt_mod.rs (line 11)
6pub fn sqrt_mod(reader: impl Read, mut writer: impl Write) {
7    let s = read_all_unchecked(reader);
8    let mut scanner = Scanner::new(&s);
9    scan!(scanner, q, yp: [(u32, u32)]);
10    for (y, p) in yp.take(q) {
11        DynMIntU32::set_mod(p);
12        if let Some(x) = DynMIntU32::from(y).sqrt() {
13            writeln!(writer, "{}", x).ok();
14        } else {
15            writeln!(writer, "-1").ok();
16        }
17    }
18}
More examples
Hide additional examples
crates/library_checker/src/enumerative_combinatorics/binomial_coefficient_prime_mod.rs (line 10)
6pub fn binomial_coefficient_prime_mod(reader: impl Read, mut writer: impl Write) {
7    let s = read_all_unchecked(reader);
8    let mut scanner = Scanner::new(&s);
9    scan!(scanner, t, m: u32, nk: [(usize, usize); t]);
10    DynMIntU32::set_mod(m);
11    let max_n = nk.iter().map(|(n, _)| n).max().cloned().unwrap_or_default();
12    let f = MemorizedFactorial::new(max_n);
13    for (n, k) in nk {
14        let ans: DynMIntU32 = f.combination(n, k);
15        iter_print!(writer, ans);
16    }
17}
Source§

impl MInt<DynModuloU64>

Source

pub fn set_mod(m: u64)

Trait Implementations§

Source§

impl<M> Add<&MInt<M>> for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Add>::Output

The resulting type after applying the + operator.
Source§

fn add(self, other: &MInt<M>) -> <MInt<M> as Add<MInt<M>>>::Output

Performs the + operation. Read more
Source§

impl<M> Add<&MInt<M>> for MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Add>::Output

The resulting type after applying the + operator.
Source§

fn add(self, other: &MInt<M>) -> <MInt<M> as Add<MInt<M>>>::Output

Performs the + operation. Read more
Source§

impl<M> Add<MInt<M>> for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Add>::Output

The resulting type after applying the + operator.
Source§

fn add(self, other: MInt<M>) -> <MInt<M> as Add<MInt<M>>>::Output

Performs the + operation. Read more
Source§

impl<M> Add for MInt<M>
where M: MIntBase,

Source§

type Output = MInt<M>

The resulting type after applying the + operator.
Source§

fn add(self, rhs: Self) -> Self::Output

Performs the + operation. Read more
Source§

impl<M> AddAssign<&MInt<M>> for MInt<M>
where M: MIntBase,

Source§

fn add_assign(&mut self, other: &MInt<M>)

Performs the += operation. Read more
Source§

impl<M> AddAssign for MInt<M>
where M: MIntBase,

Source§

fn add_assign(&mut self, rhs: MInt<M>)

Performs the += operation. Read more
Source§

impl<M> Clone for MInt<M>
where M: MIntBase,

Source§

fn clone(&self) -> Self

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl<M> Debug for MInt<M>
where M: MIntBase,

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl<M> Default for MInt<M>
where M: MIntBase,

Source§

fn default() -> Self

Returns the “default value” for a type. Read more
Source§

impl<M> Display for MInt<M>
where M: MIntBase<Inner: Display>,

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
Source§

impl<M> Div<&MInt<M>> for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Div>::Output

The resulting type after applying the / operator.
Source§

fn div(self, other: &MInt<M>) -> <MInt<M> as Div<MInt<M>>>::Output

Performs the / operation. Read more
Source§

impl<M> Div<&MInt<M>> for MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Div>::Output

The resulting type after applying the / operator.
Source§

fn div(self, other: &MInt<M>) -> <MInt<M> as Div<MInt<M>>>::Output

Performs the / operation. Read more
Source§

impl<M> Div<MInt<M>> for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Div>::Output

The resulting type after applying the / operator.
Source§

fn div(self, other: MInt<M>) -> <MInt<M> as Div<MInt<M>>>::Output

Performs the / operation. Read more
Source§

impl<M> Div for MInt<M>
where M: MIntBase,

Source§

type Output = MInt<M>

The resulting type after applying the / operator.
Source§

fn div(self, rhs: Self) -> Self::Output

Performs the / operation. Read more
Source§

impl<M> DivAssign<&MInt<M>> for MInt<M>
where M: MIntBase,

Source§

fn div_assign(&mut self, other: &MInt<M>)

Performs the /= operation. Read more
Source§

impl<M> DivAssign for MInt<M>
where M: MIntBase,

Source§

fn div_assign(&mut self, rhs: MInt<M>)

Performs the /= operation. Read more
Source§

impl<M> FormalPowerSeriesCoefficient for MInt<M>

Source§

type Base = M

Source§

fn pow(self, exp: usize) -> Self

Source§

fn memorized_fact(mf: &MemorizedFactorial<Self::Base>) -> &[Self]

Source§

fn memorized_inv_fact(mf: &MemorizedFactorial<Self::Base>) -> &[Self]

Source§

fn memorized_inv(mf: &MemorizedFactorial<Self::Base>, n: usize) -> Self

Source§

fn signed_pow(self, exp: isize) -> Self

Source§

fn memorized_factorial(n: usize) -> MemorizedFactorial<Self::Base>

Source§

impl<M> FormalPowerSeriesCoefficientSqrt for MInt<M>

Source§

fn sqrt_coefficient(&self) -> Option<Self>

Source§

impl<M> From<MInt<M>> for i128
where M: MIntConvert<i128>,

Source§

fn from(x: MInt<M>) -> i128

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for i16
where M: MIntConvert<i16>,

Source§

fn from(x: MInt<M>) -> i16

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for i32
where M: MIntConvert<i32>,

Source§

fn from(x: MInt<M>) -> i32

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for i64
where M: MIntConvert<i64>,

Source§

fn from(x: MInt<M>) -> i64

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for i8
where M: MIntConvert<i8>,

Source§

fn from(x: MInt<M>) -> i8

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for isize
where M: MIntConvert<isize>,

Source§

fn from(x: MInt<M>) -> isize

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for u128
where M: MIntConvert<u128>,

Source§

fn from(x: MInt<M>) -> u128

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for u16
where M: MIntConvert<u16>,

Source§

fn from(x: MInt<M>) -> u16

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for u32
where M: MIntConvert<u32>,

Source§

fn from(x: MInt<M>) -> u32

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for u64
where M: MIntConvert<u64>,

Source§

fn from(x: MInt<M>) -> u64

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for u8
where M: MIntConvert<u8>,

Source§

fn from(x: MInt<M>) -> u8

Converts to this type from the input type.
Source§

impl<M> From<MInt<M>> for usize
where M: MIntConvert<usize>,

Source§

fn from(x: MInt<M>) -> usize

Converts to this type from the input type.
Source§

impl<M> From<i128> for MInt<M>
where M: MIntConvert<i128>,

Source§

fn from(x: i128) -> Self

Converts to this type from the input type.
Source§

impl<M> From<i16> for MInt<M>
where M: MIntConvert<i16>,

Source§

fn from(x: i16) -> Self

Converts to this type from the input type.
Source§

impl<M> From<i32> for MInt<M>
where M: MIntConvert<i32>,

Source§

fn from(x: i32) -> Self

Converts to this type from the input type.
Source§

impl<M> From<i64> for MInt<M>
where M: MIntConvert<i64>,

Source§

fn from(x: i64) -> Self

Converts to this type from the input type.
Source§

impl<M> From<i8> for MInt<M>
where M: MIntConvert<i8>,

Source§

fn from(x: i8) -> Self

Converts to this type from the input type.
Source§

impl<M> From<isize> for MInt<M>
where M: MIntConvert<isize>,

Source§

fn from(x: isize) -> Self

Converts to this type from the input type.
Source§

impl<M> From<u128> for MInt<M>
where M: MIntConvert<u128>,

Source§

fn from(x: u128) -> Self

Converts to this type from the input type.
Source§

impl<M> From<u16> for MInt<M>
where M: MIntConvert<u16>,

Source§

fn from(x: u16) -> Self

Converts to this type from the input type.
Source§

impl<M> From<u32> for MInt<M>
where M: MIntConvert<u32>,

Source§

fn from(x: u32) -> Self

Converts to this type from the input type.
Source§

impl<M> From<u64> for MInt<M>
where M: MIntConvert<u64>,

Source§

fn from(x: u64) -> Self

Converts to this type from the input type.
Source§

impl<M> From<u8> for MInt<M>
where M: MIntConvert<u8>,

Source§

fn from(x: u8) -> Self

Converts to this type from the input type.
Source§

impl<M> From<usize> for MInt<M>
where M: MIntConvert<usize>,

Source§

fn from(x: usize) -> Self

Converts to this type from the input type.
Source§

impl<M> FromStr for MInt<M>
where M: MIntConvert + MIntBase<Inner: FromStr>,

Source§

type Err = <<M as MIntBase>::Inner as FromStr>::Err

The associated error which can be returned from parsing.
Source§

fn from_str(s: &str) -> Result<Self, Self::Err>

Parses a string s to return a value of this type. Read more
Source§

impl<M> Hash for MInt<M>
where M: MIntBase,

Source§

fn hash<H: Hasher>(&self, state: &mut H)

Feeds this value into the given Hasher. Read more
1.3.0 · Source§

fn hash_slice<H>(data: &[Self], state: &mut H)
where H: Hasher, Self: Sized,

Feeds a slice of this type into the given Hasher. Read more
Source§

impl<M> IterScan for MInt<M>
where M: MIntConvert + MIntBase<Inner: FromStr>,

Source§

type Output = MInt<M>

Source§

fn scan<'a, I: Iterator<Item = &'a str>>(iter: &mut I) -> Option<Self::Output>

Source§

impl<M> Mul<&MInt<M>> for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Mul>::Output

The resulting type after applying the * operator.
Source§

fn mul(self, other: &MInt<M>) -> <MInt<M> as Mul<MInt<M>>>::Output

Performs the * operation. Read more
Source§

impl<M> Mul<&MInt<M>> for MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Mul>::Output

The resulting type after applying the * operator.
Source§

fn mul(self, other: &MInt<M>) -> <MInt<M> as Mul<MInt<M>>>::Output

Performs the * operation. Read more
Source§

impl<M> Mul<MInt<M>> for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Mul>::Output

The resulting type after applying the * operator.
Source§

fn mul(self, other: MInt<M>) -> <MInt<M> as Mul<MInt<M>>>::Output

Performs the * operation. Read more
Source§

impl<M> Mul for MInt<M>
where M: MIntBase,

Source§

type Output = MInt<M>

The resulting type after applying the * operator.
Source§

fn mul(self, rhs: Self) -> Self::Output

Performs the * operation. Read more
Source§

impl<M> MulAssign<&MInt<M>> for MInt<M>
where M: MIntBase,

Source§

fn mul_assign(&mut self, other: &MInt<M>)

Performs the *= operation. Read more
Source§

impl<M> MulAssign for MInt<M>
where M: MIntBase,

Source§

fn mul_assign(&mut self, rhs: MInt<M>)

Performs the *= operation. Read more
Source§

impl<M> Neg for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Neg>::Output

The resulting type after applying the - operator.
Source§

fn neg(self) -> <MInt<M> as Neg>::Output

Performs the unary - operation. Read more
Source§

impl<M> Neg for MInt<M>
where M: MIntBase,

Source§

type Output = MInt<M>

The resulting type after applying the - operator.
Source§

fn neg(self) -> Self::Output

Performs the unary - operation. Read more
Source§

impl<M> One for MInt<M>
where M: MIntBase,

Source§

fn one() -> Self

Source§

fn is_one(&self) -> bool
where Self: PartialEq,

Source§

fn set_one(&mut self)

Source§

impl<M> PartialEq for MInt<M>
where M: MIntBase,

Source§

fn eq(&self, other: &Self) -> bool

Tests for self and other values to be equal, and is used by ==.
1.0.0 · Source§

fn ne(&self, other: &Rhs) -> bool

Tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.
Source§

impl<'a, M> Product<&'a MInt<M>> for MInt<M>
where M: MIntBase + 'a,

Source§

fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self

Takes an iterator and generates Self from the elements by multiplying the items.
Source§

impl<M> Product for MInt<M>
where M: MIntBase,

Source§

fn product<I: Iterator<Item = Self>>(iter: I) -> Self

Takes an iterator and generates Self from the elements by multiplying the items.
Source§

impl<M> RandomSpec<MInt<M>> for RangeFull
where M: MIntBase, RangeTo<M::Inner>: RandomSpec<M::Inner>,

Source§

fn rand(&self, rng: &mut Xorshift) -> MInt<M>

Return a random value.
Source§

fn rand_iter(self, rng: &mut Xorshift) -> RandIter<'_, T, Self>

Return an iterator that generates random values.
Source§

impl<M> SerdeByteStr for MInt<M>
where M: MIntBase<Inner: SerdeByteStr>,

Source§

fn serialize(&self, buf: &mut Vec<u8>)

Source§

fn deserialize<I>(iter: &mut I) -> Self
where I: Iterator<Item = u8>,

Source§

fn serialize_bytestr(&self) -> String

Source§

fn deserialize_from_bytes(bytes: &[u8]) -> Self
where Self: Sized,

Source§

impl<M> Sub<&MInt<M>> for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Sub>::Output

The resulting type after applying the - operator.
Source§

fn sub(self, other: &MInt<M>) -> <MInt<M> as Sub<MInt<M>>>::Output

Performs the - operation. Read more
Source§

impl<M> Sub<&MInt<M>> for MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Sub>::Output

The resulting type after applying the - operator.
Source§

fn sub(self, other: &MInt<M>) -> <MInt<M> as Sub<MInt<M>>>::Output

Performs the - operation. Read more
Source§

impl<M> Sub<MInt<M>> for &MInt<M>
where M: MIntBase,

Source§

type Output = <MInt<M> as Sub>::Output

The resulting type after applying the - operator.
Source§

fn sub(self, other: MInt<M>) -> <MInt<M> as Sub<MInt<M>>>::Output

Performs the - operation. Read more
Source§

impl<M> Sub for MInt<M>
where M: MIntBase,

Source§

type Output = MInt<M>

The resulting type after applying the - operator.
Source§

fn sub(self, rhs: Self) -> Self::Output

Performs the - operation. Read more
Source§

impl<M> SubAssign<&MInt<M>> for MInt<M>
where M: MIntBase,

Source§

fn sub_assign(&mut self, other: &MInt<M>)

Performs the -= operation. Read more
Source§

impl<M> SubAssign for MInt<M>
where M: MIntBase,

Source§

fn sub_assign(&mut self, rhs: MInt<M>)

Performs the -= operation. Read more
Source§

impl<'a, M> Sum<&'a MInt<M>> for MInt<M>
where M: MIntBase + 'a,

Source§

fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self

Takes an iterator and generates Self from the elements by “summing up” the items.
Source§

impl<M> Sum for MInt<M>
where M: MIntBase,

Source§

fn sum<I: Iterator<Item = Self>>(iter: I) -> Self

Takes an iterator and generates Self from the elements by “summing up” the items.
Source§

impl<M> Zero for MInt<M>
where M: MIntBase,

Source§

fn zero() -> Self

Source§

fn is_zero(&self) -> bool
where Self: PartialEq,

Source§

fn set_zero(&mut self)

Source§

impl<M> Copy for MInt<M>
where M: MIntBase,

Source§

impl<M> Eq for MInt<M>
where M: MIntBase,

Auto Trait Implementations§

§

impl<M> Freeze for MInt<M>
where <M as MIntBase>::Inner: Freeze,

§

impl<M> RefUnwindSafe for MInt<M>
where <M as MIntBase>::Inner: RefUnwindSafe,

§

impl<M> Send for MInt<M>
where <M as MIntBase>::Inner: Send,

§

impl<M> Sync for MInt<M>
where <M as MIntBase>::Inner: Sync,

§

impl<M> Unpin for MInt<M>
where <M as MIntBase>::Inner: Unpin,

§

impl<M> UnsafeUnpin for MInt<M>
where <M as MIntBase>::Inner: UnsafeUnpin,

§

impl<M> UnwindSafe for MInt<M>
where <M as MIntBase>::Inner: UnwindSafe,

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> ToArrayVecScalar for T

Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T> ToString for T
where T: Display + ?Sized,

Source§

fn to_string(&self) -> String

Converts the given value to a String. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.