@@ -1954,16 +1954,16 @@ static inline uint8_t lean_int8_xor(uint8_t a1, uint8_t a2) {

 static inline uint8_t lean_int8_shift_right(uint8_t a1, uint8_t a2) {
     int8_t lhs = (int8_t)a1;
-    int8_t rhs = (int8_t)a2;
+    int8_t rhs = (((int8_t)a2 % 8) + 8) % 8; // this is smod 8

-    return (uint8_t)(lhs >> (rhs % 8));
+    return (uint8_t)(lhs >> rhs);
 }

 static inline uint8_t lean_int8_shift_left(uint8_t a1, uint8_t a2) {
     int8_t lhs = (int8_t)a1;
-    int8_t rhs = (int8_t)a2;
+    int8_t rhs = (((int8_t)a2 % 8) + 8) % 8; // this is smod 8

-    return (uint8_t)(lhs << (rhs % 8));
+    return (uint8_t)(lhs << rhs);
 }

 static inline uint8_t lean_int8_complement(uint8_t a) {
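For reference, the Int8 hunk above moves the reduction of the shift amount: the old code reduced the raw signed `a2` with C's truncating `%` at the shift site, which yields a negative remainder for a negative `a2` and therefore a negative shift count (undefined behaviour in C), while the new code first maps `a2` into [0, 8) with a Euclidean modulo ("smod 8"). A minimal stand-alone sketch of the two reductions (the helper name below is invented for illustration and is not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Stand-alone copy of the new Int8 right-shift reduction; the helper name
   is invented for illustration and is not part of lean.h. */
static uint8_t int8_shift_right_smod(uint8_t a1, uint8_t a2) {
    int8_t lhs = (int8_t)a1;
    int8_t rhs = (((int8_t)a2 % 8) + 8) % 8; /* smod 8: always in 0..7 */
    return (uint8_t)(lhs >> rhs);
}

int main(void) {
    uint8_t a2 = (uint8_t)-7; /* shift amount -7 (two's-complement wraparound) */
    int8_t old_rhs = (int8_t)a2 % 8;             /* old reduction: -7, a negative
                                                    (invalid) shift count */
    int8_t new_rhs = (((int8_t)a2 % 8) + 8) % 8; /* new reduction: 1 */
    printf("old rhs = %d, new rhs = %d\n", old_rhs, new_rhs);
    printf("16 >> (-7) with smod = %d\n", int8_shift_right_smod(16, a2)); /* 8 */
    return 0;
}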
@@ -2094,16 +2094,16 @@ static inline uint16_t lean_int16_xor(uint16_t a1, uint16_t a2) {

 static inline uint16_t lean_int16_shift_right(uint16_t a1, uint16_t a2) {
     int16_t lhs = (int16_t)a1;
-    int16_t rhs = (int16_t)a2;
+    int16_t rhs = (((int16_t)a2 % 16) + 16) % 16; // this is smod 16

-    return (uint16_t)(lhs >> (rhs % 16));
+    return (uint16_t)(lhs >> rhs);
 }

 static inline uint16_t lean_int16_shift_left(uint16_t a1, uint16_t a2) {
     int16_t lhs = (int16_t)a1;
-    int16_t rhs = (int16_t)a2;
+    int16_t rhs = (((int16_t)a2 % 16) + 16) % 16; // this is smod 16

-    return (uint16_t)(lhs << (rhs % 16));
+    return (uint16_t)(lhs << rhs);
 }

 static inline uint16_t lean_int16_complement(uint16_t a) {
@@ -2233,16 +2233,16 @@ static inline uint32_t lean_int32_xor(uint32_t a1, uint32_t a2) {

 static inline uint32_t lean_int32_shift_right(uint32_t a1, uint32_t a2) {
     int32_t lhs = (int32_t)a1;
-    int32_t rhs = (int32_t)a2;
+    int32_t rhs = (((int32_t)a2 % 32) + 32) % 32; // this is smod 32

-    return (uint32_t)(lhs >> (rhs % 32));
+    return (uint32_t)(lhs >> rhs);
 }

 static inline uint32_t lean_int32_shift_left(uint32_t a1, uint32_t a2) {
     int32_t lhs = (int32_t)a1;
-    int32_t rhs = (int32_t)a2;
+    int32_t rhs = (((int32_t)a2 % 32) + 32) % 32; // this is smod 32

-    return (uint32_t)(lhs << (rhs % 32));
+    return (uint32_t)(lhs << rhs);
 }

 static inline uint32_t lean_int32_complement(uint32_t a) {
@@ -2372,16 +2372,16 @@ static inline uint64_t lean_int64_xor(uint64_t a1, uint64_t a2) {

 static inline uint64_t lean_int64_shift_right(uint64_t a1, uint64_t a2) {
     int64_t lhs = (int64_t)a1;
-    int64_t rhs = (int64_t)a2;
+    int64_t rhs = (((int64_t)a2 % 64) + 64) % 64; // this is smod 64

-    return (uint64_t)(lhs >> (rhs % 64));
+    return (uint64_t)(lhs >> rhs);
 }

 static inline uint64_t lean_int64_shift_left(uint64_t a1, uint64_t a2) {
     int64_t lhs = (int64_t)a1;
-    int64_t rhs = (int64_t)a2;
+    int64_t rhs = (((int64_t)a2 % 64) + 64) % 64; // this is smod 64

-    return (uint64_t)(lhs << (rhs % 64));
+    return (uint64_t)(lhs << rhs);
 }

 static inline uint64_t lean_int64_complement(uint64_t a) {
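The 16-, 32- and 64-bit hunks apply the same substitution, with the modulus matching the bit width. As a rough sanity check of the new semantics at 64 bits (again a stand-alone sketch under the same assumptions, with an invented helper name, not runtime code):

#include <assert.h>
#include <stdint.h>

/* Stand-alone copy of the new 64-bit right-shift reduction; the helper name
   is invented for illustration and is not part of lean.h. */
static uint64_t int64_shift_right_smod(uint64_t a1, uint64_t a2) {
    int64_t lhs = (int64_t)a1;
    int64_t rhs = (((int64_t)a2 % 64) + 64) % 64; /* smod 64: always in 0..63 */
    return (uint64_t)(lhs >> rhs);
}

int main(void) {
    /* A shift amount of -63 is reduced to 1 by smod 64, so this behaves
       like an ordinary shift by 1. */
    assert(int64_shift_right_smod(8, (uint64_t)-63) == 4);
    /* Shift amounts already in 0..63 are unchanged by the reduction. */
    assert(int64_shift_right_smod(8, 3) == 1);
    return 0;
}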