#ifndef FASTFLOAT_BIGINT_H
#define FASTFLOAT_BIGINT_H
#if defined(FASTFLOAT_64BIT) && !defined(__sparc)
#define FASTFLOAT_64BIT_LIMB
#else
#define FASTFLOAT_32BIT_LIMB
#endif
// fixed-capacity vector of limbs, allocated entirely on the stack
template <uint16_t size> struct stackvec {
  constexpr size_t len() const noexcept {
    return length;
  }
  // extend_unchecked(s): bulk-append the limbs of a span
  ::memcpy((void*)ptr, (const void*)s.ptr, sizeof(limb) * s.len());
  // resize_unchecked(new_len, value): when growing, fill the new limbs with `value`
  if (new_len > len()) {
    size_t count = new_len - len();
    ::std::fill(first, last, value);
  // nonzero(index): scan from the index-th most significant limb downward for any nonzero limb
  while (index < len()) {
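  // Illustrative usage (added for exposition, not part of the original header):
  // a stackvec never allocates; try_push and try_resize simply report failure
  // once the fixed capacity is exhausted.
  //
  //   stackvec<4> v;             // capacity of 4 limbs, length 0
  //   bool ok = v.try_push(42);  // ok == true, v.len() == 1
  //   ok = v.try_resize(4, 0);   // grow to 4 limbs, zero-filling the new ones
  //   v.normalize();             // drop most-significant zero limbs: len() == 1 again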
// take the two most significant limbs (r0 high, r1 low), normalize them into the
// top 64 bits, and record whether any bits were truncated
uint64_t uint64_hi64(uint64_t r0, uint64_t r1, bool& truncated) noexcept {
  truncated = (r1 << shl) != 0;
  return (r0 << shl) | (r1 >> shr);
uint64_t uint32_hi64(uint32_t r0, uint32_t r1, bool& truncated) noexcept {
  uint64_t x0 = r0, x1 = r1;
  return uint64_hi64((x0 << 32) | x1, truncated);
}
uint64_t uint32_hi64(uint32_t r0, uint32_t r1, uint32_t r2, bool& truncated) noexcept {
  uint64_t x0 = r0, x1 = r1, x2 = r2;
  return uint64_hi64(x0, (x1 << 32) | x2, truncated);
}
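// Illustrative example (added for exposition; `hi64_normalization_example` is a
// hypothetical helper, not part of the original header): the hi64 family shifts
// the most significant limbs left until the top bit is set and reports through
// `truncated` whether any bits fell outside the 64-bit window. With r0 = 1 and
// r1 = 1, the shift is 63, the result is 0x8000000000000000, and truncated is true.
inline uint64_t hi64_normalization_example() noexcept {
  bool truncated;
  return uint64_hi64(uint64_t(1), uint64_t(1), truncated);
}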
#if defined(__has_builtin)
#if __has_builtin(__builtin_add_overflow)
  overflow = __builtin_add_overflow(x, y, &z);
  return z;
#endif
#endif
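  // Portable fallback sketch (an assumption about the omitted remainder of this
  // function, shown only for exposition): without __builtin_add_overflow,
  // unsigned wrap-around can be detected by comparing the sum with an operand:
  //
  //   z = x + y;          // wraps modulo 2^limb_bits
  //   overflow = (z < x); // true exactly when the addition wrapped
  //   return z;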
#ifdef FASTFLOAT_64BIT_LIMB
#if defined(__SIZEOF_INT128__)
  __uint128_t z = __uint128_t(x) * __uint128_t(y) + __uint128_t(carry);
#else
  z.high += uint64_t(overflow); // carry folded into the 128-bit result; cannot overflow
#endif
#else
  // 32-bit limbs: the full product fits in a uint64_t
  uint64_t z = uint64_t(x) * uint64_t(y) + uint64_t(carry);
#endif
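  // Worked example (added for exposition): multiplying an all-ones limb by 2
  // with an incoming carry of 1 computes (2^limb_bits - 1) * 2 + 1, which is
  // 2^(limb_bits + 1) - 1, so the returned low limb is again all ones and the
  // outgoing carry (the high limb) is 1:
  //
  //   limb carry = 1;
  //   limb low = scalar_mul(~limb(0), 2, carry); // low == ~limb(0), carry == 1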
// add a scalar to the bigint starting at limb offset `start`, rippling the carry upward
template <uint16_t size>
bool small_add_from(stackvec<size>& vec, limb y, size_t start) noexcept {
  size_t index = start;
  limb carry = y;
  bool overflow;
  while (carry != 0 && index < vec.len()) {
    vec[index] = scalar_add(vec[index], carry, overflow);
    carry = limb(overflow);
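  // Illustrative trace (added for exposition): small_add_from(vec, 1, 0) on a
  // vector whose limbs are all ones ripples the carry through every limb and
  // finally pushes a new most-significant limb, e.g. [~0, ~0] -> [0, 0, 1];
  // the call can only fail if that final push exceeds the fixed capacity.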
// add a scalar to the bigint (small_add_from with start == 0)
template <uint16_t size>
fastfloat_really_inline bool small_add(stackvec<size>& vec, limb y) noexcept {
// multiply the bigint by a scalar limb, pushing any final carry as a new limb
template <uint16_t size>
bool small_mul(stackvec<size>& vec, limb y) noexcept {
  limb carry = 0;
  for (size_t index = 0; index < vec.len(); index++) {
    vec[index] = scalar_mul(vec[index], y, carry);
// add a span of limbs to x at limb offset `start` (used by grade-school multiplication)
template <uint16_t size>
bool large_add_from(stackvec<size>& x, limb_span y, size_t start) noexcept {
  // grow x first if the range start..x.len() cannot hold all of y
  if (x.len() < start || y.len() > x.len() - start) {
    if (!x.try_resize(y.len() + start, 0)) { return false; }
  }
  for (size_t index = 0; index < y.len(); index++) {
    limb xi = x[index + start];
    x[index + start] = xi;
template <uint16_t size>
// grade-school multiplication: multiply x by each limb of y and accumulate the
// partial products, each shifted by its limb position
template <uint16_t size>
bool long_mul(stackvec<size>& x, limb_span y) noexcept {
  for (size_t index = 1; index < y.len(); index++) {
template <uint16_t size>
bool large_mul(stackvec<size>& x, limb_span y) noexcept {
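  // Plausible body sketch (an assumption added for exposition, not necessarily
  // the header's exact code): a single-limb multiplier can be handled as a plain
  // scalar multiply, and anything longer falls back to grade-school long_mul:
  //
  //   if (y.len() == 1) { return small_mul(x, y[0]); }
  //   return long_mul(x, y);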
// members of struct bigint, which stores its limbs little-endian in a stackvec<bigint_limbs>
#ifdef FASTFLOAT_64BIT_LIMB
  // get the high 64 bits of the big integer, and whether any lower bits were truncated
  uint64_t hi64(bool& truncated) const noexcept {
#ifdef FASTFLOAT_64BIT_LIMB
    if (vec.len() == 0) {
      return empty_hi64(truncated);
    } else if (vec.len() == 1) {
      return uint64_hi64(vec.rindex(0), truncated);
    } else {
      uint64_t result = uint64_hi64(vec.rindex(0), vec.rindex(1), truncated);
      truncated |= vec.nonzero(2);
      return result;
    }
#else
    if (vec.len() == 0) {
      return empty_hi64(truncated);
    } else if (vec.len() == 1) {
      return uint32_hi64(vec.rindex(0), truncated);
    } else if (vec.len() == 2) {
      return uint32_hi64(vec.rindex(0), vec.rindex(1), truncated);
    } else {
      uint64_t result = uint32_hi64(vec.rindex(0), vec.rindex(1), vec.rindex(2), truncated);
      truncated |= vec.nonzero(3);
      return result;
    }
#endif
  }
  // compare with another normalized bigint: a longer limb vector is larger;
  // otherwise compare limbs from most significant to least significant.
  int compare(const bigint& other) const noexcept {
    if (vec.len() > other.vec.len()) {
      return 1;
    } else if (vec.len() < other.vec.len()) {
      return -1;
    } else {
      for (size_t index = vec.len(); index > 0; index--) {
        limb xi = vec[index - 1];
        limb yi = other.vec[index - 1];
        if (xi > yi) {
          return 1;
        } else if (xi < yi) {
          return -1;
        }
      }
      return 0;
    }
  }
    // shl_bits: shift each limb left by n bits, carrying the shifted-out bits upward
    for (size_t index = 0; index < vec.len(); index++) {
      limb xi = vec[index];
      vec[index] = (xi << shl) | (prev >> shr);
      prev = xi;
    }
    limb carry = prev >> shr;
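    // Worked example (added for exposition): with 64-bit limbs, shifting the
    // single limb 0x8000000000000000 left by one bit clears that limb and pushes
    // the shifted-out bit as a new carry limb, i.e. [2^63] becomes [0, 1].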
    // shl_limbs: move the limbs up by n positions and zero-fill the vacated low limbs
    ::memmove(dst, src, sizeof(limb) * vec.len());
    ::std::fill(first, last, 0);
  // shift the whole number left by n bits: a sub-limb bit shift plus a whole-limb shift
  bool shl(size_t n) noexcept {
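    // Illustrative decomposition (added for exposition): with 64-bit limbs,
    // shl(70) amounts to a 6-bit shl_bits plus a one-limb shl_limbs, since
    // 70 = 1 * limb_bits + 6.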
#ifdef FASTFLOAT_64BIT_LIMB
  // multiply as if by 2 raised to exp (a left shift by exp bits)
  bool pow2(uint32_t exp) noexcept {
    return shl(exp);
  }
  // multiply as if by 5 raised to exp
  bool pow5(uint32_t exp) noexcept {
    static constexpr uint32_t large_step = 135;
    static constexpr uint64_t small_power_of_5[] = {
        1UL, 5UL, 25UL, 125UL, 625UL, 3125UL, 15625UL, 78125UL, 390625UL,
        1953125UL, 9765625UL, 48828125UL, 244140625UL, 1220703125UL,
        6103515625UL, 30517578125UL, 152587890625UL, 762939453125UL,
        3814697265625UL, 19073486328125UL, 95367431640625UL, 476837158203125UL,
        2384185791015625UL, 11920928955078125UL, 59604644775390625UL,
        298023223876953125UL, 1490116119384765625UL, 7450580596923828125UL,
    };
#ifdef FASTFLOAT_64BIT_LIMB
    constexpr static limb large_power_of_5[] = {
        1414648277510068013UL, 9180637584431281687UL, 4539964771860779200UL,
        10482974169319127550UL, 198276706040285095UL};
#else
    constexpr static limb large_power_of_5[] = {
        4279965485U, 329373468U, 4020270615U, 2137533757U, 4287402176U,
        1057042919U, 1071430142U, 2440757623U, 381945767U, 46164893U};
#endif
    size_t large_length = sizeof(large_power_of_5) / sizeof(limb);
    while (exp >= large_step) {
#ifdef FASTFLOAT_64BIT_LIMB
    uint32_t small_step = 27;
    limb max_native = 7450580596923828125UL;
#else
    uint32_t small_step = 13;
    limb max_native = 1220703125U;
#endif
    while (exp >= small_step) {
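    // Usage sketch (added for exposition; assumes a constructor from a 64-bit
    // mantissa, which this excerpt does not show): a slow-path parser can load
    // the decimal mantissa, scale it by the decimal exponent, and read back the
    // top 64 bits together with a truncation flag:
    //
    //   bigint b(mantissa);        // mantissa: uint64_t
    //   b.pow10(uint32_t(exp10));  // multiply by 5^exp10, then shift by exp10 bits
    //   bool truncated;
    //   uint64_t hi = b.hi64(truncated);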