#if defined(__GNUC__) || defined(__clang__)
#if SIZE_MAX <= UINT_MAX
    return (sizeof(unsigned int) * CHAR_BIT - 1u) - (size_t)__builtin_clz((unsigned int)n);
#elif SIZE_MAX <= ULONG_MAX
    return (sizeof(unsigned long) * CHAR_BIT - 1u) - (size_t)__builtin_clzl((unsigned long)n);
#elif SIZE_MAX <= ULLONG_MAX
    return (sizeof(unsigned long long) * CHAR_BIT - 1u) - (size_t)__builtin_clzll((unsigned long long)n);
#else
#error "size_t wider than unsigned long long is unsupported"
#endif
#elif defined(_MSC_VER)
    /* The MSVC intrinsics report the index of the highest set bit through an
     * out-parameter and return nonzero iff the input was nonzero. */
    unsigned long index;
#if SIZE_MAX > UINT_MAX
    if (_BitScanReverse64(&index, (unsigned __int64)n) != 0) {
        return (size_t)index;
    }
#else
    if (_BitScanReverse(&index, (unsigned long)n) != 0) {
        return (size_t)index;
    }
#endif
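
/* Illustration of the identity used above (not part of the original code):
 * for a 32-bit unsigned int, floor(log2(n)) = 31 - clz(n). With n = 12
 * (binary 1100), clz is 28, so the result is 31 - 28 = 3, and indeed
 * 2^3 <= 12 < 2^4. Every branch assumes n != 0: __builtin_clz(0) is
 * undefined behavior, and _BitScanReverse reports failure for a zero
 * input. */
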
/* Batch scalar inversion via Montgomery's trick: one modular inversion plus
 * about three multiplications per element. The signature and loop scaffolding
 * are reconstructed; the name is an assumption, modeled on the existing
 * secp256k1_fe_inv_all_var. */
static void secp256k1_scalar_inverse_all_var(secp256k1_scalar *r, const secp256k1_scalar *a, size_t len) {
    secp256k1_scalar u;
    size_t i;
    if (len < 1) {
        return;
    }
    /* r and a may not overlap */
    VERIFY_CHECK((r + len <= a) || (a + len <= r));

    /* Forward pass: r[i] = a[0] * a[1] * ... * a[i]. */
    r[0] = a[0];
    i = 0;
    while (++i < len) {
        secp256k1_scalar_mul(&r[i], &r[i - 1], &a[i]);
    }

    /* Invert the product of all inputs at once. */
    secp256k1_scalar_inverse_var(&u, &r[--i]);

    /* Backward pass: peel off one factor per step. */
    while (i > 0) {
        size_t j = i--;
        secp256k1_scalar_mul(&r[j], &r[i], &u);
        secp256k1_scalar_mul(&u, &u, &a[j]);
    }
    r[0] = u;
}
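
/* How the two passes combine (illustration, not original code): after the
 * forward pass, r[i] holds the prefix product a[0]*...*a[i], and u starts as
 * the inverse of the full product. Each backward step computes
 * r[j] = r[j-1] * u = a[j]^-1 and then multiplies u by a[j], turning u into
 * the inverse of the next shorter prefix. For len = 3 this costs a single
 * inversion plus six multiplications instead of three separate inversions. */
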
/* Outputs n points, amortizing their y-parity bits into a leading bit
 * vector. */
static void secp256k1_bulletproof_serialize_points(unsigned char *out, secp256k1_ge *pt, size_t n) {
    const size_t bitveclen = (n + 7) / 8;
    size_t i;

    memset(out, 0, bitveclen);
    for (i = 0; i < n; i++) {
        secp256k1_fe pointx;
        pointx = pt[i].x;
        secp256k1_fe_normalize(&pointx);
        secp256k1_fe_get_b32(&out[bitveclen + i*32], &pointx);
        if (!secp256k1_fe_is_square_var(&pt[i].y)) {
            out[i/8] |= (1ull << (i % 8));
        }
    }
}
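
/* Serialized layout produced above: ceil(n/8) parity bytes followed by n
 * 32-byte big-endian x coordinates:
 *
 *     [parity bitvector][x_0][x_1]...[x_{n-1}]
 *
 * Bit i of the bitvector is set when the y coordinate of point i is not a
 * square, which is exactly the hint needed to choose between the two roots
 * of y^2 = x^3 + 7 when the point is read back. */
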
static int secp256k1_bulletproof_deserialize_point(secp256k1_ge *pt, const unsigned char *data, size_t i, size_t n) {
    const size_t bitveclen = (n + 7) / 8;
    const size_t offset = bitveclen + i*32;
    secp256k1_fe fe;

    if (!secp256k1_fe_set_b32_limit(&fe, &data[offset])) {
        secp256k1_ge_clear(pt);
        return 0;
    }
    if (!secp256k1_ge_set_xquad(pt, &fe)) {
        secp256k1_ge_clear(pt);
        return 0;
    }
    if (data[i / 8] & (1 << (i % 8))) {
        secp256k1_ge_neg(pt, pt);
    }
    return 1;
}
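
/* Hypothetical round-trip sketch (illustrative only; POINT_COUNT, points and
 * buf are made-up names):
 *
 *     unsigned char buf[(POINT_COUNT + 7) / 8 + 32 * POINT_COUNT];
 *     secp256k1_bulletproof_serialize_points(buf, points, POINT_COUNT);
 *     for (i = 0; i < POINT_COUNT; i++) {
 *         CHECK(secp256k1_bulletproof_deserialize_point(&pt, buf, i, POINT_COUNT));
 *     }
 *
 * Deserialization fails cleanly on an overflowing x encoding or an x that is
 * not on the curve, clearing the output point in both cases. */
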
static void secp256k1_bulletproof_update_commit(unsigned char *commit, const secp256k1_ge *lpt, const secp256k1_ge *rpt) {
    secp256k1_fe pointx;
    secp256k1_sha256 sha256;
    unsigned char lrparity;
    lrparity = (!secp256k1_fe_is_square_var(&lpt->y) << 1) + !secp256k1_fe_is_square_var(&rpt->y);
    secp256k1_sha256_initialize(&sha256);
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_write(&sha256, &lrparity, 1);
    pointx = lpt->x;
    secp256k1_fe_normalize(&pointx);
    secp256k1_fe_get_b32(commit, &pointx);
    secp256k1_sha256_write(&sha256, commit, 32);
    pointx = rpt->x;
    secp256k1_fe_normalize(&pointx);
    secp256k1_fe_get_b32(commit, &pointx);
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_finalize(&sha256, commit);
}
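
/* The running 32-byte `commit` acts as a Fiat-Shamir transcript hash: each
 * update absorbs the previous state, one byte packing the two y-parities
 * (bit 1 for the left point, bit 0 for the right) and both x coordinates.
 * The eventual challenge therefore binds to the full points rather than to
 * their x coordinates alone. */
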
/* Variant that absorbs up to eight points in one update; the name and the
 * declarations are reconstructed, the hashing steps are as in the excerpt. */
static void secp256k1_bulletproof_update_commit_n(unsigned char *commit, const secp256k1_ge *pt, size_t n) {
    secp256k1_fe pointx;
    secp256k1_sha256 sha256;
    unsigned char lrparity = 0;
    size_t i;

    for (i = 0; i < n; i++) {
        /* Note: unlike the two-point variant above, this sets bit i when y
         * *is* a square. */
        lrparity |= secp256k1_fe_is_square_var(&pt[i].y) << i;
    }
    secp256k1_sha256_initialize(&sha256);
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_write(&sha256, &lrparity, 1);
    for (i = 0; i < n; i++) {
        pointx = pt[i].x;
        secp256k1_fe_normalize(&pointx);
        secp256k1_fe_get_b32(commit, &pointx);
        secp256k1_sha256_write(&sha256, commit, 32);
    }
    secp256k1_sha256_finalize(&sha256, commit);
}
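
/* Packing every parity into the single lrparity byte keeps the transcript
 * encoding compact but caps this variant at eight points; for n <= 8 the
 * shifts cannot overflow. The per-point hashing otherwise mirrors the
 * two-point version above. */
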
/* Computes r = blind*g + sum of s[k]*gen[k] over all k. The declarations,
 * loop scaffolding and final write-back are reconstructed around the
 * original arithmetic lines; the downward-counting loop is an assumption
 * that matches the gen[n]/s[n] indexing. */
static void secp256k1_bulletproof_vector_commit(secp256k1_gej *r, const secp256k1_scalar *s, const secp256k1_ge *gen, size_t n, const secp256k1_scalar *blind, const secp256k1_ge *g) {
    secp256k1_scalar zero;
    secp256k1_ge rge, tmpge, negg;
    secp256k1_gej tmpj;
    int inf;

    /* Default to the standard generator when no blinding base is given. */
    if (g == NULL) {
        g = &secp256k1_ge_const_g;
    }
    secp256k1_scalar_clear(&zero);

    /* r <- blind*g */
    secp256k1_ecmult_const(r, g, blind);

    /* Offset the accumulator by g before converting it to affine form; if
     * the addition lands on infinity, fall back to the pre-addition
     * coordinates. */
    secp256k1_ge_set_gej(&tmpge, r);
    secp256k1_gej_add_ge(r, r, g);
    secp256k1_ge_set_gej(&rge, r);

    inf = secp256k1_ge_is_infinity(&rge);
    secp256k1_fe_cmov(&rge.x, &tmpge.x, inf);
    secp256k1_fe_cmov(&rge.y, &tmpge.y, inf);

    /* Accumulate the s[n]*gen[n] terms. */
    while (n--) {
        secp256k1_ecmult_const(&tmpj, &gen[n], &s[n]);
        secp256k1_gej_add_ge(r, &tmpj, &rge);
        secp256k1_ge_set_gej(&rge, r); /* reconstructed re-affine step */
    }

    /* Undo the initial g offset, with the same infinity fallback. */
    secp256k1_ge_neg(&negg, g);
    secp256k1_ge_set_gej(&tmpge, r);
    secp256k1_gej_add_ge(r, r, &negg);
    secp256k1_ge_set_gej(&rge, r);

    inf = secp256k1_ge_is_infinity(&rge);
    secp256k1_fe_cmov(&rge.x, &tmpge.x, inf);
    secp256k1_fe_cmov(&rge.y, &tmpge.y, inf);
    rge.infinity = rge.infinity * (1 - inf) + tmpge.infinity * inf;
    secp256k1_gej_set_ge(r, &rge); /* reconstructed write-back */
}
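
/* Why the g offset: secp256k1_ge_set_gej produces garbage coordinates when
 * its input is the point at infinity, and secp256k1_gej_add_ge needs an
 * affine second operand. Nudging the accumulator by g before converting,
 * and undoing it afterwards, lets the fe_cmov lines substitute the
 * pre-addition coordinates whenever an addition does land on infinity, so
 * the control flow never branches on secret data. The last assignment picks
 * the infinity flag the same branchless way: rge.infinity*(1 - inf) +
 * tmpge.infinity*inf selects tmpge's flag exactly when inf is set. */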