purify
C Purify implementation with native circuit and BPP support
util.h
Go to the documentation of this file.
1/**********************************************************************
2 * Copyright (c) 2018 Andrew Poelstra *
3 * Distributed under the MIT software license, see the accompanying *
4 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
5 **********************************************************************/
6
7#ifndef SECP256K1_MODULE_BULLETPROOF_UTIL
8#define SECP256K1_MODULE_BULLETPROOF_UTIL
9
10#include <limits.h>
11#include <stdint.h>
12
13#if defined(_MSC_VER)
14#include <intrin.h>
15#endif
16
/* Count the set bits of n.
 *
 * Dispatches to a compiler intrinsic where one is available (GCC/Clang
 * builtins sized by SIZE_MAX comparisons, MSVC __popcnt/__popcnt64) and
 * otherwise falls back to a portable loop that clears the lowest set bit
 * once per iteration (Kernighan's method), which runs in O(popcount(n)).
 */
SECP256K1_INLINE static size_t secp256k1_popcount_size_t(size_t n) {
#if defined(__GNUC__) || defined(__clang__)
#if SIZE_MAX <= UINT_MAX
    return (size_t)__builtin_popcount((unsigned int)n);
#elif SIZE_MAX <= ULONG_MAX
    return (size_t)__builtin_popcountl((unsigned long)n);
#elif SIZE_MAX <= ULLONG_MAX
    return (size_t)__builtin_popcountll((unsigned long long)n);
#else
#error "size_t wider than unsigned long long is unsupported"
#endif
#elif defined(_MSC_VER)
#if SIZE_MAX > UINT_MAX
    return (size_t)__popcnt64((unsigned __int64)n);
#else
    return (size_t)__popcnt((unsigned int)n);
#endif
#else
    /* Portable fallback: n &= n - 1 clears exactly one set bit per step. */
    size_t total;
    for (total = 0; n != 0; total++) {
        n &= n - 1;
    }
    return total;
#endif
}
43
/* Index of the lowest set bit of n; returns the full bit width of size_t
 * when n == 0 (matching the convention of hardware count-trailing-zeros).
 */
SECP256K1_INLINE static size_t secp256k1_ctz_size_t(size_t n) {
    if (n == 0) {
        return sizeof(size_t) * CHAR_BIT;
    }
#if defined(__GNUC__) || defined(__clang__)
#if SIZE_MAX <= UINT_MAX
    return (size_t)__builtin_ctz((unsigned int)n);
#elif SIZE_MAX <= ULONG_MAX
    return (size_t)__builtin_ctzl((unsigned long)n);
#elif SIZE_MAX <= ULLONG_MAX
    return (size_t)__builtin_ctzll((unsigned long long)n);
#else
#error "size_t wider than unsigned long long is unsupported"
#endif
#elif defined(_MSC_VER)
    {
        unsigned long bit;
#if SIZE_MAX > UINT_MAX
        if (_BitScanForward64(&bit, (unsigned __int64)n) != 0) {
            return (size_t)bit;
        }
#else
        if (_BitScanForward(&bit, (unsigned long)n) != 0) {
            return (size_t)bit;
        }
#endif
    }
    /* Unreachable for n != 0, but keeps MSVC's "no value found" contract. */
    return sizeof(size_t) * CHAR_BIT;
#else
    {
        /* Portable fallback: shift out trailing zero bits one at a time. */
        size_t shift;
        for (shift = 0; (n & 1u) == 0; shift++) {
            n >>= 1;
        }
        return shift;
    }
#endif
}
83
/* floor(log2(n)), defined as 0 for n == 0 since callers only use it to
 * estimate proof sizes. Uses count-leading-zeros intrinsics where present,
 * otherwise counts how many right-shifts empty the value. */
SECP256K1_INLINE static size_t secp256k1_floor_lg(size_t n) {
    if (n == 0) {
        return 0;
    }
#if defined(__GNUC__) || defined(__clang__)
#if SIZE_MAX <= UINT_MAX
    return (sizeof(unsigned int) * CHAR_BIT - 1u) - (size_t)__builtin_clz((unsigned int)n);
#elif SIZE_MAX <= ULONG_MAX
    return (sizeof(unsigned long) * CHAR_BIT - 1u) - (size_t)__builtin_clzl((unsigned long)n);
#elif SIZE_MAX <= ULLONG_MAX
    return (sizeof(unsigned long long) * CHAR_BIT - 1u) - (size_t)__builtin_clzll((unsigned long long)n);
#else
#error "size_t wider than unsigned long long is unsupported"
#endif
#elif defined(_MSC_VER)
    {
        unsigned long msb;
#if SIZE_MAX > UINT_MAX
        if (_BitScanReverse64(&msb, (unsigned __int64)n) != 0) {
            return (size_t)msb;
        }
#else
        if (_BitScanReverse(&msb, (unsigned long)n) != 0) {
            return (size_t)msb;
        }
#endif
    }
    /* Unreachable for n != 0; mirrors the documented n == 0 result. */
    return 0;
#else
    {
        size_t lg = 0;
        while (n >>= 1) {
            lg++;
        }
        return lg;
    }
#endif
}
121
122static void secp256k1_scalar_dot_product(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, size_t n) {
123 secp256k1_scalar_clear(r);
124 while(n--) {
125 secp256k1_scalar term;
126 secp256k1_scalar_mul(&term, &a[n], &b[n]);
127 secp256k1_scalar_add(r, r, &term);
128 }
129}
130
/* Computes r[i] = a[i]^-1 for all i in [0, len) using Montgomery's batch
 * inversion trick: one modular inversion plus O(len) multiplications.
 * r and a must not overlap. No-op when len == 0. Variable-time: do not use
 * on secret data. */
static void secp256k1_scalar_inverse_all_var(secp256k1_scalar *r, const secp256k1_scalar *a, size_t len) {
    secp256k1_scalar u;
    size_t i;
    if (len < 1) {
        return;
    }

    /* r and a may not overlap, since r is used as prefix-product scratch. */
    VERIFY_CHECK((r + len <= a) || (a + len <= r));

    r[0] = a[0];

    /* Build prefix products: r[i] = a[0] * a[1] * ... * a[i]. */
    i = 0;
    while (++i < len) {
        secp256k1_scalar_mul(&r[i], &r[i - 1], &a[i]);
    }

    /* Invert the total product: u = (a[0] * ... * a[len-1])^-1. */
    secp256k1_scalar_inverse_var(&u, &r[--i]);

    /* Peel off one factor per step: r[j] = prefix[j-1] * u = a[j]^-1,
     * then fold a[j] back into u so u becomes prefix[j-1]^-1. */
    while (i > 0) {
        size_t j = i--;
        secp256k1_scalar_mul(&r[j], &r[i], &u);
        secp256k1_scalar_mul(&u, &u, &a[j]);
    }

    /* What remains of u is a[0]^-1. */
    r[0] = u;
}
157
158SECP256K1_INLINE static void secp256k1_bulletproof_serialize_points(unsigned char *out, secp256k1_ge *pt, size_t n) {
159 const size_t bitveclen = (n + 7) / 8;
160 size_t i;
161
162 memset(out, 0, bitveclen);
163 for (i = 0; i < n; i++) {
164 secp256k1_fe pointx;
165 pointx = pt[i].x;
166 secp256k1_fe_normalize(&pointx);
167 secp256k1_fe_get_b32(&out[bitveclen + i*32], &pointx);
168 if (!secp256k1_fe_is_square_var(&pt[i].y)) {
169 out[i/8] |= (1ull << (i % 8));
170 }
171 }
172}
173
174/* Fail closed on malformed encodings. The verifier later hashes parsed points
175 * into Fiat-Shamir, so it cannot safely accept bytes that do not decode to a
176 * unique curve point.
177 */
178SECP256K1_INLINE static int secp256k1_bulletproof_deserialize_point(secp256k1_ge *pt, const unsigned char *data, size_t i, size_t n) {
179 const size_t bitveclen = (n + 7) / 8;
180 const size_t offset = bitveclen + i*32;
181 secp256k1_fe fe;
182
183 if (!secp256k1_fe_set_b32_limit(&fe, &data[offset])) {
184 secp256k1_ge_clear(pt);
185 return 0;
186 }
187 if (!secp256k1_ge_set_xquad(pt, &fe)) {
188 secp256k1_ge_clear(pt);
189 return 0;
190 }
191 if (data[i / 8] & (1 << (i % 8))) {
192 secp256k1_ge_neg(pt, pt);
193 }
194 return 1;
195}
196
/* commit <- SHA256(commit || parity byte || lpt.x || rpt.x).
 * The 32-byte commit buffer is reused as scratch for each serialized x
 * coordinate, so the write order below is significant. */
static void secp256k1_bulletproof_update_commit(unsigned char *commit, const secp256k1_ge *lpt, const secp256k1_ge *rpt) {
    secp256k1_fe pointx;
    secp256k1_sha256 sha256;
    unsigned char lrparity;
    /* Bit 1: lpt's y is non-square; bit 0: rpt's y is non-square. */
    lrparity = (!secp256k1_fe_is_square_var(&lpt->y) << 1) + !secp256k1_fe_is_square_var(&rpt->y);
    secp256k1_sha256_initialize(&sha256);
    /* Chain in the previous transcript state. */
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_write(&sha256, &lrparity, 1);
    pointx = lpt->x;
    secp256k1_fe_normalize(&pointx);
    secp256k1_fe_get_b32(commit, &pointx); /* commit used as scratch here */
    secp256k1_sha256_write(&sha256, commit, 32);
    pointx = rpt->x;
    secp256k1_fe_normalize(&pointx);
    secp256k1_fe_get_b32(commit, &pointx);
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_finalize(&sha256, commit);
}
215
216static void secp256k1_bulletproof_update_commit_n(unsigned char *commit, const secp256k1_ge *pt, size_t n) {
217 secp256k1_sha256 sha256;
218 unsigned char lrparity = 0;
219 size_t i;
220
221 VERIFY_CHECK(n < 8);
222
223 for (i = 0; i < n; i++) {
224 lrparity |= secp256k1_fe_is_square_var(&pt[i].y) << i;
225 }
226
227 secp256k1_sha256_initialize(&sha256);
228 secp256k1_sha256_write(&sha256, commit, 32);
229 secp256k1_sha256_write(&sha256, &lrparity, 1);
230 for (i = 0; i < n; i++) {
231 secp256k1_fe pointx;
232 pointx = pt[i].x;
233 secp256k1_fe_normalize(&pointx);
234 secp256k1_fe_get_b32(commit, &pointx);
235 secp256k1_sha256_write(&sha256, commit, 32);
236 }
237 secp256k1_sha256_finalize(&sha256, commit);
238}
239
240/* Hash primitive values in a stable byte format so prover and verifier commit
241 * to the exact same circuit structure and coefficients.
242 */
243static void secp256k1_bulletproof_sha256_write_size(secp256k1_sha256 *sha256, size_t n) {
244 unsigned char ser[8];
245 size_t i;
246 for (i = 0; i < sizeof(ser); i++) {
247 ser[i] = (unsigned char)(n & 0xffu);
248 n >>= 8;
249 }
250 secp256k1_sha256_write(sha256, ser, sizeof(ser));
251}
252
253static void secp256k1_bulletproof_sha256_write_fast_scalar(secp256k1_sha256 *sha256, const secp256k1_fast_scalar *scal) {
254 unsigned char ser[32];
255 secp256k1_scalar_get_b32(ser, &scal->scal);
256 secp256k1_sha256_write(sha256, ser, sizeof(ser));
257}
258
259static void secp256k1_bulletproof_sha256_write_row_family(secp256k1_sha256 *sha256, const secp256k1_bulletproof_wmatrix_row *rows, size_t n_rows) {
260 size_t i;
262 for (i = 0; i < n_rows; i++) {
263 size_t j;
264 secp256k1_bulletproof_sha256_write_size(sha256, rows[i].size);
265 for (j = 0; j < rows[i].size; j++) {
266 secp256k1_bulletproof_sha256_write_size(sha256, rows[i].entry[j].idx);
267 secp256k1_bulletproof_sha256_write_fast_scalar(sha256, &rows[i].entry[j].scal);
268 }
269 }
270}
271
272/* Bind the full circuit into the legacy transcript. extra_commit still layers
273 * on top, but callers should not need to supply circuit bytes just to make the
274 * base protocol sound.
275 */
/* NOTE(review): the extraction this file came from skips internal lines
 * 282-290 and 292 here, leaving the loop body below empty. The comment
 * above says the full circuit is bound into the transcript, so the hashing
 * of the circuit contents (sizes, row families, constraint scalars) appears
 * to have been lost by the scrape — recover it from the upstream source
 * before relying on this function for transcript soundness. */
static void secp256k1_bulletproof_update_commit_circuit(unsigned char *commit, const secp256k1_bulletproof_circuit *circ) {
    secp256k1_sha256 sha256;
    size_t i;

    secp256k1_sha256_initialize(&sha256);
    secp256k1_sha256_write(&sha256, commit, 32);
    /* Loop body elided by extraction; presumably hashed per-constraint data. */
    for (i = 0; i < circ->n_constraints; i++) {
    }
    secp256k1_sha256_finalize(&sha256, commit);
}
296
297/* Convenience function to compute blind*G + sum_i (s[i] * gen[i])
298 * If G is passed as NULL, use the standard generator. While in the
299 * standard-generator case we could use ecmult_gen rather than
300 * ecmult_const, we don't. This function is only used during proof
301 * generation so performance is not critical.
302 *
303 * If `blind` is NULL it is treated as zero.
304 *
305 * This function is not constant-time with respect to the NULLness
306 * of its inputs. NULLness should never be correlated with secret data.
307 */
static void secp256k1_bulletproof_vector_commit(secp256k1_gej *r, const secp256k1_scalar *s, const secp256k1_ge *gen, size_t n, const secp256k1_scalar *blind, const secp256k1_ge *g) {
    secp256k1_scalar zero;
    secp256k1_ge rge;

    if (g == NULL) {
        g = &secp256k1_ge_const_g;
    }
    if (blind == NULL) {
        secp256k1_scalar_clear(&zero);
        blind = &zero;
    }

    /* Multiply by blinding factor */
    secp256k1_ecmult_const(r, g, blind);

    /* Do the non-blind sum, going through contortions to avoid adding infinities.
     * secp256k1_gej_add_ge requires its affine operand to be non-infinite, so
     * each iteration temporarily offsets the accumulator by g, selects the
     * pre-offset value back with cmov when the offset hit infinity, and undoes
     * the offset afterwards. cmov (rather than a branch) keeps the selection
     * independent of secret-derived conditions. */
    while (n--) {
        int inf;
        secp256k1_ge tmpge;
        secp256k1_ge negg;
        secp256k1_gej tmpj;

        /* Add G, undoing it if this causes rge == infinity */
        secp256k1_ge_set_gej(&tmpge, r);
        secp256k1_gej_add_ge(r, r, g);
        secp256k1_ge_set_gej(&rge, r);

        inf = secp256k1_ge_is_infinity(&rge);
        secp256k1_fe_cmov(&rge.x, &tmpge.x, inf);
        secp256k1_fe_cmov(&rge.y, &tmpge.y, inf);
        /* rge is now guaranteed representable as a finite point. */
        rge.infinity = 0;

        /* Add the next term to our now-guaranteed-noninfinite R */
        secp256k1_ecmult_const(&tmpj, &gen[n], &s[n]);
        secp256k1_gej_add_ge(r, &tmpj, &rge); /* here tmpj may be infinite but tmpge won't be */

        /* Subtract G, undoing it if we undid the addition above */
        secp256k1_ge_neg(&negg, g);
        secp256k1_ge_set_gej(&tmpge, r);
        secp256k1_gej_add_ge(r, r, &negg);
        secp256k1_ge_set_gej(&rge, r);

        secp256k1_fe_cmov(&rge.x, &tmpge.x, inf);
        secp256k1_fe_cmov(&rge.y, &tmpge.y, inf);
        /* Branch-free select of the infinity flag, matching the cmovs above. */
        rge.infinity = rge.infinity * (1 - inf) + tmpge.infinity * inf;
    }
}
355
356#endif
secp256k1_bulletproof_wmatrix_row * wl
Definition core.h:52
secp256k1_bulletproof_wmatrix_row * wv
Definition core.h:55
secp256k1_bulletproof_wmatrix_row * wo
Definition core.h:54
secp256k1_bulletproof_wmatrix_row * wr
Definition core.h:53
secp256k1_fast_scalar * c
Definition core.h:56
secp256k1_scalar scal
Definition core.h:30
static SECP256K1_INLINE size_t secp256k1_ctz_size_t(size_t n)
Definition util.h:44
static void secp256k1_bulletproof_update_commit_circuit(unsigned char *commit, const secp256k1_bulletproof_circuit *circ)
Definition util.h:276
static void secp256k1_bulletproof_sha256_write_row_family(secp256k1_sha256 *sha256, const secp256k1_bulletproof_wmatrix_row *rows, size_t n_rows)
Definition util.h:259
static SECP256K1_INLINE int secp256k1_bulletproof_deserialize_point(secp256k1_ge *pt, const unsigned char *data, size_t i, size_t n)
Definition util.h:178
static void secp256k1_bulletproof_update_commit(unsigned char *commit, const secp256k1_ge *lpt, const secp256k1_ge *rpt)
Definition util.h:197
static void secp256k1_bulletproof_update_commit_n(unsigned char *commit, const secp256k1_ge *pt, size_t n)
Definition util.h:216
static void secp256k1_scalar_dot_product(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, size_t n)
Definition util.h:122
static void secp256k1_bulletproof_vector_commit(secp256k1_gej *r, const secp256k1_scalar *s, const secp256k1_ge *gen, size_t n, const secp256k1_scalar *blind, const secp256k1_ge *g)
Definition util.h:308
static SECP256K1_INLINE size_t secp256k1_floor_lg(size_t n)
Definition util.h:85
static void secp256k1_bulletproof_sha256_write_fast_scalar(secp256k1_sha256 *sha256, const secp256k1_fast_scalar *scal)
Definition util.h:253
static SECP256K1_INLINE size_t secp256k1_popcount_size_t(size_t n)
Definition util.h:17
static SECP256K1_INLINE void secp256k1_bulletproof_serialize_points(unsigned char *out, secp256k1_ge *pt, size_t n)
Definition util.h:158
static void secp256k1_scalar_inverse_all_var(secp256k1_scalar *r, const secp256k1_scalar *a, size_t len)
Definition util.h:131
static void secp256k1_bulletproof_sha256_write_size(secp256k1_sha256 *sha256, size_t n)
Definition util.h:243