/**********************************************************************
 * Copyright (c) 2018 Andrew Poelstra                                 *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
7#ifndef SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL
8#define SECP256K1_MODULE_BULLETPROOF_INNER_PRODUCT_IMPL
9
10#include "third_party/secp256k1-zkp/src/group.h"
11#include "third_party/secp256k1-zkp/src/scalar.h"
12
15
16#define POPCOUNT(x) (secp256k1_popcount_size_t((size_t)(x)))
17#define CTZ(x) (secp256k1_ctz_size_t((size_t)(x)))
18
19#define IP_AB_SCALARS 4
20
21typedef int (secp256k1_bulletproof_vfy_callback)(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data);
22
23/* used by callers to wrap a proof with surrounding context */
24typedef struct {
25 const unsigned char *proof;
26 secp256k1_scalar p_offs;
27 secp256k1_scalar yinv;
28 unsigned char commit[32];
33
34/* used internally: parsed inner-product proof state. We keep decoded a/b
35 * scalars, decoded L/R points, and explicit challenge powers here so the
36 * verifier derives Fiat-Shamir from validated points and rebuilds multiexp
37 * coefficients directly from parsed proof data.
38 */
39typedef struct {
41 secp256k1_scalar a[IP_AB_SCALARS / 2];
42 secp256k1_scalar b[IP_AB_SCALARS / 2];
43 secp256k1_scalar x[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
44 secp256k1_scalar xinv[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
45 secp256k1_scalar xsq[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
46 secp256k1_scalar xsqinv[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
47 secp256k1_scalar yinvpow2[SECP256K1_BULLETPROOF_MAX_DEPTH + 1];
48 secp256k1_ge lr[2 * SECP256K1_BULLETPROOF_MAX_DEPTH];
50 size_t block_size;
52
53/* used by callers to modify the multiexp */
54typedef struct {
55 size_t n_proofs;
56 secp256k1_scalar p_offs;
57 const secp256k1_ge *g;
58 const secp256k1_ge *geng;
59 const secp256k1_ge *genh;
60 size_t vec_len;
61 size_t lg_vec_len;
63 secp256k1_scalar *randomizer;
66
68 if (n < IP_AB_SCALARS / 2) {
69 return 32 * (1 + 2 * n);
70 } else {
71 size_t bit_count = POPCOUNT(n);
72 size_t log = secp256k1_floor_lg(2 * n / IP_AB_SCALARS);
73 return 32 * (1 + 2 * (bit_count - 1 + log) + IP_AB_SCALARS) + (2*log + 7) / 8;
74 }
75}
76
/* Bulletproof rangeproof verification comes down to a single multiexponentiation of the form
 *
 *   P + (c-a*b)*x*G - sum_{i=1}^n [a*s'_i*G_i + b*s_i*H_i] + sum_{i=1}^log2(n) [x_i^-2 L_i + x_i^2 R_i]
 *
 * which will equal infinity if the rangeproof is correct. Here
 *   - `G_i` and `H_i` are standard NUMS generators. `G` is the standard secp256k1 generator.
 *   - `P` and `c` are inputs to the proof, which claims that there exist `a_i` and `b_i`, `i` ranging
 *     from 0 to `n-1`, such that `P = sum_i [a_i G_i + b_i H_i]` and that `<{a_i}, {b_i}> = c`.
 *   - `a`, `b`, `L_i` and `R_i` are auxiliary components of the proof, where `i` ranges from 0 to `log2(n)-1`.
 *   - `x_i = H(x_{i-1} || L_i || R_i)`, where `x_{-1}` is passed through the `commit` variable and
 *     must be a commitment to `P` and `c`.
 *   - `x` is a hash of `commit` and is used to rerandomize `c`. See Protocol 2 vs Protocol 1 in the paper.
 *   - `s_i` and `s'_i` are computed as follows.
 *
 * For each `i` between 0 and `n-1` inclusive, let `b_{ij}` be -1 (1) if the `j`th bit of `i` is zero (one).
 * Here `j` ranges from 0 to `log2(n)-1`. Then for each such `i` we define
 *   - `s_i = prod_j x_j^{b_{ij}}`
 *   - `s'_i = 1/s_i`
 *
 * Alternately we can define `s_i` and `s'_i` recursively as follows:
 *   - `s_0 = s'_{n - 1} = 1 / prod_j x_j`
 *   - `s_i = s'_{n - 1 - i} = s_{i'} * x_j^2` where `i' = i & (i - 1)` is `i` with its least
 *     significant 1 cleared and `j` is the index of that bit.
 *
 * Our ecmult_multi function takes `(c - a*b)*x` directly and multiplies this by `G`. For every other
 * (scalar, point) pair it calls the following callback function, which takes an index and outputs a
 * pair. The function therefore has three regimes:
 *
 * For the first `2n` invocations, it returns the generator terms for the G and
 * H halves of the proof. Earlier versions built these coefficients incrementally
 * inside the callback via caches and inverse tricks. We now preparse the final
 * `a`/`b` scalars, challenges `x_j`, and powers of `y^-1` during verifier
 * setup, then reconstruct each generator coefficient directly from that parsed
 * state.
 *
 * For the next `2*log2(n)` invocations it returns the parsed `L_i`/`R_i`
 * points with scalars `x_i^2` and `x_i^-2`.
 *
 * For the remaining invocations it passes through to another callback, `rangeproof_cb_data` which
 * computes `P`. The reason for this is that in practice `P` is usually defined by another multiexp
 * rather than being a known point, and it is more efficient to compute one exponentiation.
 */
120/* Rebuild the x_j product for a specific generator index directly from the
121 * parsed challenges. This keeps verification coupled to decoded proof state.
122 */
124 secp256k1_scalar *out,
126 size_t idx,
127 size_t lg_vec_len,
128 int invert
129) {
130 size_t i;
131 secp256k1_scalar_set_int(out, 1);
132 for (i = 0; i < lg_vec_len; i++) {
133 if (idx & ((size_t)1 << i)) {
134 secp256k1_scalar_mul(out, out, invert ? &proof->xinv[i] : &proof->x[i]);
135 } else {
136 secp256k1_scalar_mul(out, out, invert ? &proof->x[i] : &proof->xinv[i]);
137 }
138 }
139}
140
141/* Recover y^(-idx) from powers of two so H coefficients can be rebuilt
142 * without making any assumptions about the final a/b values.
143 */
145 secp256k1_scalar *out,
147 size_t idx,
148 size_t lg_vec_len
149) {
150 size_t i;
151 secp256k1_scalar_set_int(out, 1);
152 for (i = 0; i <= lg_vec_len; i++) {
153 if (idx & ((size_t)1 << i)) {
154 secp256k1_scalar_mul(out, out, &proof->yinvpow2[i]);
155 }
156 }
157}
158
159/* Derive the final G/H coefficient directly from parsed a/b and challenges.
160 * Zero a or b is valid, so only malformed encodings are rejected during parse.
161 */
163 secp256k1_scalar *term,
166 const secp256k1_scalar *randomizer,
167 size_t idx
168) {
169 const int use_h = idx >= ctx->vec_len;
170 const size_t proof_idx = use_h ? idx - ctx->vec_len : idx;
171 const size_t scalar_idx = proof_idx / proof->block_size;
172 const size_t local_idx = proof_idx % proof->block_size;
173 secp256k1_scalar factor;
174
175 VERIFY_CHECK(proof->final_grouping != 0);
176 VERIFY_CHECK(proof->block_size != 0);
177 VERIFY_CHECK(scalar_idx < proof->final_grouping);
178
179 secp256k1_bulletproof_innerproduct_vfy_xproduct(&factor, proof, local_idx, ctx->lg_vec_len, use_h);
180 if (use_h) {
181 secp256k1_scalar yfactor;
182 secp256k1_bulletproof_innerproduct_vfy_yinvpow(&yfactor, proof, proof_idx, ctx->lg_vec_len);
183 secp256k1_scalar_mul(&factor, &factor, &yfactor);
184 secp256k1_scalar_mul(&factor, &factor, &proof->b[scalar_idx]);
185 } else {
186 secp256k1_scalar_mul(&factor, &factor, &proof->a[scalar_idx]);
187 }
188 secp256k1_scalar_mul(&factor, &factor, randomizer);
189 secp256k1_scalar_negate(term, &factor);
190}
191
192static int secp256k1_bulletproof_innerproduct_vfy_ecmult_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
194
195 if (idx < 2 * ctx->vec_len) {
196 size_t i;
197 if (idx < ctx->vec_len) {
198 *pt = ctx->geng[idx];
199 } else {
200 *pt = ctx->genh[idx - ctx->vec_len];
201 }
202
203 secp256k1_scalar_clear(sc);
204 for (i = 0; i < ctx->n_proofs; i++) {
205 secp256k1_scalar term;
207 if (ctx->proof[i].proof->rangeproof_cb != NULL) {
208 secp256k1_scalar rangeproof_offset;
209 if ((ctx->proof[i].proof->rangeproof_cb)(&rangeproof_offset, NULL, &ctx->randomizer[i], idx, ctx->proof[i].proof->rangeproof_cb_data) == 0) {
210 return 0;
211 }
212 secp256k1_scalar_add(&term, &term, &rangeproof_offset);
213 }
214 secp256k1_scalar_add(sc, sc, &term);
215 }
216 } else if (idx < 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs)) {
217 size_t real_idx = idx - 2 * ctx->vec_len;
218 const size_t proof_idx = real_idx / (2 * ctx->lg_vec_len);
219 real_idx = real_idx % (2 * ctx->lg_vec_len);
220 *pt = ctx->proof[proof_idx].lr[real_idx];
221 if (idx % 2 == 0) {
222 *sc = ctx->proof[proof_idx].xsq[real_idx / 2];
223 } else {
224 *sc = ctx->proof[proof_idx].xsqinv[real_idx / 2];
225 }
226 secp256k1_scalar_mul(sc, sc, &ctx->randomizer[proof_idx]);
227 } else if (idx == 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs)) {
228 *sc = ctx->p_offs;
229 *pt = *ctx->g;
230 } else if (ctx->shared_g && idx == 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs) + 1) {
231 size_t i;
232 secp256k1_scalar_clear(sc);
233 for (i = 0; i < ctx->n_proofs; i++) {
234 secp256k1_scalar term;
235 if ((ctx->proof[i].proof->rangeproof_cb)(&term, pt, &ctx->randomizer[i], 2 * (ctx->vec_len + ctx->lg_vec_len), ctx->proof[i].proof->rangeproof_cb_data) == 0) {
236 return 0;
237 }
238 secp256k1_scalar_add(sc, sc, &term);
239 }
240 } else {
241 size_t proof_idx = 0;
242 size_t real_idx = idx - 2 * (ctx->vec_len + ctx->lg_vec_len * ctx->n_proofs) - 1 - !!ctx->shared_g;
243 while (real_idx >= ctx->proof[proof_idx].proof->n_extra_rangeproof_points - !!ctx->shared_g) {
244 real_idx -= ctx->proof[proof_idx].proof->n_extra_rangeproof_points - !!ctx->shared_g;
245 proof_idx++;
246 VERIFY_CHECK(proof_idx < ctx->n_proofs);
247 }
248 if ((ctx->proof[proof_idx].proof->rangeproof_cb)(sc, pt, &ctx->randomizer[proof_idx], 2 * (ctx->vec_len + ctx->lg_vec_len), ctx->proof[proof_idx].proof->rangeproof_cb_data) == 0) {
249 return 0;
250 }
251 }
252
253 return 1;
254}
255
/* nb For security it is essential that `commit_inp` already commit to all data
 * needed to compute `P`. We do not hash it in during verification since `P`
 * may be specified indirectly as a bunch of scalar offsets.
 */
260static int secp256k1_bulletproof_inner_product_verify_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, const secp256k1_bulletproof_generators *gens, size_t vec_len, const secp256k1_bulletproof_innerproduct_context *proof, size_t n_proofs, size_t plen, int shared_g) {
261 secp256k1_sha256 sha256;
263 unsigned char commit[32];
264 size_t total_n_points = 2 * vec_len + !!shared_g + 1; /* +1 for shared G (value_gen), +1 for H (blinding_gen) */
265 secp256k1_gej r;
266 secp256k1_scalar zero;
267 size_t i;
268
270 return 0;
271 }
272
273 if (n_proofs == 0) {
274 return 1;
275 }
276
277 if (!secp256k1_scratch_allocate_frame(scratch, n_proofs * (sizeof(*ecmult_data.randomizer) + sizeof(*ecmult_data.proof)), 2)) {
278 return 0;
279 }
280
281 secp256k1_scalar_clear(&zero);
282 ecmult_data.n_proofs = n_proofs;
283 ecmult_data.g = gens->blinding_gen;
284 ecmult_data.geng = gens->gens;
285 ecmult_data.genh = gens->gens + gens->n / 2;
286 ecmult_data.vec_len = vec_len;
287 ecmult_data.lg_vec_len = secp256k1_floor_lg(2 * vec_len / IP_AB_SCALARS);
288 ecmult_data.shared_g = shared_g;
289 ecmult_data.randomizer = (secp256k1_scalar *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*ecmult_data.randomizer));
290 ecmult_data.proof = (secp256k1_bulletproof_innerproduct_vfy_data *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*ecmult_data.proof));
291 /* Seed RNG for per-proof randomizers */
292 secp256k1_sha256_initialize(&sha256);
293 for (i = 0; i < n_proofs; i++) {
294 secp256k1_sha256_write(&sha256, proof[i].proof, plen);
295 secp256k1_sha256_write(&sha256, proof[i].commit, 32);
296 secp256k1_scalar_get_b32(commit, &proof[i].p_offs);
297 secp256k1_sha256_write(&sha256, commit, 32);
298 }
299 secp256k1_sha256_finalize(&sha256, commit);
300
301 secp256k1_scalar_clear(&ecmult_data.p_offs);
302 for (i = 0; i < n_proofs; i++) {
303 const unsigned char *serproof = proof[i].proof;
304 unsigned char proof_commit[32];
305 secp256k1_scalar dot;
306 secp256k1_scalar ab[IP_AB_SCALARS];
307 secp256k1_scalar negprod;
308 secp256k1_scalar x;
309 int overflow;
310 size_t j;
311 const size_t n_ab = 2 * vec_len < IP_AB_SCALARS ? 2 * vec_len : IP_AB_SCALARS;
312
313 total_n_points += 2 * ecmult_data.lg_vec_len + proof[i].n_extra_rangeproof_points - !!shared_g; /* -1 for shared G */
314
315 /* Extract dot product, will always be the first 32 bytes */
316 secp256k1_scalar_set_b32(&dot, serproof, &overflow);
317 if (overflow) {
319 return 0;
320 }
321 /* Commit to dot product */
322 secp256k1_sha256_initialize(&sha256);
323 secp256k1_sha256_write(&sha256, proof[i].commit, 32);
324 secp256k1_sha256_write(&sha256, serproof, 32);
325 secp256k1_sha256_finalize(&sha256, proof_commit);
326 serproof += 32;
327
328 /* Extract a, b */
329 for (j = 0; j < n_ab; j++) {
330 /* Zero a/b scalars are valid; only non-canonical encodings fail. */
331 secp256k1_scalar_set_b32(&ab[j], serproof, &overflow);
332 if (overflow) {
334 return 0;
335 }
336 serproof += 32;
337 }
338 secp256k1_scalar_dot_product(&negprod, &ab[0], &ab[n_ab / 2], n_ab / 2);
339
340 ecmult_data.proof[i].proof = &proof[i];
341 ecmult_data.proof[i].final_grouping = n_ab / 2;
342 ecmult_data.proof[i].block_size = ecmult_data.proof[i].final_grouping == 0 ? 0 : vec_len / ecmult_data.proof[i].final_grouping;
343 for (j = 0; j < ecmult_data.proof[i].final_grouping; j++) {
344 ecmult_data.proof[i].a[j] = ab[j];
345 ecmult_data.proof[i].b[j] = ab[j + ecmult_data.proof[i].final_grouping];
346 }
347 /* set per-proof randomizer */
348 secp256k1_sha256_initialize(&sha256);
349 secp256k1_sha256_write(&sha256, commit, 32);
350 secp256k1_sha256_finalize(&sha256, commit);
351 secp256k1_scalar_set_b32(&ecmult_data.randomizer[i], commit, &overflow);
352 if (overflow || secp256k1_scalar_is_zero(&ecmult_data.randomizer[i])) {
353 /* cryptographically unreachable */
355 return 0;
356 }
357
358 /* Compute x*(dot - a*b) for each proof; add it and p_offs to the p_offs accumulator */
359 secp256k1_scalar_set_b32(&x, proof_commit, &overflow);
360 if (overflow || secp256k1_scalar_is_zero(&x)) {
362 return 0;
363 }
364 secp256k1_scalar_negate(&negprod, &negprod);
365 secp256k1_scalar_add(&negprod, &negprod, &dot);
366 secp256k1_scalar_mul(&x, &x, &negprod);
367 secp256k1_scalar_add(&x, &x, &proof[i].p_offs);
368
369 secp256k1_scalar_mul(&x, &x, &ecmult_data.randomizer[i]);
370 secp256k1_scalar_add(&ecmult_data.p_offs, &ecmult_data.p_offs, &x);
371
372 /* Special-case: trivial proofs are valid iff the explicitly revealed scalars
373 * dot to the explicitly revealed dot product. */
374 if (2 * vec_len <= IP_AB_SCALARS) {
375 if (!secp256k1_scalar_is_zero(&negprod)) {
377 return 0;
378 }
379 /* remaining data does not (and cannot) be computed for proofs with no a's or b's. */
380 if (vec_len == 0) {
381 continue;
382 }
383 }
384
385 /* Store y^(-2^j) so y^(-idx) can be reconstructed by bit decomposition. */
386 ecmult_data.proof[i].yinvpow2[0] = proof[i].yinv;
387 for (j = 1; j <= ecmult_data.lg_vec_len; j++) {
388 secp256k1_scalar_sqr(&ecmult_data.proof[i].yinvpow2[j], &ecmult_data.proof[i].yinvpow2[j - 1]);
389 }
390
391 for (j = 0; j < ecmult_data.lg_vec_len; j++) {
392 const size_t lidx = 2 * j;
393 const size_t ridx = 2 * j + 1;
394 /* Parse and validate each round point before deriving x_j so the
395 * transcript hash and the later multiexp see the same curve points.
396 */
397 if (!secp256k1_bulletproof_deserialize_point(&ecmult_data.proof[i].lr[lidx], serproof, lidx, 2 * ecmult_data.lg_vec_len) ||
398 !secp256k1_bulletproof_deserialize_point(&ecmult_data.proof[i].lr[ridx], serproof, ridx, 2 * ecmult_data.lg_vec_len)) {
400 return 0;
401 }
402 secp256k1_bulletproof_update_commit(proof_commit, &ecmult_data.proof[i].lr[lidx], &ecmult_data.proof[i].lr[ridx]);
403 secp256k1_scalar_set_b32(&ecmult_data.proof[i].x[j], proof_commit, &overflow);
404 if (overflow || secp256k1_scalar_is_zero(&ecmult_data.proof[i].x[j])) {
406 return 0;
407 }
408 secp256k1_scalar_inverse_var(&ecmult_data.proof[i].xinv[j], &ecmult_data.proof[i].x[j]);
409 secp256k1_scalar_sqr(&ecmult_data.proof[i].xsq[j], &ecmult_data.proof[i].x[j]);
410 secp256k1_scalar_sqr(&ecmult_data.proof[i].xsqinv[j], &ecmult_data.proof[i].xinv[j]);
411 }
412 }
413
414 /* Do the multiexp */
415 if (secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &r, &zero, secp256k1_bulletproof_innerproduct_vfy_ecmult_callback, (void *) &ecmult_data, total_n_points) != 1) {
417 return 0;
418 }
420 return secp256k1_gej_is_infinity(&r);
421}
422
423typedef struct {
425 secp256k1_scalar xinv[SECP256K1_BULLETPROOF_MAX_DEPTH];
426 secp256k1_scalar yinv;
427 secp256k1_scalar yinvn;
428 const secp256k1_ge *geng;
429 const secp256k1_ge *genh;
430 const secp256k1_ge *g;
431 const secp256k1_scalar *a;
432 const secp256k1_scalar *b;
433 secp256k1_scalar g_sc;
434 size_t grouping;
435 size_t n;
437
/* At each level i of recursion (i from 0 upto lg(vector size) - 1)
 *   L = a_even . G_odd + b_odd . H_even (18)
 * which, by expanding the generators into the original G's and H's
 * and setting n = (1 << i), can be computed as follows:
 *
 * For j from 1 to [vector size],
 *   1. Use H[j] or G[j] as generator, starting with H and switching
 *      every n.
 *   2. Start with b1 with H and a0 with G, and increment by 2 each switch.
 *   3. For k = 1, 2, 4, ..., n/2, use the same algorithm to choose
 *      between a and b to choose between x and x^-1, except using
 *      k in place of n. With H's choose x then x^-1, with G's choose
 *      x^-1 then x.
 *
 * For R everything is the same except swap G/H and a/b and x/x^-1.
 */
454static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
456 const size_t ab_idx = (idx / ctx->grouping) ^ 1;
457 size_t i;
458
459 /* Special-case the primary generator */
460 if (idx == ctx->n) {
461 *pt = *ctx->g;
462 *sc = ctx->g_sc;
463 return 1;
464 }
465
466 /* steps 1/2 */
467 if ((idx / ctx->grouping) % 2 == 0) {
468 *pt = ctx->genh[idx];
469 *sc = ctx->b[ab_idx];
470 /* Map h -> h' (eqn 59) */
471 secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
472 } else {
473 *pt = ctx->geng[idx];
474 *sc = ctx->a[ab_idx];
475 }
476
477 /* step 3 */
478 for (i = 0; (((size_t)1) << i) < ctx->grouping; i++) {
479 size_t grouping = ((size_t)1) << i;
480 if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 0) {
481 secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
482 } else {
483 secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
484 }
485 }
486
487 secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
488 return 1;
489}
490
491/* Identical code except `== 0` changed to `== 1` twice, and the
492 * `+ 1` from Step 1/2 was moved to the other if branch. */
493static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
495 const size_t ab_idx = (idx / ctx->grouping) ^ 1;
496 size_t i;
497
498 /* Special-case the primary generator */
499 if (idx == ctx->n) {
500 *pt = *ctx->g;
501 *sc = ctx->g_sc;
502 return 1;
503 }
504
505 /* steps 1/2 */
506 if ((idx / ctx->grouping) % 2 == 1) {
507 *pt = ctx->genh[idx];
508 *sc = ctx->b[ab_idx];
509 /* Map h -> h' (eqn 59) */
510 secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
511 } else {
512 *pt = ctx->geng[idx];
513 *sc = ctx->a[ab_idx];
514 }
515
516 /* step 3 */
517 for (i = 0; (((size_t)1) << i) < ctx->grouping; i++) {
518 size_t grouping = ((size_t)1) << i;
519 if ((((idx / grouping) % 2) ^ ((idx / ctx->grouping) % 2)) == 1) {
520 secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
521 } else {
522 secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
523 }
524 }
525
526 secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
527 return 1;
528}
529
530static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
532 size_t i;
533
534 *pt = ctx->geng[idx];
535 secp256k1_scalar_set_int(sc, 1);
536 for (i = 0; (((size_t)1) << i) <= ctx->grouping; i++) {
537 if (idx & (((size_t)1) << i)) {
538 secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
539 } else {
540 secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
541 }
542 }
543 return 1;
544}
545
546static int secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
548 size_t i;
549
550 *pt = ctx->genh[idx];
551 secp256k1_scalar_set_int(sc, 1);
552 for (i = 0; (((size_t)1) << i) <= ctx->grouping; i++) {
553 if (idx & (((size_t)1) << i)) {
554 secp256k1_scalar_mul(sc, sc, &ctx->xinv[i]);
555 } else {
556 secp256k1_scalar_mul(sc, sc, &ctx->x[i]);
557 }
558 }
559 secp256k1_scalar_mul(sc, sc, &ctx->yinvn);
560 secp256k1_scalar_mul(&ctx->yinvn, &ctx->yinvn, &ctx->yinv);
561 return 1;
562}
563
564/* These proofs are not zero-knowledge. There is no need to worry about constant timeness.
565 * `commit_inp` must contain 256 bits of randomness, it is used immediately as a randomizer.
566 */
567static int secp256k1_bulletproof_inner_product_real_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, secp256k1_ge *out_pt, size_t *pt_idx, const secp256k1_ge *g, secp256k1_ge *geng, secp256k1_ge *genh, secp256k1_scalar *a_arr, secp256k1_scalar *b_arr, const secp256k1_scalar *yinv, const secp256k1_scalar *ux, const size_t n, unsigned char *commit) {
568 size_t i;
569 size_t halfwidth;
570 secp256k1_scalar zero;
571
573 pfdata.yinv = *yinv;
574 pfdata.g = g;
575 pfdata.geng = geng;
576 pfdata.genh = genh;
577 pfdata.a = a_arr;
578 pfdata.b = b_arr;
579 pfdata.n = n;
580
581 secp256k1_scalar_clear(&zero);
582
583 /* Protocol 1: Iterate, halving vector size until it is 1 */
584 for (halfwidth = n / 2, i = 0; halfwidth > IP_AB_SCALARS / 4; halfwidth /= 2, i++) {
585 secp256k1_gej tmplj, tmprj;
586 size_t j;
587 int overflow;
588
589 pfdata.grouping = ((size_t)1) << i;
590
591 /* L */
592 secp256k1_scalar_clear(&pfdata.g_sc);
593 for (j = 0; j < halfwidth; j++) {
594 secp256k1_scalar prod;
595 secp256k1_scalar_mul(&prod, &a_arr[2*j], &b_arr[2*j + 1]);
596 secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod);
597 }
598 secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux);
599
600 secp256k1_scalar_set_int(&pfdata.yinvn, 1);
601 secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &tmplj, &zero, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_l, (void *) &pfdata, n + 1);
602 secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmplj);
603
604 /* R */
605 secp256k1_scalar_clear(&pfdata.g_sc);
606 for (j = 0; j < halfwidth; j++) {
607 secp256k1_scalar prod;
608 secp256k1_scalar_mul(&prod, &a_arr[2*j + 1], &b_arr[2*j]);
609 secp256k1_scalar_add(&pfdata.g_sc, &pfdata.g_sc, &prod);
610 }
611 secp256k1_scalar_mul(&pfdata.g_sc, &pfdata.g_sc, ux);
612
613 secp256k1_scalar_set_int(&pfdata.yinvn, 1);
614 secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &tmprj, &zero, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_r, (void *) &pfdata, n + 1);
615 secp256k1_ge_set_gej(&out_pt[(*pt_idx)++], &tmprj);
616
617 /* x, x^2, x^-1, x^-2 */
618 secp256k1_bulletproof_update_commit(commit, &out_pt[*pt_idx - 2], &out_pt[*pt_idx] - 1);
619 secp256k1_scalar_set_b32(&pfdata.x[i], commit, &overflow);
620 if (overflow || secp256k1_scalar_is_zero(&pfdata.x[i])) {
621 return 0;
622 }
623 secp256k1_scalar_inverse_var(&pfdata.xinv[i], &pfdata.x[i]);
624
625 /* update scalar array */
626 for (j = 0; j < halfwidth; j++) {
627 secp256k1_scalar tmps;
628 secp256k1_scalar_mul(&a_arr[2*j], &a_arr[2*j], &pfdata.x[i]);
629 secp256k1_scalar_mul(&tmps, &a_arr[2*j + 1], &pfdata.xinv[i]);
630 secp256k1_scalar_add(&a_arr[j], &a_arr[2*j], &tmps);
631
632 secp256k1_scalar_mul(&b_arr[2*j], &b_arr[2*j], &pfdata.xinv[i]);
633 secp256k1_scalar_mul(&tmps, &b_arr[2*j + 1], &pfdata.x[i]);
634 secp256k1_scalar_add(&b_arr[j], &b_arr[2*j], &tmps);
635
636 }
637
638 /* Combine G generators and recurse, if that would be more optimal */
639 if ((n > 2048 && i == 3) || (n > 128 && i == 2) || (n > 32 && i == 1)) {
640 secp256k1_scalar yinv2;
641
642 for (j = 0; j < halfwidth; j++) {
643 secp256k1_gej rj;
644 secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &rj, &zero, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_g, (void *) &pfdata, 2u << i);
645 pfdata.geng += 2u << i;
646 secp256k1_ge_set_gej(&geng[j], &rj);
647 secp256k1_scalar_set_int(&pfdata.yinvn, 1);
648 secp256k1_ecmult_multi_var(ecmult_ctx, scratch, &rj, &zero, &secp256k1_bulletproof_innerproduct_pf_ecmult_callback_h, (void *) &pfdata, 2u << i);
649 pfdata.genh += 2u << i;
650 secp256k1_ge_set_gej(&genh[j], &rj);
651 }
652
653 secp256k1_scalar_sqr(&yinv2, yinv);
654 for (j = 0; j < i; j++) {
655 secp256k1_scalar_sqr(&yinv2, &yinv2);
656 }
657 if (!secp256k1_bulletproof_inner_product_real_prove_impl(ecmult_ctx, scratch, out_pt, pt_idx, g, geng, genh, a_arr, b_arr, &yinv2, ux, halfwidth, commit)) {
658 return 0;
659 }
660 break;
661 }
662 }
663 return 1;
664}
665
666static int secp256k1_bulletproof_inner_product_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, unsigned char *proof, size_t *proof_len, const secp256k1_bulletproof_generators *gens, const secp256k1_scalar *yinv, const size_t n, secp256k1_ecmult_multi_callback *cb, void *cb_data, const unsigned char *commit_inp) {
667 secp256k1_sha256 sha256;
668 size_t i;
669 unsigned char commit[32];
670 secp256k1_scalar *a_arr;
671 secp256k1_scalar *b_arr;
672 secp256k1_ge *out_pt;
673 secp256k1_ge *geng;
674 secp256k1_ge *genh;
675 secp256k1_scalar ux;
676 int overflow;
677 size_t pt_idx = 0;
678 secp256k1_scalar dot;
679 size_t half_n_ab = n < IP_AB_SCALARS / 2 ? n : IP_AB_SCALARS / 2;
680
682 return 0;
683 }
685
686 /* Special-case lengths 0 and 1 whose proofs are just expliict lists of scalars */
687 if (n <= IP_AB_SCALARS / 2) {
688 secp256k1_scalar a[IP_AB_SCALARS / 2];
689 secp256k1_scalar b[IP_AB_SCALARS / 2];
690
691 for (i = 0; i < n; i++) {
692 cb(&a[i], NULL, 2*i, cb_data);
693 cb(&b[i], NULL, 2*i+1, cb_data);
694 }
695
696 secp256k1_scalar_dot_product(&dot, a, b, n);
697 secp256k1_scalar_get_b32(proof, &dot);
698
699 for (i = 0; i < n; i++) {
700 secp256k1_scalar_get_b32(&proof[32 * (i + 1)], &a[i]);
701 secp256k1_scalar_get_b32(&proof[32 * (i + n + 1)], &b[i]);
702 }
703 VERIFY_CHECK(*proof_len == 32 * (2 * n + 1));
704 return 1;
705 }
706
707 /* setup for nontrivial proofs */
708 if (!secp256k1_scratch_allocate_frame(scratch, 2 * n * (sizeof(secp256k1_scalar) + sizeof(secp256k1_ge)) + 2 * secp256k1_floor_lg(n) * sizeof(secp256k1_ge), 5)) {
709 return 0;
710 }
711
712 a_arr = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_scalar));
713 b_arr = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_scalar));
714 geng = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_ge));
715 genh = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n * sizeof(secp256k1_ge));
716 out_pt = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, 2 * secp256k1_floor_lg(n) * sizeof(secp256k1_ge));
717 VERIFY_CHECK(a_arr != NULL);
718 VERIFY_CHECK(b_arr != NULL);
719 VERIFY_CHECK(gens != NULL);
720
721 for (i = 0; i < n; i++) {
722 cb(&a_arr[i], NULL, 2*i, cb_data);
723 cb(&b_arr[i], NULL, 2*i+1, cb_data);
724 geng[i] = gens->gens[i];
725 genh[i] = gens->gens[i + gens->n/2];
726 }
727
728 /* Record final dot product */
729 secp256k1_scalar_dot_product(&dot, a_arr, b_arr, n);
730 secp256k1_scalar_get_b32(proof, &dot);
731
732 /* Protocol 2: hash dot product to obtain G-randomizer */
733 secp256k1_sha256_initialize(&sha256);
734 secp256k1_sha256_write(&sha256, commit_inp, 32);
735 secp256k1_sha256_write(&sha256, proof, 32);
736 secp256k1_sha256_finalize(&sha256, commit);
737
738 proof += 32;
739
740 secp256k1_scalar_set_b32(&ux, commit, &overflow);
741 if (overflow || secp256k1_scalar_is_zero(&ux)) {
742 /* cryptographically unreachable */
744 return 0;
745 }
746
747 if (!secp256k1_bulletproof_inner_product_real_prove_impl(ecmult_ctx, scratch, out_pt, &pt_idx, gens->blinding_gen, geng, genh, a_arr, b_arr, yinv, &ux, n, commit)) {
749 return 0;
750 }
751
752 /* Final a/b values */
753 for (i = 0; i < half_n_ab; i++) {
754 secp256k1_scalar_get_b32(&proof[32 * i], &a_arr[i]);
755 secp256k1_scalar_get_b32(&proof[32 * (i + half_n_ab)], &b_arr[i]);
756 }
757 proof += 64 * half_n_ab;
758 secp256k1_bulletproof_serialize_points(proof, out_pt, pt_idx);
759
761 return 1;
762}
763
764#undef IP_AB_SCALARS
765
766#endif
/* NOTE(review): trailing Doxygen navigation/tooltip residue from the HTML
 * extraction was removed here; it contained no source content beyond the
 * signatures already present above. */