purify
C++ Purify implementation with native circuit and BPP support
Loading...
Searching...
No Matches
circuit_impl.h
Go to the documentation of this file.
1/**********************************************************************
2 * Copyright (c) 2018 Andrew Poelstra *
3 * Distributed under the MIT software license, see the accompanying *
4 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
5 **********************************************************************/
6
7#ifndef SECP256K1_MODULE_BULLETPROOF_CIRCUIT_IMPL
8#define SECP256K1_MODULE_BULLETPROOF_CIRCUIT_IMPL
9
13#include "third_party/secp256k1-zkp/src/group.h"
14
15#include <stdlib.h>
16
23
24static int secp256k1_bulletproof_circuit_abgh_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
26 const int is_g = idx % 2 == 0;
27
28 (void) pt;
29 if (is_g) {
30 /* l(x) */
31 if (idx / 2 < ctx->assn->n_gates) {
32 secp256k1_scalar_mul(sc, &ctx->comp_circ->l3[idx / 2], &ctx->x);
33 secp256k1_scalar_add(sc, sc, &ctx->assn->ao[idx / 2]);
34 secp256k1_scalar_mul(sc, sc, &ctx->x);
35 } else {
36 secp256k1_scalar_mul(sc, &ctx->comp_circ->l3[idx / 2], &ctx->x2);
37 }
38 secp256k1_scalar_add(sc, sc, &ctx->comp_circ->l1[idx / 2]);
39 secp256k1_scalar_mul(sc, sc, &ctx->x);
40 } else {
41 /* r(x) */
42 secp256k1_scalar_mul(sc, &ctx->comp_circ->r3[idx / 2], &ctx->x2);
43 secp256k1_scalar_add(sc, sc, &ctx->comp_circ->r1[idx / 2]);
44 secp256k1_scalar_mul(sc, sc, &ctx->x);
45 secp256k1_scalar_add(sc, sc, &ctx->comp_circ->r0[idx / 2]);
46 }
47
48 return 1;
49}
50
51/* Proof format:
52 *
53 * Serialized scalars (32 bytes) t, tau_x, mu, a, b
54 * Serialized points (bit array of parity followed by 32 bytes): A_I, A_O, S, T_1, T_3, T_4, T_5, T_6, [inner product proof points]
55 */
/* Prover for the arithmetic-circuit satisfiability relation (relation 66 of
 * the Bulletproofs paper).  Serializes taux, mu, eight curve points and an
 * inner product proof into `proof` (format documented above); returns 1 on
 * success, 0 on failure.
 * NOTE(review): this listing was extracted from generated documentation and
 * several original source lines are missing (the embedded line numbers jump);
 * each gap is marked below.  The code as shown is not compilable as-is.
 */
56static int secp256k1_bulletproof_relation66_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, unsigned char *proof, size_t *plen, const secp256k1_bulletproof_circuit_assignment *assn, const secp256k1_ge *commitp, const secp256k1_scalar *blinds, size_t nc, const secp256k1_ge *value_gen, const secp256k1_bulletproof_circuit *circ, const secp256k1_bulletproof_generators *gens, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len) {
/* NOTE(review): original lines 57-58 are missing here; they presumably
 * declared `comp_circ` (assigned at line 106 below) and `abgh_data`
 * (populated near the end) -- confirm against upstream. */
59 secp256k1_sha256 sha256;
60 unsigned char commit[32] = {0};
61 secp256k1_scalar alpha, beta, rho, mu;
62 secp256k1_scalar tau1, tau3, tau4, tau5, tau6, taux; /* tau2 missing on purpose */
63 secp256k1_scalar t[7]; /* t[1..6] are coefficients; t[0] is the polynomial evaluated at x */
64 secp256k1_scalar tauv; /* <z, WV*gamma> term in eq (73) */
65 secp256k1_scalar x, xn, y, yinv, z;
66 secp256k1_scalar tmp;
67 secp256k1_gej aij, aoj, sj;
68 secp256k1_ge tmpge;
69 secp256k1_ge out_pt[8];
70 int overflow;
71 size_t i;
72
/* Reject assignments larger than the circuit, or a commitment count that
 * does not match the circuit's expectation. */
73 if (assn->n_gates > circ->n_gates || assn->n_commits > circ->n_commits || nc != circ->n_commits) {
74 return 0;
75 }
76 if (*plen < 64 + 256 + 1) { /* inner product argument will do a more precise check and assignment */
77 return 0;
78 }
79
80 /* Commit to all input data */
81 if (nc != 0) {
82 secp256k1_bulletproof_update_commit_n(commit, commitp, nc);
83 }
84 secp256k1_bulletproof_update_commit_n(commit, value_gen, 1);
85 /* The legacy transcript must bind the circuit on its own; extra_commit is
86 * optional caller context, not the only circuit binding.
87 */
/* NOTE(review): original line 88 is missing -- per the comment above it
 * presumably called secp256k1_bulletproof_update_commit_circuit(commit, circ);
 * confirm against upstream. */
89 if (extra_commit != NULL) {
90 secp256k1_sha256_initialize(&sha256);
91 secp256k1_sha256_write(&sha256, commit, 32);
92 secp256k1_sha256_write(&sha256, extra_commit, extra_commit_len);
93 secp256k1_sha256_finalize(&sha256, commit);
94 }
95
96 /* Setup, generate randomness */
97 secp256k1_scalar_chacha20(&alpha, &beta, nonce, 0);
/* NOTE(review): original line 98 is missing; rho and tau1 are used below but
 * never derived in this listing -- presumably
 * secp256k1_scalar_chacha20(&rho, &tau1, nonce, 1); confirm upstream. */
99 secp256k1_scalar_chacha20(&tau3, &tau4, nonce, 2); /* t2 will be generated deterministically */
100 secp256k1_scalar_chacha20(&tau5, &tau6, nonce, 3);
101
102 /* Compute blinding factors in comp_circ.l3 and comp_circ.r3 */
/* NOTE(review): original line 103 is missing -- the orphaned `return 0; }`
 * below was presumably the failure branch of a scratch-frame allocation such
 * as if (!secp256k1_bulletproof_pf_compressed_circuit_allocate_frame(scratch, circ)) { */
104 return 0;
105 }
106 comp_circ = secp256k1_bulletproof_pf_slsr(scratch, circ, nonce);
107
108 /* Compute A_I, A_O, S */
/* A_I = alpha*H + <al, G> + <ar, H'>; the first n_bits terms are handled
 * with constant-time conditional moves since al is a bit vector there. */
109 secp256k1_ecmult_const(&aij, gens->blinding_gen, &alpha);
110 for (i = 0; i < circ->n_bits; i++) {
111 secp256k1_ge aterm = gens->gens[i + gens->n/2];
112
113 secp256k1_ge_neg(&aterm, &aterm);
114 secp256k1_fe_cmov(&aterm.x, &gens->gens[i].x, secp256k1_scalar_is_one(&assn->al[i]));
115 secp256k1_fe_cmov(&aterm.y, &gens->gens[i].y, secp256k1_scalar_is_one(&assn->al[i]));
116 secp256k1_gej_add_ge(&aij, &aij, &aterm);
117 }
118 secp256k1_ge_set_gej(&tmpge, &aij);
119 secp256k1_bulletproof_vector_commit(&aij, assn->al + circ->n_bits, gens->gens + circ->n_bits, assn->n_gates - circ->n_bits, NULL, NULL);
120 secp256k1_gej_add_ge(&aij, &aij, &tmpge);
121 secp256k1_ge_set_gej(&tmpge, &aij);
122 secp256k1_bulletproof_vector_commit(&aij, assn->ar + circ->n_bits, gens->gens + circ->n_bits + gens->n/2, assn->n_gates - circ->n_bits, NULL, NULL);
123 secp256k1_gej_add_ge(&aij, &aij, &tmpge);
124
/* A_O = beta*H + <ao, G> */
125 secp256k1_bulletproof_vector_commit(&aoj, assn->ao + circ->n_bits, gens->gens + circ->n_bits, assn->n_gates - circ->n_bits, &beta, gens->blinding_gen);
126
/* S = rho*H + <sl, G> + <sr, H'> where sl/sr are the blinding vectors
 * stored in comp_circ->l3 / comp_circ->r3 by pf_slsr above. */
127 secp256k1_ecmult_const(&sj, gens->blinding_gen, &rho);
128 for (i = 0; i < circ->n_gates; i++) {
129 secp256k1_gej termj;
130 secp256k1_ge term;
131
132 secp256k1_ecmult_const(&termj, &gens->gens[i], &comp_circ->l3[i]);
133 secp256k1_ge_set_gej(&term, &termj);
134 secp256k1_gej_add_ge(&sj, &sj, &term);
135 secp256k1_ecmult_const(&termj, &gens->gens[i + gens->n/2], &comp_circ->r3[i]);
136 secp256k1_ge_set_gej(&term, &termj);
137 secp256k1_gej_add_ge(&sj, &sj, &term);
138 }
139
140 /* get challenges y and z */
141 secp256k1_ge_set_gej(&out_pt[0], &aij);
142 secp256k1_ge_set_gej(&out_pt[1], &aoj);
143 secp256k1_ge_set_gej(&out_pt[2], &sj);
144
145 secp256k1_bulletproof_update_commit_n(commit, &out_pt[0], 3);
146 secp256k1_scalar_set_b32(&y, commit, &overflow);
147 if (overflow || secp256k1_scalar_is_zero(&y)) {
/* NOTE(review): original line 148 is missing -- presumably
 * secp256k1_scratch_deallocate_frame(scratch); to undo the frame opened
 * before pf_slsr. Same for the elided lines in the z/x failure paths below. */
149 return 0;
150 }
/* NOTE(review): original line 151 is missing -- presumably a transcript
 * re-hash (e.g. secp256k1_bulletproof_update_commit_n(commit, NULL, 0);)
 * so that z is derived from a different digest than y. */
152 secp256k1_scalar_set_b32(&z, commit, &overflow);
153 if (overflow || secp256k1_scalar_is_zero(&z)) {
/* NOTE(review): original line 154 elided (scratch frame cleanup, see above). */
155 return 0;
156 }
157 secp256k1_scalar_inverse_var(&yinv, &y);
158
159 /* complete circuit compression */
160 secp256k1_bulletproof_pf_compress_circuit(comp_circ, circ, assn, &y, &yinv, &z);
161
162 /* Compute coefficients t[1..6] */
163 /* Observe that
164 * l = l1 * X + l2 * X^2 + l3 * X^3
165 * r = r0 + r1 * X + r3 * X^3
166 * with l2 = ao, so that
167 * t1 = <l1, r0>
168 * t2 = <l1, r1> + <ao, r0>
169 * t3 = <ao, r1> + <l3, r0>
170 * t4 = <l3, r1> + <l1, r3>
171 * t5 = <ao, r3>
172 * t6 = <l3, r3>
173 * So we compute these terms and add them to t1,t3,etc as running sums.
174 */
175
176 for (i = 0; i < 6; i++) {
177 secp256k1_scalar_clear(&t[i + 1]);
178 }
179 for (i = 0; i < circ->n_gates; i++) {
180 secp256k1_scalar ao;
181
/* gates past the assignment length have an implicit zero output wire */
182 if (i < assn->n_gates) {
183 ao = assn->ao[i];
184 } else {
185 secp256k1_scalar_clear(&ao);
186 }
187
188 /* Now that we have the individual coefficients, compute the dot product */
189 secp256k1_scalar_mul(&tmp, &comp_circ->l1[i], &comp_circ->r0[i]);
190 secp256k1_scalar_add(&t[1], &t[1], &tmp);
191
192 secp256k1_scalar_mul(&tmp, &comp_circ->l1[i], &comp_circ->r1[i]);
193 secp256k1_scalar_add(&t[2], &t[2], &tmp);
194 secp256k1_scalar_mul(&tmp, &ao, &comp_circ->r0[i]);
195 secp256k1_scalar_add(&t[2], &t[2], &tmp);
196
197 secp256k1_scalar_mul(&tmp, &ao, &comp_circ->r1[i]);
198 secp256k1_scalar_add(&t[3], &t[3], &tmp);
199 secp256k1_scalar_mul(&tmp, &comp_circ->l3[i], &comp_circ->r0[i]);
200 secp256k1_scalar_add(&t[3], &t[3], &tmp);
201
202 secp256k1_scalar_mul(&tmp, &comp_circ->l3[i], &comp_circ->r1[i]);
203 secp256k1_scalar_add(&t[4], &t[4], &tmp);
204 secp256k1_scalar_mul(&tmp, &comp_circ->l1[i], &comp_circ->r3[i]);
205 secp256k1_scalar_add(&t[4], &t[4], &tmp);
206
207 secp256k1_scalar_mul(&tmp, &ao, &comp_circ->r3[i]);
208 secp256k1_scalar_add(&t[5], &t[5], &tmp);
209
210 secp256k1_scalar_mul(&tmp, &comp_circ->l3[i], &comp_circ->r3[i]);
211 secp256k1_scalar_add(&t[6], &t[6], &tmp);
212 }
213
214 /* Compute T1, T3, T4, T5, T6 */
/* Each T_i = t_i * value_gen + tau_i * blinding_gen; T_2 is deliberately
 * omitted from the proof (see the tau2 comment in the declarations). */
215 secp256k1_bulletproof_vector_commit(&aij, &t[1], value_gen, 1, &tau1, gens->blinding_gen);
216 secp256k1_ge_set_gej(&out_pt[3], &aij);
217
218 secp256k1_bulletproof_vector_commit(&aij, &t[3], value_gen, 1, &tau3, gens->blinding_gen);
219 secp256k1_ge_set_gej(&out_pt[4], &aij);
220
221 secp256k1_bulletproof_vector_commit(&aij, &t[4], value_gen, 1, &tau4, gens->blinding_gen);
222 secp256k1_ge_set_gej(&out_pt[5], &aij);
223
224 secp256k1_bulletproof_vector_commit(&aij, &t[5], value_gen, 1, &tau5, gens->blinding_gen);
225 secp256k1_ge_set_gej(&out_pt[6], &aij);
226
227 secp256k1_bulletproof_vector_commit(&aij, &t[6], value_gen, 1, &tau6, gens->blinding_gen);
228 secp256k1_ge_set_gej(&out_pt[7], &aij);
229
230 /* Compute x, tau_x, mu and t */
231 secp256k1_bulletproof_update_commit_n(commit, &out_pt[3], 5);
232 secp256k1_scalar_set_b32(&x, commit, &overflow);
233 if (overflow || secp256k1_scalar_is_zero(&x)) {
/* NOTE(review): original line 234 elided (scratch frame cleanup, see above). */
235 return 0;
236 }
237
/* Fold the challenge powers x, x^2, ..., x^6 into the blinding scalars;
 * xn carries the running power of x. */
238 secp256k1_scalar_mul(&alpha, &alpha, &x);
239 secp256k1_scalar_mul(&tau1, &tau1, &x);
240
241 secp256k1_scalar_sqr(&xn, &x);
242 secp256k1_scalar_mul(&beta, &beta, &xn);
243 secp256k1_scalar_clear(&tauv);
244 for (i = 0; i < circ->n_commits; i++) {
245 secp256k1_scalar zwv;
246 secp256k1_scalar_mul(&zwv, &comp_circ->wv[i], &blinds[i]);
247 secp256k1_scalar_add(&tauv, &tauv, &zwv);
248 }
249 secp256k1_scalar_mul(&tauv, &tauv, &xn);
250
251 secp256k1_scalar_mul(&xn, &xn, &x);
252 secp256k1_scalar_mul(&rho, &rho, &xn);
253 secp256k1_scalar_mul(&tau3, &tau3, &xn);
254
255 secp256k1_scalar_mul(&xn, &xn, &x);
256 secp256k1_scalar_mul(&tau4, &tau4, &xn);
257
258 secp256k1_scalar_mul(&xn, &xn, &x);
259 secp256k1_scalar_mul(&tau5, &tau5, &xn);
260
261 secp256k1_scalar_mul(&xn, &xn, &x);
262 secp256k1_scalar_mul(&tau6, &tau6, &xn);
263
264 secp256k1_scalar_add(&taux, &tau1, &tauv);
265 secp256k1_scalar_add(&taux, &taux, &tau3);
266 secp256k1_scalar_add(&taux, &taux, &tau4);
267 secp256k1_scalar_add(&taux, &taux, &tau5);
268 secp256k1_scalar_add(&taux, &taux, &tau6);
269
270 secp256k1_scalar_add(&mu, &alpha, &beta);
271 secp256k1_scalar_add(&mu, &mu, &rho);
272
273 /* Negate taux and mu so verifier doesn't have to */
274 secp256k1_scalar_negate(&mu, &mu);
275 secp256k1_scalar_negate(&taux, &taux);
276
277 /* Encode circuit stuff */
278 secp256k1_scalar_get_b32(&proof[0], &taux);
279 secp256k1_scalar_get_b32(&proof[32], &mu);
280 secp256k1_bulletproof_serialize_points(&proof[64], out_pt, 8);
281
282 /* Mix these scalars into the hash so the input to the inner product proof is fixed */
283 secp256k1_sha256_initialize(&sha256);
284 secp256k1_sha256_write(&sha256, commit, 32);
285 secp256k1_sha256_write(&sha256, proof, 64);
286 secp256k1_sha256_finalize(&sha256, commit);
287
288 /* Compute l and r, do inner product proof */
289 abgh_data.x = x;
290 secp256k1_scalar_sqr(&abgh_data.x2, &x);
291 abgh_data.comp_circ = comp_circ;
292 abgh_data.assn = assn;
/* Reserve the fixed prefix (taux, mu, 8 points) before handing the
 * remaining space to the inner product prover. */
293 *plen -= 64 + 256 + 1;
294 if (secp256k1_bulletproof_inner_product_prove_impl(ecmult_ctx, scratch, &proof[64 + 256 + 1], plen, gens, &yinv, circ->n_gates, secp256k1_bulletproof_circuit_abgh_callback, (void *) &abgh_data, commit) == 0) {
/* NOTE(review): original line 295 elided (scratch frame cleanup, see above). */
296 return 0;
297 }
298 *plen += 64 + 256 + 1;
/* NOTE(review): original lines 299-300 partially elided -- line 300
 * presumably deallocated the scratch frame on the success path too. */
301 return 1;
302}
303
/* Per-proof context threaded through secp256k1_bulletproof_circuit_vfy_callback
 * during batch verification.
 * NOTE(review): this typedef is truncated in the extracted listing -- the
 * member at original line 309 (per the symbol index, presumably
 * `const secp256k1_bulletproof_vfy_compressed_circuit *comp_circ;`) and the
 * closing `} secp256k1_bulletproof_circuit_vfy_ecmult_context;` at original
 * line 322 are missing; confirm against upstream. */
304typedef struct {
/* Fiat-Shamir challenges x, y (and cached 1/y), z for this proof */
305 secp256k1_scalar x;
306 secp256k1_scalar y;
307 secp256k1_scalar yinv;
308 secp256k1_scalar z;
310 /* state tracking */
/* index of the next "extra" point handed to the callback */
311 size_t count;
312 /* eq 83 */
/* A_I, A_O, S deserialized from the proof */
313 secp256k1_ge age[3];
314 /* eq 82 */
315 secp256k1_scalar randomizer82;
/* T_1, T_3, T_4, T_5, T_6 deserialized from the proof */
316 secp256k1_ge tge[5];
/* t = l(x).r(x), read off the front of the inner product proof */
317 secp256k1_scalar t;
318 const secp256k1_ge *value_gen;
/* caller's Pedersen commitments (may be NULL when n_commits == 0) */
319 const secp256k1_ge *commits;
320 size_t n_gates;
321 size_t n_commits;
323
324/* Batch verification shares scratch sizing and inner-product dimensions across
325 * proofs, so heterogeneous circuit shapes must be rejected up front.
326 */
333
334static int secp256k1_bulletproof_circuit_vfy_callback(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data) {
336
337 if (idx < ctx->n_gates) { /* Gi */
338 secp256k1_scalar_mul(sc, &ctx->comp_circ->wr[idx], &ctx->x);
339 secp256k1_scalar_mul(sc, sc, randomizer);
340 } else if (idx < 2 * ctx->n_gates) { /* Hi */
341 secp256k1_scalar dot;
342 idx -= ctx->n_gates;
343
344 secp256k1_scalar_set_int(&dot, 1);
345 secp256k1_scalar_negate(&dot, &dot);
346 secp256k1_scalar_add(sc, &ctx->comp_circ->wl_wo[idx], &dot);
347
348 secp256k1_scalar_mul(sc, sc, randomizer);
349 /* return a (scalar, point) pair to add to the multiexp */
350 } else {
351 switch(ctx->count) {
352 /* g^(x^2(k + <z^Q, c>) - t) (82) */
353 case 0: {
354 secp256k1_scalar_negate(sc, &ctx->t);
355 secp256k1_scalar_add(sc, sc, &ctx->comp_circ->c_sum);
356 secp256k1_scalar_mul(sc, sc, &ctx->randomizer82);
357 *pt = *ctx->value_gen;
358 break;
359 }
360 /* A_I^x (83) */
361 case 1:
362 *sc = ctx->x;
363 *pt = ctx->age[0];
364 break;
365 /* A_O^(x^2) (83) */
366 case 2:
367 secp256k1_scalar_sqr(sc, &ctx->x);
368 *pt = ctx->age[1];
369 break;
370 /* S^(x^3) (83) */
371 case 3:
372 secp256k1_scalar_sqr(sc, &ctx->x); /* TODO cache previous squaring */
373 secp256k1_scalar_mul(sc, sc, &ctx->x);
374 *pt = ctx->age[2];
375 break;
376 /* T_1^x (82) */
377 case 4:
378 secp256k1_scalar_mul(sc, &ctx->x, &ctx->randomizer82);
379 *pt = ctx->tge[0];
380 break;
381 default:
382 if (ctx->count < 9) {
383 size_t i;
384 secp256k1_scalar_mul(sc, &ctx->x, &ctx->randomizer82);
385 for (i = 0; i < ctx->count - 3; i++) {
386 secp256k1_scalar_mul(sc, sc, &ctx->x);
387 }
388 *pt = ctx->tge[ctx->count - 4];
389 } else if (ctx->count < 9 + ctx->n_commits) {
390 /* V^(x^2 . (z^Q . W_V)) (82) */
391 secp256k1_scalar_mul(sc, &ctx->comp_circ->wv[ctx->count - 9], &ctx->randomizer82);
392 *pt = ctx->commits[ctx->count - 9];
393 } else {
394 VERIFY_CHECK(!"bulletproof: too many points added by circuit_verify_impl to inner_product_verify_impl");
395 }
396 }
397 secp256k1_scalar_mul(sc, sc, randomizer);
398 ctx->count++;
399 }
400 return 1;
401}
402
/* Batch verifier for the circuit satisfiability relation (relation 66).
 * Verifies n_proofs proofs, all of length plen and all over circuits with the
 * same batch shape; returns 1 iff every proof verifies.
 * NOTE(review): this listing was extracted from generated documentation and
 * several original source lines are missing (the embedded numbers jump);
 * each gap is marked below.  The code as shown is not compilable as-is.
 */
403static int secp256k1_bulletproof_relation66_verify_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, const unsigned char* const* proof, size_t n_proofs, size_t plen, const secp256k1_ge* const* commitp, size_t *nc, const secp256k1_ge *value_gen, const secp256k1_bulletproof_circuit* const* circ, const secp256k1_bulletproof_generators *gens, const unsigned char **extra_commit, size_t *extra_commit_len) {
404 int ret;
/* NOTE(review): original lines 405-406 are missing; they presumably declared
 * the `ecmult_data` and `innp_ctx` pointers assigned from scratch below. */
407 size_t i;
408
409 /* sanity-check input */
410 if (plen < 64 + 256 + 1) { /* inner product argument will do a more precise check */
411 return 0;
412 }
/* NOTE(review): original line 413 is missing -- the orphaned `return 0; }`
 * below was presumably the body of another sanity check (e.g. a
 * plen > SECP256K1_BULLETPROOF_MAX_PROOF bound); confirm upstream. */
414 return 0;
415 }
/* Batch verification shares scratch sizing and inner-product dimensions
 * across proofs, so heterogeneous circuit shapes are rejected here. */
416 for (i = 1; i < n_proofs; i++) {
417 if (!secp256k1_bulletproof_circuit_same_batch_shape(circ[0], circ[i])) {
418 return 0;
419 }
420 }
421
422 if (!secp256k1_scratch_allocate_frame(scratch, n_proofs * (sizeof(*ecmult_data) + sizeof(*innp_ctx)), 2)) {
423 return 0;
424 }
425 ecmult_data = (secp256k1_bulletproof_circuit_vfy_ecmult_context *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*ecmult_data));
426 innp_ctx = (secp256k1_bulletproof_innerproduct_context *)secp256k1_scratch_alloc(scratch, n_proofs * sizeof(*innp_ctx));
427 /* This opens a second nested frame for the compressed-circuit buffers, so
428 * any later error path in this function must pop scratch twice.
429 */
430 if (!secp256k1_bulletproof_vfy_compressed_circuit_allocate_frame(scratch, circ[0], n_proofs)) {
/* NOTE(review): original line 431 elided -- presumably
 * secp256k1_scratch_deallocate_frame(scratch); to pop the frame opened at
 * line 422.  All elided error-path lines below are presumably the same
 * double pop described by the comment above; confirm upstream. */
432 return 0;
433 }
434
435 for (i = 0; i < n_proofs; i++) {
436 secp256k1_sha256 sha256;
437 unsigned char randomizer82[32] = {0}; /* randomizer for eq (82) so we can add it to eq (83) to save a separate multiexp */
438 unsigned char commit[32] = {0};
439 secp256k1_scalar taux, mu;
440 secp256k1_scalar y;
441 int overflow;
442 size_t point_idx;
/* table mapping each deserialized point to its slot in the proof's
 * 8-point serialization */
443 struct {
444 secp256k1_ge *pt;
445 size_t ser_idx;
446 } proof_points[] = {
447 { &ecmult_data[i].age[0], 0 },
448 { &ecmult_data[i].age[1], 1 },
449 { &ecmult_data[i].age[2], 2 },
450 { &ecmult_data[i].tge[0], 3 },
451 { &ecmult_data[i].tge[1], 4 },
452 { &ecmult_data[i].tge[2], 5 },
453 { &ecmult_data[i].tge[3], 6 },
454 { &ecmult_data[i].tge[4], 7 },
455 };
456
457 /* Commit to all input data: pedersen commit, asset generator, extra_commit */
458 if (nc != NULL) {
459 secp256k1_bulletproof_update_commit_n(commit, commitp[i], nc[i]);
460 }
461 secp256k1_bulletproof_update_commit_n(commit, value_gen, 1);
462 /* Match the prover: circuit bytes are always part of the base transcript,
463 * even if the caller does not provide any extra statement commitment.
464 */
/* NOTE(review): original line 465 elided -- per the comment above,
 * presumably secp256k1_bulletproof_update_commit_circuit(commit, circ[i]); */
466 if (extra_commit != NULL && extra_commit[i] != NULL) {
467 secp256k1_sha256_initialize(&sha256);
468 secp256k1_sha256_write(&sha256, commit, 32);
469 secp256k1_sha256_write(&sha256, extra_commit[i], extra_commit_len[i]);
470 secp256k1_sha256_finalize(&sha256, commit);
471 }
472
473 /* Deserialize everything */
474 /* Reject malformed point encodings before any Fiat-Shamir updates so the
475 * transcript cannot be driven by bytes that decode to no valid point.
476 */
477 for (point_idx = 0; point_idx < sizeof(proof_points) / sizeof(proof_points[0]); ++point_idx) {
478 if (!secp256k1_bulletproof_deserialize_point(proof_points[point_idx].pt, &proof[i][64], proof_points[point_idx].ser_idx, 8)) {
479 /* Pop the compressed-circuit frame first, then the outer verifier frame. */
/* NOTE(review): original lines 480-481 elided -- presumably the two
 * secp256k1_scratch_deallocate_frame(scratch); calls described above. */
482 return 0;
483 }
484 }
485
486 /* Compute y, z, x */
487 secp256k1_bulletproof_update_commit_n(commit, ecmult_data[i].age, 3);
488 secp256k1_scalar_set_b32(&y, commit, &overflow);
489 if (overflow || secp256k1_scalar_is_zero(&y)) {
/* NOTE(review): original lines 490-491 elided (double frame pop, see above). */
492 return 0;
493 }
494 ecmult_data[i].y = y;
495 secp256k1_scalar_inverse_var(&ecmult_data[i].yinv, &y); /* TODO batch this into another inverse */
/* NOTE(review): original line 496 elided -- presumably a transcript re-hash
 * (matching the prover) so that z is derived from a fresh digest. */
497 secp256k1_scalar_set_b32(&ecmult_data[i].z, commit, &overflow);
498 if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].z)) {
/* NOTE(review): original lines 499-500 elided (double frame pop, see above). */
501 return 0;
502 }
503
504 secp256k1_bulletproof_update_commit_n(commit, ecmult_data[i].tge, 5);
505 secp256k1_scalar_set_b32(&ecmult_data[i].x, commit, &overflow);
506 if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].x)) {
/* NOTE(review): original lines 507-508 elided (double frame pop, see above). */
509 return 0;
510 }
511
512 ecmult_data[i].comp_circ = secp256k1_bulletproof_vfy_compress_circuit(scratch, circ[i], &ecmult_data[i].x, &ecmult_data[i].y, &ecmult_data[i].yinv, &ecmult_data[i].z);
513
514 /* Extract scalars */
515 /* Zero taux, mu, and t are valid. Only malformed scalar encodings fail. */
516 secp256k1_scalar_set_b32(&taux, &proof[i][0], &overflow);
517 if (overflow) {
/* NOTE(review): original lines 518-519 elided (double frame pop, see above). */
520 return 0;
521 }
522 secp256k1_scalar_set_b32(&mu, &proof[i][32], &overflow);
523 if (overflow) {
/* NOTE(review): original lines 524-525 elided (double frame pop, see above). */
526 return 0;
527 }
528 /* A little sketchy, we read t (l(x) . r(x)) off the front of the inner product proof,
529 * which we otherwise treat as a black box */
530 secp256k1_scalar_set_b32(&ecmult_data[i].t, &proof[i][64 + 256 + 1], &overflow);
531 if (overflow) {
/* NOTE(review): original lines 532-533 elided (double frame pop, see above). */
534 return 0;
535 }
536
537 /* Mix these scalars into the hash so the input to the inner product proof is fixed */
538 secp256k1_sha256_initialize(&sha256);
539 secp256k1_sha256_write(&sha256, commit, 32);
540 secp256k1_sha256_write(&sha256, proof[i], 64);
541 secp256k1_sha256_finalize(&sha256, commit);
542
543 secp256k1_sha256_initialize(&sha256);
544 secp256k1_sha256_write(&sha256, commit, 32);
545 secp256k1_sha256_finalize(&sha256, randomizer82);
546 secp256k1_scalar_set_b32(&ecmult_data[i].randomizer82, randomizer82, &overflow);
547 if (overflow || secp256k1_scalar_is_zero(&ecmult_data[i].randomizer82)) {
/* NOTE(review): original lines 548-549 elided (double frame pop, see above). */
550 return 0;
551 }
552
553 /* compute exponent offsets */
554 ecmult_data[i].count = 0;
555
556 ecmult_data[i].value_gen = value_gen;
557 if (nc == NULL) {
558 ecmult_data[i].commits = NULL;
559 } else {
560 ecmult_data[i].commits = commitp[i];
561 }
562 ecmult_data[i].n_gates = circ[i]->n_gates;
563 if (nc == NULL) {
564 ecmult_data[i].n_commits = 0;
565 } else {
566 ecmult_data[i].n_commits = nc[i];
567 }
568
/* fold the eq-(82) randomizer into taux and merge into mu, giving the
 * single P-offset scalar the inner product verifier needs */
569 secp256k1_scalar_mul(&taux, &taux, &ecmult_data[i].randomizer82);
570 secp256k1_scalar_add(&mu, &mu, &taux);
571
572 innp_ctx[i].proof = &proof[i][64 + 256 + 1];
573 innp_ctx[i].p_offs = mu;
574 innp_ctx[i].yinv = ecmult_data[i].yinv;
575 memcpy(innp_ctx[i].commit, commit, 32);
/* NOTE(review): original line 576 elided -- per the rangeproof_cb member in
 * the symbol index, presumably
 * innp_ctx[i].rangeproof_cb = secp256k1_bulletproof_circuit_vfy_callback; */
577 innp_ctx[i].rangeproof_cb_data = (void *) &ecmult_data[i];
578 innp_ctx[i].n_extra_rangeproof_points = 9 + ecmult_data[i].n_commits;
579 }
580 /* Safe to size the shared inner-product verifier from circ[0] after the
581 * same_batch_shape() check above.
582 */
583 ret = secp256k1_bulletproof_inner_product_verify_impl(ecmult_ctx, scratch, gens, circ[0]->n_gates, innp_ctx, n_proofs, plen - (64 + 256 + 1), 1);
584 /* Pop the compressed-circuit frame first, then the outer verifier frame. */
/* NOTE(review): original lines 585-586 elided -- presumably the two
 * secp256k1_scratch_deallocate_frame(scratch); calls described above. */
587 return ret;
588}
589
590#endif
int secp256k1_bulletproof_vfy_compressed_circuit_allocate_frame(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ, size_t n_proofs)
void secp256k1_bulletproof_pf_compress_circuit(secp256k1_bulletproof_pf_compressed_circuit *ret, const secp256k1_bulletproof_circuit *circ, const secp256k1_bulletproof_circuit_assignment *assn, const secp256k1_scalar *y, const secp256k1_scalar *yinv, const secp256k1_scalar *z)
int secp256k1_bulletproof_pf_compressed_circuit_allocate_frame(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ)
secp256k1_bulletproof_pf_compressed_circuit * secp256k1_bulletproof_pf_slsr(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ, const unsigned char *nonce)
secp256k1_bulletproof_vfy_compressed_circuit * secp256k1_bulletproof_vfy_compress_circuit(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ, const secp256k1_scalar *x, const secp256k1_scalar *y, const secp256k1_scalar *yinv, const secp256k1_scalar *z)
static int secp256k1_bulletproof_relation66_verify_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, const unsigned char *const *proof, size_t n_proofs, size_t plen, const secp256k1_ge *const *commitp, size_t *nc, const secp256k1_ge *value_gen, const secp256k1_bulletproof_circuit *const *circ, const secp256k1_bulletproof_generators *gens, const unsigned char **extra_commit, size_t *extra_commit_len)
static int secp256k1_bulletproof_circuit_same_batch_shape(const secp256k1_bulletproof_circuit *a, const secp256k1_bulletproof_circuit *b)
static int secp256k1_bulletproof_circuit_vfy_callback(secp256k1_scalar *sc, secp256k1_ge *pt, secp256k1_scalar *randomizer, size_t idx, void *data)
static int secp256k1_bulletproof_circuit_abgh_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data)
static int secp256k1_bulletproof_relation66_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, unsigned char *proof, size_t *plen, const secp256k1_bulletproof_circuit_assignment *assn, const secp256k1_ge *commitp, const secp256k1_scalar *blinds, size_t nc, const secp256k1_ge *value_gen, const secp256k1_bulletproof_circuit *circ, const secp256k1_bulletproof_generators *gens, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len)
static int secp256k1_bulletproof_inner_product_verify_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, const secp256k1_bulletproof_generators *gens, size_t vec_len, const secp256k1_bulletproof_innerproduct_context *proof, size_t n_proofs, size_t plen, int shared_g)
static int secp256k1_bulletproof_inner_product_prove_impl(const secp256k1_ecmult_context *ecmult_ctx, secp256k1_scratch *scratch, unsigned char *proof, size_t *proof_len, const secp256k1_bulletproof_generators *gens, const secp256k1_scalar *yinv, const size_t n, secp256k1_ecmult_multi_callback *cb, void *cb_data, const unsigned char *commit_inp)
#define SECP256K1_BULLETPROOF_MAX_PROOF
Definition core.h:16
int secp256k1_scratch_allocate_frame(secp256k1_scratch *scratch, size_t n, size_t objects)
secp256k1_callback secp256k1_ecmult_context
Definition core.h:18
static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, size_t idx)
Definition core.h:181
void secp256k1_scratch_deallocate_frame(secp256k1_scratch *scratch)
#define secp256k1_scratch_alloc(scratch, size)
Definition core.h:26
FieldElement rho
Definition bppp.cpp:468
Nonce nonce
Definition bppp.cpp:120
const secp256k1_bulletproof_pf_compressed_circuit * comp_circ
const secp256k1_bulletproof_circuit_assignment * assn
const secp256k1_bulletproof_vfy_compressed_circuit * comp_circ
secp256k1_ge * blinding_gen
Definition core.h:72
secp256k1_bulletproof_vfy_callback * rangeproof_cb
static void secp256k1_bulletproof_update_commit_circuit(unsigned char *commit, const secp256k1_bulletproof_circuit *circ)
Definition util.h:276
static SECP256K1_INLINE int secp256k1_bulletproof_deserialize_point(secp256k1_ge *pt, const unsigned char *data, size_t i, size_t n)
Definition util.h:178
static void secp256k1_bulletproof_update_commit_n(unsigned char *commit, const secp256k1_ge *pt, size_t n)
Definition util.h:216
static void secp256k1_bulletproof_vector_commit(secp256k1_gej *r, const secp256k1_scalar *s, const secp256k1_ge *gen, size_t n, const secp256k1_scalar *blind, const secp256k1_ge *g)
Definition util.h:308
static SECP256K1_INLINE void secp256k1_bulletproof_serialize_points(unsigned char *out, secp256k1_ge *pt, size_t n)
Definition util.h:158