purify
C++ Purify implementation with native circuit and BPP support
Loading...
Searching...
No Matches
circuit_compress_impl.h
Go to the documentation of this file.
1/**********************************************************************
2 * Copyright (c) 2018 Andrew Poelstra *
3 * Distributed under the MIT software license, see the accompanying *
4 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
5 **********************************************************************/
6
7#ifndef SECP256K1_MODULE_BULLETPROOF_CIRCUIT_COMPRESS_IMPL
8#define SECP256K1_MODULE_BULLETPROOF_CIRCUIT_COMPRESS_IMPL
9
11
12typedef struct {
13 secp256k1_scalar c_sum;
14 secp256k1_scalar *wl_wo; /* y^-n . (x * WL + WO), gates-many */
15 secp256k1_scalar *wr; /* y^-n . WR, gates-many */
16 secp256k1_scalar *wv; /* WV, commits-many */
17 secp256k1_scalar *zn; /* z^n, constraints-many */
19
20/* l and r coefficients */
21typedef struct {
22 /* l0 = 0*/
23 secp256k1_scalar *l1;
24 /* l2 = assn->ao */
25 secp256k1_scalar *l3;
26 secp256k1_scalar *r0;
27 secp256k1_scalar *r1;
28 /* r2 = 0 */
29 secp256k1_scalar *r3;
30 secp256k1_scalar *wv; /* WV, commits-many */
31 secp256k1_scalar *zn; /* z^n, constraints + 2-many */
33
34static void secp256k1_fast_scalar_mul(secp256k1_scalar *r, const secp256k1_fast_scalar *a, const secp256k1_scalar *b) {
35 switch (a->special) {
36 case -2:
37 secp256k1_scalar_add(r, b, b);
38 secp256k1_scalar_negate(r, r);
39 break;
40 case -1:
41 secp256k1_scalar_negate(r, b);
42 break;
43 case 0:
44 secp256k1_scalar_clear(r);
45 break;
46 case 1:
47 *r = *b;
48 break;
49 case 2:
50 secp256k1_scalar_add(r, b, b);
51 break;
52 default:
53 secp256k1_scalar_mul(r, &a->scal, b);
54 break;
55 }
56#ifdef VERIFY
57{
58 secp256k1_scalar mul;
59 secp256k1_scalar_mul(&mul, &a->scal, b);
60 CHECK(secp256k1_scalar_eq(&mul, r));
61}
62#endif
63}
64
65static void secp256k1_wmatrix_row_compress(secp256k1_scalar *r, const secp256k1_bulletproof_wmatrix_row *row, const secp256k1_scalar *zn) {
66 size_t j;
67 secp256k1_scalar_clear(r);
68 for (j = 0; j < row->size; j++) {
69 secp256k1_scalar term;
70 secp256k1_fast_scalar_mul(&term, &row->entry[j].scal, &zn[row->entry[j].idx]);
71 secp256k1_scalar_add(r, r, &term);
72 }
73}
74
75int secp256k1_bulletproof_vfy_compressed_circuit_allocate_frame(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ, size_t n_proofs) {
77 scratch,
78 n_proofs * (sizeof(secp256k1_bulletproof_vfy_compressed_circuit) + (2 * circ->n_gates + circ->n_constraints + circ->n_commits) * sizeof(secp256k1_scalar)),
79 n_proofs * 2
80 );
81}
82
83secp256k1_bulletproof_vfy_compressed_circuit *secp256k1_bulletproof_vfy_compress_circuit(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ, const secp256k1_scalar *x, const secp256k1_scalar *y, const secp256k1_scalar *yinv, const secp256k1_scalar *z) {
85 secp256k1_scalar *ss = (secp256k1_scalar *)secp256k1_scratch_alloc(scratch, (2 * circ->n_gates + circ->n_commits + circ->n_constraints) * sizeof(*ss));
86 secp256k1_scalar yinvn, zyn;
87 secp256k1_scalar zsqr;
88 secp256k1_scalar tmp;
89 size_t i;
90
91 ret->wl_wo = &ss[0 * circ->n_gates];
92 ret->wr = &ss[1 * circ->n_gates];
93 ret->wv = &ss[2 * circ->n_gates];
94 /* Some circuits only use implicit bit constraints. In that case there is
95 * no z^n vector to materialize, so keep zn absent instead of touching zn[0].
96 */
97 ret->zn = circ->n_constraints == 0 ? NULL : &ss[2 * circ->n_gates + circ->n_commits];
98
99 secp256k1_scalar_sqr(&zsqr, z); /* z^1 and z^2 are reserved for bits */
100 if (circ->n_constraints != 0) {
101 secp256k1_scalar_mul(&ret->zn[0], &zsqr, z);
102 }
103 for (i = 1; i < circ->n_constraints; i++) {
104 secp256k1_scalar_mul(&ret->zn[i], &ret->zn[i - 1], z);
105 }
106
107 zyn = *z;
108 secp256k1_scalar_set_int(&yinvn, 1);
109 secp256k1_scalar_clear(&ret->c_sum);
110
111 for (i = 0; i < circ->n_gates; i++) {
112 secp256k1_scalar wl;
113 secp256k1_wmatrix_row_compress(&wl, &circ->wl[i], ret->zn);
114
115 /* For bits only WL has constraints beyond the bit-constraints */
116 if (i < circ->n_bits) {
117 secp256k1_scalar_negate(&ret->wr[i], z); /* set WR */
118
119 secp256k1_scalar_mul(&wl, &wl, &yinvn);
120 secp256k1_scalar_add(&wl, &wl, z); /* add bit-constraint to WL */
121
122 secp256k1_scalar_mul(&ret->wl_wo[i], &wl, x); /* WO becomes WL*x */
123 secp256k1_scalar_add(&ret->wl_wo[i], &ret->wl_wo[i], &zsqr); /* set WLx + WO */
124
125 /* Multiply WL by WR and add to the c sum */
126 secp256k1_scalar_mul(&wl, &wl, &zyn);
127 secp256k1_scalar_negate(&wl, &wl);
128
129 secp256k1_scalar_add(&ret->c_sum, &ret->c_sum, &wl);
130 secp256k1_scalar_add(&ret->c_sum, &ret->c_sum, &zyn);
131 secp256k1_scalar_mul(&zyn, &zyn, y);
132 } else {
133 secp256k1_wmatrix_row_compress(&ret->wr[i], &circ->wr[i], ret->zn);
134 secp256k1_wmatrix_row_compress(&ret->wl_wo[i], &circ->wo[i], ret->zn);
135
136 secp256k1_scalar_mul(&tmp, &wl, x);
137 secp256k1_scalar_add(&tmp, &tmp, &ret->wl_wo[i]);
138 secp256k1_scalar_mul(&ret->wl_wo[i], &tmp, &yinvn);
139
140 secp256k1_scalar_mul(&ret->wr[i], &ret->wr[i], &yinvn);
141
142 secp256k1_scalar_mul(&tmp, &wl, &ret->wr[i]);
143 secp256k1_scalar_add(&ret->c_sum, &ret->c_sum, &tmp);
144 }
145
146 secp256k1_scalar_mul(&yinvn, &yinvn, yinv);
147 }
148
149 secp256k1_scalar_sqr(&tmp, x);
150 for (i = 0; i < circ->n_commits; i++) {
151 secp256k1_wmatrix_row_compress(&ret->wv[i], &circ->wv[i], ret->zn);
152 secp256k1_scalar_mul(&ret->wv[i], &ret->wv[i], &tmp);
153 }
154
155 for (i = 0; i < circ->n_constraints; i++) {
156 secp256k1_scalar term;
157 secp256k1_fast_scalar_mul(&term, &circ->c[i], &ret->zn[i]);
158 secp256k1_scalar_add(&ret->c_sum, &ret->c_sum, &term);
159 }
160 secp256k1_scalar_mul(&ret->c_sum, &ret->c_sum, &tmp);
161
162 return ret;
163}
164
167 scratch,
168 (sizeof(secp256k1_bulletproof_pf_compressed_circuit) + (5 * circ->n_gates + circ->n_constraints + circ->n_commits) * sizeof(secp256k1_scalar)),
169 2
170 );
171 return ret;
172}
173
176 secp256k1_scalar *ss = (secp256k1_scalar *)secp256k1_scratch_alloc(scratch, (5 * circ->n_gates + circ->n_commits + circ->n_constraints) * sizeof(*ss));
177 size_t i;
178
179 VERIFY_CHECK(ret != NULL);
180 VERIFY_CHECK(ss != NULL);
181
182 ret->l1 = &ss[0 * circ->n_gates];
183 ret->l3 = &ss[1 * circ->n_gates];
184 ret->r0 = &ss[2 * circ->n_gates];
185 ret->r1 = &ss[3 * circ->n_gates];
186 ret->r3 = &ss[4 * circ->n_gates];
187 ret->wv = &ss[5 * circ->n_gates];
188 /* Match verifier-side handling for circuits with no explicit constraint rows. */
189 ret->zn = circ->n_constraints == 0 ? NULL : &ss[5 * circ->n_gates + circ->n_commits];
190
191 for (i = 0; i < circ->n_gates; i++) {
192 secp256k1_scalar_chacha20(&ret->l3[i], &ret->r3[i], nonce, 4 + i);
193 }
194
195 return ret;
196}
197
/* Fill in the l(X)/r(X) polynomial coefficients of a prover-side compressed
 * circuit for challenges (y, yinv = y^-1, z), given the circuit `circ` and
 * the witness assignment `assn`. `ret` must come from
 * secp256k1_bulletproof_pf_slsr (which sets up the vector views and the
 * random l3/r3). Writes ret->zn, ret->l1, ret->r0, ret->r1, scales ret->r3,
 * and compresses ret->wv. */
void secp256k1_bulletproof_pf_compress_circuit(secp256k1_bulletproof_pf_compressed_circuit *ret, const secp256k1_bulletproof_circuit *circ, const secp256k1_bulletproof_circuit_assignment *assn, const secp256k1_scalar *y, const secp256k1_scalar *yinv, const secp256k1_scalar *z) {
    secp256k1_scalar yinvn, yn; /* running powers y^-i and y^i */
    secp256k1_scalar zsqr;
    size_t i;

    /* Build zn[i] = z^(i+3), matching the verifier-side compression. */
    secp256k1_scalar_sqr(&zsqr, z); /* z^1 and z^2 are reserved for bits */
    if (circ->n_constraints != 0) {
        secp256k1_scalar_mul(&ret->zn[0], &zsqr, z);
    }
    for (i = 1; i < circ->n_constraints; i++) {
        secp256k1_scalar_mul(&ret->zn[i], &ret->zn[i - 1], z);
    }

    secp256k1_scalar_set_int(&yinvn, 1);
    secp256k1_scalar_set_int(&yn, 1);

    for (i = 0; i < circ->n_gates; i++) {
        secp256k1_scalar wl, wr, wo;

        /* Compress this gate's row of each weight matrix against z^n. */
        secp256k1_wmatrix_row_compress(&wl, &circ->wl[i], ret->zn);
        secp256k1_wmatrix_row_compress(&wr, &circ->wr[i], ret->zn);
        secp256k1_wmatrix_row_compress(&wo, &circ->wo[i], ret->zn);

        /* Add bit constraints to sums, in the randomized form
         * y^i*z*(Li - Ri - 1) + y^i*z^2*Oi = 0 */
        if (i < circ->n_bits) {
            secp256k1_scalar tmp;

            /* wr -= z */
            secp256k1_scalar_negate(&tmp, z);
            secp256k1_scalar_add(&wr, &wr, &tmp);

            /* wl += y^i * z */
            secp256k1_scalar_mul(&tmp, &yn, z);
            secp256k1_scalar_add(&wl, &wl, &tmp);

            /* wo += y^i * z^2 */
            secp256k1_scalar_mul(&tmp, &tmp, z);
            secp256k1_scalar_add(&wo, &wo, &tmp);
        } else {
            /* Non-bit gates: scale WR by y^-i. */
            secp256k1_scalar_mul(&wr, &wr, &yinvn);
        }

        /* Compute l3 and r3 */
        secp256k1_scalar_mul(&ret->r3[i], &ret->r3[i], &yn);
        /* Compute l1 */
        if (i < assn->n_gates) {
            secp256k1_scalar_add(&ret->l1[i], &wr, &assn->al[i]);
        }
        /* Compute r0 */
        secp256k1_scalar_negate(&ret->r0[i], &yn);
        secp256k1_scalar_add(&ret->r0[i], &ret->r0[i], &wo);
        /* Compute r1 */
        if (i < assn->n_gates) {
            secp256k1_scalar_mul(&ret->r1[i], &assn->ar[i], &yn);
            secp256k1_scalar_add(&ret->r1[i], &ret->r1[i], &wl);
        } else {
            /* Past the assigned gates there is no witness term; r1 is just wl. */
            ret->r1[i] = wl;
        }

        /* Advance the running powers of y and y^-1. */
        secp256k1_scalar_mul(&yn, &yn, y);
        secp256k1_scalar_mul(&yinvn, &yinvn, yinv);
    }

    /* Compress the commitment weight rows. */
    for (i = 0; i < circ->n_commits; i++) {
        secp256k1_wmatrix_row_compress(&ret->wv[i], &circ->wv[i], ret->zn);
    }
}
263
264#endif
int secp256k1_bulletproof_vfy_compressed_circuit_allocate_frame(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ, size_t n_proofs)
void secp256k1_bulletproof_pf_compress_circuit(secp256k1_bulletproof_pf_compressed_circuit *ret, const secp256k1_bulletproof_circuit *circ, const secp256k1_bulletproof_circuit_assignment *assn, const secp256k1_scalar *y, const secp256k1_scalar *yinv, const secp256k1_scalar *z)
static void secp256k1_fast_scalar_mul(secp256k1_scalar *r, const secp256k1_fast_scalar *a, const secp256k1_scalar *b)
int secp256k1_bulletproof_pf_compressed_circuit_allocate_frame(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ)
static void secp256k1_wmatrix_row_compress(secp256k1_scalar *r, const secp256k1_bulletproof_wmatrix_row *row, const secp256k1_scalar *zn)
secp256k1_bulletproof_pf_compressed_circuit * secp256k1_bulletproof_pf_slsr(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ, const unsigned char *nonce)
secp256k1_bulletproof_vfy_compressed_circuit * secp256k1_bulletproof_vfy_compress_circuit(secp256k1_scratch *scratch, const secp256k1_bulletproof_circuit *circ, const secp256k1_scalar *x, const secp256k1_scalar *y, const secp256k1_scalar *yinv, const secp256k1_scalar *z)
int secp256k1_scratch_allocate_frame(secp256k1_scratch *scratch, size_t n, size_t objects)
static void secp256k1_scalar_chacha20(secp256k1_scalar *r1, secp256k1_scalar *r2, const unsigned char *seed, size_t idx)
Definition core.h:181
#define secp256k1_scratch_alloc(scratch, size)
Definition core.h:26
Nonce nonce
Definition bppp.cpp:120
secp256k1_bulletproof_wmatrix_row * wl
Definition core.h:52
secp256k1_bulletproof_wmatrix_row * wv
Definition core.h:55
secp256k1_bulletproof_wmatrix_row * wo
Definition core.h:54
secp256k1_bulletproof_wmatrix_row * wr
Definition core.h:53
secp256k1_fast_scalar * c
Definition core.h:56
secp256k1_fast_scalar scal
Definition core.h:39
size_t idx
Definition core.h:38
secp256k1_bulletproof_wmatrix_entry * entry
Definition core.h:44
secp256k1_scalar scal
Definition core.h:30