From 03a1fb963bcbf137ef426396419d49007b0e7d59 Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 13 Nov 2020 22:15:01 -0500 Subject: [PATCH 01/28] slim down tests --- accumulator/batchproof_test.go | 49 +++------------------------------- accumulator/forest_test.go | 36 +++++++++++-------------- accumulator/pollard_test.go | 26 +++++++++--------- accumulator/undo_test.go | 2 +- 4 files changed, 34 insertions(+), 79 deletions(-) diff --git a/accumulator/batchproof_test.go b/accumulator/batchproof_test.go index 660f2ecf..8d7fc739 100644 --- a/accumulator/batchproof_test.go +++ b/accumulator/batchproof_test.go @@ -1,51 +1,9 @@ package accumulator -import ( - "fmt" - "testing" -) - -// TestIncompleteBatchProof tests that a incomplete (missing some hashes) batchproof does not pass verification. -func TestIncompleteBatchProof(t *testing.T) { - // Create forest in memory - f := NewForest(nil, false, "", 0) - - // last index to be deleted. Same as blockDels - lastIdx := uint64(7) - - // Generate adds - adds := make([]Leaf, 8) - adds[0].Hash = Hash{1} - adds[1].Hash = Hash{2} - adds[2].Hash = Hash{3} - adds[3].Hash = Hash{4} - adds[4].Hash = Hash{5} - adds[5].Hash = Hash{6} - adds[6].Hash = Hash{7} - adds[7].Hash = Hash{8} - - // Modify with the additions to simulate txos being added - _, err := f.Modify(adds, nil) - if err != nil { - t.Fatal(err) - } - - // create blockProof based on the last add in the slice - blockProof, err := f.ProveBatch( - []Hash{adds[lastIdx].Hash}) - - if err != nil { - t.Fatal(err) - } - - blockProof.Proof = blockProof.Proof[:len(blockProof.Proof)-1] - shouldBeFalse := f.VerifyBatchProof(blockProof) - if shouldBeFalse != false { - t.Fail() - t.Logf("Incomplete proof passes verification") - } -} +// forests don't accept or verify proofs since they have everything. +// TODO rewrite these tests using pollard instead of forest +/* // TestVerifyBlockProof tests that the computedTop is compared to the top in the // Utreexo forest. 
func TestVerifyBatchProof(t *testing.T) { @@ -147,3 +105,4 @@ func TestProofShouldNotValidateAfterNodeDeleted(t *testing.T) { proofIndex)) } } +*/ diff --git a/accumulator/forest_test.go b/accumulator/forest_test.go index cfd0eeb4..db2aba2d 100644 --- a/accumulator/forest_test.go +++ b/accumulator/forest_test.go @@ -52,7 +52,7 @@ func TestForestAddDel(t *testing.T) { } func TestCowForestAddDelComp(t *testing.T) { - numAdds := uint32(1000) + numAdds := uint32(100) tmpDir := os.TempDir() cowF := NewForest(nil, false, tmpDir, 500) @@ -61,7 +61,7 @@ func TestCowForestAddDelComp(t *testing.T) { sc := NewSimChain(0x07) sc.lookahead = 400 - for b := 0; b <= 1000; b++ { + for b := 0; b <= 100; b++ { adds, _, delHashes := sc.NextBlock(numAdds) cowBP, err := cowF.ProveBatch(delHashes) @@ -89,7 +89,8 @@ func TestCowForestAddDelComp(t *testing.T) { if err != nil { panic(err) } - cowstring := fmt.Sprintf("nl %d %s\n", cowF.numLeaves, cowF.ToString()) + cowstring := + fmt.Sprintf("nl %d %s\n", cowF.numLeaves, cowF.ToString()) cowFile.WriteString(cowstring) memFile, err := os.OpenFile("memlog", @@ -98,7 +99,8 @@ func TestCowForestAddDelComp(t *testing.T) { panic(err) } - memstring := fmt.Sprintf("nl %d %s\n", memF.numLeaves, memF.ToString()) + memstring := + fmt.Sprintf("nl %d %s\n", memF.numLeaves, memF.ToString()) memFile.WriteString(memstring) s := fmt.Sprintf("forests are not equal\n") s += fmt.Sprintf("forestRows in f: %d\n: ", cowF.rows) @@ -134,8 +136,8 @@ func TestCowForestAddDelComp(t *testing.T) { } } -// checkIfEqual checks if the forest differ returns true for equal and if not, returns -// the positions and the hashes +// checkIfEqual checks if the forest differ returns true for equal and if not, +// returns the positions and the hashes func checkIfEqual(cowF, memF *Forest) (bool, []uint64, []Hash) { cowFH := cowF.rows memFH := memF.rows @@ -159,7 +161,8 @@ func checkIfEqual(cowF, memF *Forest) (bool, []uint64, []Hash) { memH := memF.data.read(uint64(pos)) cowH := 
cowF.data.read(uint64(pos)) if memH != cowH { - s := fmt.Sprintf("hashes aren't equal at gpos: %d "+"mem: %x cow: %x\n", pos, memH, cowH) + s := fmt.Sprintf("hashes aren't equal at gpos: %d "+ + "mem: %x cow: %x\n", pos, memH, cowH) panic(s) } } @@ -193,7 +196,7 @@ func TestCowForestAddDel(t *testing.T) { t.Fatal(err) } - fmt.Printf("nl %d %s\n", cowF.numLeaves, cowF.ToString()) + // fmt.Printf("nl %d %s\n", cowF.numLeaves, cowF.ToString()) } } @@ -328,18 +331,13 @@ func addDelFullBatchProof(nAdds, nDels int) error { } // get block proof - bp, err := f.ProveBatch(addHashes[:nDels]) + _, err = f.ProveBatch(addHashes[:nDels]) if err != nil { return err } - // check block proof. Note this doesn't delete anything, just proves inclusion - worked, _, _ := verifyBatchProof(bp, f.getRoots(), f.numLeaves, nil) - // worked := f.VerifyBatchProof(bp) - if !worked { - return fmt.Errorf("VerifyBatchProof failed") - } - fmt.Printf("VerifyBatchProof worked\n") + // TODO: make a pollard so that the proof can be verified + return nil } @@ -434,7 +432,7 @@ func TestSmallRandomForests(t *testing.T) { // If the tree we filled isn't empty, and contains a node we didn't delete, // we should be able to make a proof for that leaf if atLeastOneLeafRemains { - blockProof, err := f.ProveBatch( + _, err := f.ProveBatch( []Hash{ chosenUndeletedLeaf.Hash, }) @@ -442,9 +440,7 @@ func TestSmallRandomForests(t *testing.T) { t.Fatalf("proveblock failed proving existing leaf: %v", err) } - if !(f.VerifyBatchProof(blockProof)) { - t.Fatal("verifyblockproof failed verifying proof for existing leaf") - } + // TODO verify created proof } } } diff --git a/accumulator/pollard_test.go b/accumulator/pollard_test.go index 8d3fd9c8..b7444e85 100644 --- a/accumulator/pollard_test.go +++ b/accumulator/pollard_test.go @@ -7,7 +7,7 @@ import ( ) func TestPollardRand(t *testing.T) { - for z := 0; z < 30; z++ { + for z := 0; z < 10; z++ { // z := 11221 // z := 55 rand.Seed(int64(z)) @@ -78,8 +78,8 @@ func 
pollardRandomRemember(blocks int32) error { for b := int32(0); b < blocks; b++ { adds, _, delHashes := sn.NextBlock(rand.Uint32() & 0xff) - fmt.Printf("\t\t\tstart block %d del %d add %d - %s\n", - sn.blockHeight, len(delHashes), len(adds), p.Stats()) + // fmt.Printf("\t\t\tstart block %d del %d add %d - %s\n", + // sn.blockHeight, len(delHashes), len(adds), p.Stats()) // get proof for these deletions (with respect to prev block) bp, err := f.ProveBatch(delHashes) @@ -91,7 +91,7 @@ func pollardRandomRemember(blocks int32) error { if err != nil { return err } - fmt.Printf("del %v\n", bp.Targets) + // fmt.Printf("del %v\n", bp.Targets) // apply adds and deletes to the bridge node (could do this whenever) _, err = f.Modify(adds, bp.Targets) @@ -124,9 +124,9 @@ func pollardRandomRemember(blocks int32) error { return err } - fmt.Printf("pol postadd %s", p.ToString()) + // fmt.Printf("pol postadd %s", p.ToString()) - fmt.Printf("frs postadd %s", f.ToString()) + // fmt.Printf("frs postadd %s", f.ToString()) // check all leaves match if !p.equalToForestIfThere(f) { @@ -141,15 +141,15 @@ func pollardRandomRemember(blocks int32) error { return fmt.Errorf("block %d full %d tops, pol %d tops", sn.blockHeight, len(fullTops), len(polTops)) } - fmt.Printf("top matching: ") + // fmt.Printf("top matching: ") for i, ft := range fullTops { - fmt.Printf("f %04x p %04x ", ft[:4], polTops[i][:4]) + // fmt.Printf("f %04x p %04x ", ft[:4], polTops[i][:4]) if ft != polTops[i] { return fmt.Errorf("block %d top %d mismatch, full %x pol %x", sn.blockHeight, i, ft[:4], polTops[i][:4]) } } - fmt.Printf("\n") + // fmt.Printf("\n") } return nil @@ -187,7 +187,7 @@ func fixedPollard(leaves int32) error { if err != nil { return err } - fmt.Printf("forest post del %s", f.ToString()) + // fmt.Printf("forest post del %s", f.ToString()) var p Pollard @@ -196,7 +196,7 @@ func fixedPollard(leaves int32) error { return err } - fmt.Printf("pollard post add %s", p.ToString()) + // fmt.Printf("pollard post 
add %s", p.ToString()) err = p.rem2(dels) if err != nil { @@ -207,9 +207,9 @@ func fixedPollard(leaves int32) error { if err != nil { return err } - fmt.Printf("forest post del %s", f.ToString()) + // fmt.Printf("forest post del %s", f.ToString()) - fmt.Printf("pollard post del %s", p.ToString()) + // fmt.Printf("pollard post del %s", p.ToString()) if !p.equalToForest(f) { return fmt.Errorf("p != f (leaves)") diff --git a/accumulator/undo_test.go b/accumulator/undo_test.go index 994d2ab5..043059fa 100644 --- a/accumulator/undo_test.go +++ b/accumulator/undo_test.go @@ -18,7 +18,7 @@ func TestUndoFixed(t *testing.T) { func TestUndoRandom(t *testing.T) { - for z := int64(0); z < 100; z++ { + for z := int64(0); z < 5; z++ { // z := int64(11) rand.Seed(z) err := undoOnceRandom(20) From 8b90d1524aa321ae7da3f6b467cf177020f9a57f Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 13 Nov 2020 22:48:05 -0500 Subject: [PATCH 02/28] replace [3]node with miniTree same thing, just a little easier to read --- accumulator/batchproof.go | 11 ++++++++--- accumulator/pollardproof.go | 17 ++++++++++------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index 5e914707..d7b9d639 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -14,6 +14,10 @@ type BatchProof struct { // the position of the hashes is implied / computable from the leaf positions } +type miniTree struct { + l, r, parent node // left, right, parent +} + /* Batchproof serialization is: 4bytes numTargets @@ -142,7 +146,7 @@ func verifyBatchProof(bp BatchProof, roots []Hash, numLeaves uint64, // cached should be a function that fetches nodes from the pollard and // indicates whether they exist or not, this is only useful for the pollard // and nil should be passed for the forest. 
- cached func(pos uint64) (bool, Hash)) (bool, [][3]node, []node) { + cached func(pos uint64) (bool, Hash)) (bool, []miniTree, []node) { if len(bp.Targets) == 0 { return true, nil, nil } @@ -176,7 +180,7 @@ func verifyBatchProof(bp BatchProof, roots []Hash, numLeaves uint64, // is the right child. // trees holds the entire proof tree of the batchproof in this way, // sorted by the tuple[0]. - trees := make([][3]node, 0, len(computablePositions)) + trees := make([]miniTree, 0, len(computablePositions)) // initialise the targetNodes for row 0. // TODO: this would be more straight forward if bp.Proofs wouldn't // contain the targets @@ -264,7 +268,8 @@ func verifyBatchProof(bp BatchProof, roots []Hash, numLeaves uint64, return false, nil, nil } - trees = append(trees, [3]node{{Val: hash, Pos: parentPos}, left, right}) + trees = append(trees, + miniTree{parent: node{Val: hash, Pos: parentPos}, l: left, r: right}) row := detectRow(parentPos, rows) if numLeaves&(1< 0 && parentPos == rootPosition(numLeaves, row, rows) { diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index da867cff..65af3fd5 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -36,8 +36,10 @@ func (p *Pollard) IngestBatchProof(bp BatchProof) error { i++ } // populate the pollard - nodesAllocated += p.populate(p.roots[len(p.roots)-i-1], root.Pos, - trees, polNodes[nodesAllocated:]) + nodesAllocated += p.populate( + p.roots[len(p.roots)-i-1], + root.Pos, trees, polNodes[nodesAllocated:]) + } return nil @@ -45,10 +47,11 @@ func (p *Pollard) IngestBatchProof(bp BatchProof) error { // populate takes a root and populates it with the nodes of the paritial proof tree that was computed // in `verifyBatchProof`. 
-func (p *Pollard) populate(root *polNode, pos uint64, trees [][3]node, polNodes []polNode) int { +func (p *Pollard) populate( + root *polNode, pos uint64, trees []miniTree, polNodes []polNode) int { // a stack to traverse the pollard type stackElem struct { - trees [][3]node + trees []miniTree node *polNode pos uint64 } @@ -71,7 +74,7 @@ func (p *Pollard) populate(root *polNode, pos uint64, trees [][3]node, polNodes i := len(elem.trees) - 1 find_nodes: for ; i >= 0; i-- { - switch elem.trees[i][0].Pos { + switch elem.trees[i].parent.Pos { case elem.pos: fallthrough case rightChild: @@ -80,7 +83,7 @@ func (p *Pollard) populate(root *polNode, pos uint64, trees [][3]node, polNodes nodesAllocated++ } right = elem.node.niece[0] - right.data = elem.trees[i][1].Val + right.data = elem.trees[i].l.Val fallthrough case leftChild: if elem.node.niece[1] == nil { @@ -88,7 +91,7 @@ func (p *Pollard) populate(root *polNode, pos uint64, trees [][3]node, polNodes nodesAllocated++ } left = elem.node.niece[1] - left.data = elem.trees[i][2].Val + left.data = elem.trees[i].r.Val break find_nodes } } From 9b9db1151f641b388b2d58e63aadef606e81cbd5 Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 13 Nov 2020 22:51:32 -0500 Subject: [PATCH 03/28] make verifyBatchProof() a method on Pollard --- accumulator/batchproof.go | 13 +++++++------ accumulator/forestproofs.go | 6 ------ accumulator/pollardproof.go | 2 +- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index d7b9d639..18f8a731 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -142,7 +142,7 @@ func (bp *BatchProof) ToString() string { // Takes a BatchProof, the accumulator roots, and the number of leaves in the forest. // Returns wether or not the proof verified correctly, the partial proof tree, // and the subset of roots that was computed. 
-func verifyBatchProof(bp BatchProof, roots []Hash, numLeaves uint64, +func (p *Pollard) verifyBatchProof(bp BatchProof, // cached should be a function that fetches nodes from the pollard and // indicates whether they exist or not, this is only useful for the pollard // and nil should be passed for the forest. @@ -150,7 +150,7 @@ func verifyBatchProof(bp BatchProof, roots []Hash, numLeaves uint64, if len(bp.Targets) == 0 { return true, nil, nil } - + roots := p.rootHashesReverse() // copy targets to leave them in original order targets := make([]uint64, len(bp.Targets)) copy(targets, bp.Targets) @@ -160,9 +160,9 @@ func verifyBatchProof(bp BatchProof, roots []Hash, numLeaves uint64, cached = func(_ uint64) (bool, Hash) { return false, empty } } - rows := treeRows(numLeaves) + rows := treeRows(p.numLeaves) proofPositions, computablePositions := - ProofPositions(targets, numLeaves, rows) + ProofPositions(targets, p.numLeaves, rows) // The proof should have as many hashes as there are proof positions. if len(proofPositions)+len(bp.Targets) != len(bp.Proof) { @@ -190,7 +190,7 @@ func verifyBatchProof(bp BatchProof, roots []Hash, numLeaves uint64, // check if the target is the row 0 root. // this is the case if its the last leaf (pos==numLeaves-1) // AND the tree has a root at row 0 (numLeaves&1==1) - if targets[0] == numLeaves-1 && numLeaves&1 == 1 { + if targets[0] == p.numLeaves-1 && p.numLeaves&1 == 1 { // target is the row 0 root, append it to the root candidates. 
rootCandidates = append(rootCandidates, node{Val: roots[0], Pos: targets[0]}) @@ -272,7 +272,8 @@ func verifyBatchProof(bp BatchProof, roots []Hash, numLeaves uint64, miniTree{parent: node{Val: hash, Pos: parentPos}, l: left, r: right}) row := detectRow(parentPos, rows) - if numLeaves&(1< 0 && parentPos == rootPosition(numLeaves, row, rows) { + if p.numLeaves&(1< 0 && parentPos == + rootPosition(p.numLeaves, row, rows) { // the parent is a root -> store as candidate, to check against // actual roots later. rootCandidates = append(rootCandidates, node{Val: hash, Pos: parentPos}) diff --git a/accumulator/forestproofs.go b/accumulator/forestproofs.go index a087e68f..c58e7cc8 100644 --- a/accumulator/forestproofs.go +++ b/accumulator/forestproofs.go @@ -192,9 +192,3 @@ func (f *Forest) ProveBatch(hs []Hash) (BatchProof, error) { f.TimeInProve += donetime.Sub(starttime) return bp, nil } - -// VerifyBatchProof : -func (f *Forest) VerifyBatchProof(bp BatchProof) bool { - ok, _, _ := verifyBatchProof(bp, f.getRoots(), f.numLeaves, nil) - return ok -} diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index 65af3fd5..5b53613b 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -9,7 +9,7 @@ import ( func (p *Pollard) IngestBatchProof(bp BatchProof) error { // verify the batch proof. rootHashes := p.rootHashesReverse() - ok, trees, roots := verifyBatchProof(bp, rootHashes, p.numLeaves, + ok, trees, roots := p.verifyBatchProof(bp, // pass a closure that checks the pollard for cached nodes. // returns true and the hash value of the node if it exists. // returns false if the node does not exist or the hash value is empty. 
From 9861d9fe444297924e779b00746bead9c8938cf0 Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 13 Nov 2020 23:01:50 -0500 Subject: [PATCH 04/28] have verifyBatchProof() do it's own reading from the pollard, instead of giving it a function to read locations simpler, and we don't have multiple callers of verifyBatchProof() --- accumulator/batchproof.go | 26 +++++++++++--------------- accumulator/pollardproof.go | 16 +--------------- 2 files changed, 12 insertions(+), 30 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index 18f8a731..7ed10f44 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -142,24 +142,17 @@ func (bp *BatchProof) ToString() string { // Takes a BatchProof, the accumulator roots, and the number of leaves in the forest. // Returns wether or not the proof verified correctly, the partial proof tree, // and the subset of roots that was computed. -func (p *Pollard) verifyBatchProof(bp BatchProof, - // cached should be a function that fetches nodes from the pollard and - // indicates whether they exist or not, this is only useful for the pollard - // and nil should be passed for the forest. - cached func(pos uint64) (bool, Hash)) (bool, []miniTree, []node) { +func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { + if len(bp.Targets) == 0 { return true, nil, nil } - roots := p.rootHashesReverse() + rootHashes := p.rootHashesReverse() // copy targets to leave them in original order targets := make([]uint64, len(bp.Targets)) copy(targets, bp.Targets) sortUint64s(targets) - if cached == nil { - cached = func(_ uint64) (bool, Hash) { return false, empty } - } - rows := treeRows(p.numLeaves) proofPositions, computablePositions := ProofPositions(targets, p.numLeaves, rows) @@ -174,7 +167,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof, // rootCandidates holds the roots that where computed, and have to be // compared to the actual roots at the end. 
targetNodes := make([]node, 0, len(targets)*int(rows)) - rootCandidates := make([]node, 0, len(roots)) + rootCandidates := make([]node, 0, len(rootHashes)) // trees is a slice of 3-Tuples, each tuple represents a parent and its children. // tuple[0] is the parent, tuple[1] is the left child and tuple[2] // is the right child. @@ -193,7 +186,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof, if targets[0] == p.numLeaves-1 && p.numLeaves&1 == 1 { // target is the row 0 root, append it to the root candidates. rootCandidates = append(rootCandidates, - node{Val: roots[0], Pos: targets[0]}) + node{Val: rootHashes[0], Pos: targets[0]}) bp.Proof = bp.Proof[1:] break } @@ -261,9 +254,12 @@ func (p *Pollard) verifyBatchProof(bp BatchProof, // get the hash of the parent from the cache or compute it parentPos := parent(target.Pos, rows) - isParentCached, cachedHash := cached(parentPos) hash := parentHash(left.Val, right.Val) - if isParentCached && hash != cachedHash { + + populatedNode, _, _, err := p.readPos(parentPos) + if err != nil || + (populatedNode != nil && populatedNode.data != empty && + hash != populatedNode.data) { // The hash did not match the cached hash return false, nil, nil } @@ -291,7 +287,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof, // holds a subset of the roots // we count the roots that match in order. rootMatches := 0 - for _, root := range roots { + for _, root := range rootHashes { if len(rootCandidates) > rootMatches && root == rootCandidates[rootMatches].Val { rootMatches++ diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index 5b53613b..a97deeaa 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -9,21 +9,7 @@ import ( func (p *Pollard) IngestBatchProof(bp BatchProof) error { // verify the batch proof. rootHashes := p.rootHashesReverse() - ok, trees, roots := p.verifyBatchProof(bp, - // pass a closure that checks the pollard for cached nodes. 
- // returns true and the hash value of the node if it exists. - // returns false if the node does not exist or the hash value is empty. - func(pos uint64) (bool, Hash) { - n, _, _, err := p.readPos(pos) - if err != nil { - return false, empty - } - if n != nil && n.data != empty { - return true, n.data - } - - return false, empty - }) + ok, trees, roots := p.verifyBatchProof(bp) if !ok { return fmt.Errorf("block proof mismatch") } From 0e278daa8c15dbb6233ff3a80388ac960a69d1bf Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 13 Nov 2020 23:07:36 -0500 Subject: [PATCH 05/28] slight changes to formatting and comments --- accumulator/batchproof.go | 9 +++------ accumulator/pollardproof.go | 7 ++++--- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index 7ed10f44..bd211375 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -10,7 +10,8 @@ import ( type BatchProof struct { Targets []uint64 Proof []Hash - // list of leaf locations to delete, along with a bunch of hashes that give the proof. + // list of leaf locations to delete, along with a bunch of hashes that + // give the proof. // the position of the hashes is implied / computable from the leaf positions } @@ -168,11 +169,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { // compared to the actual roots at the end. targetNodes := make([]node, 0, len(targets)*int(rows)) rootCandidates := make([]node, 0, len(rootHashes)) - // trees is a slice of 3-Tuples, each tuple represents a parent and its children. - // tuple[0] is the parent, tuple[1] is the left child and tuple[2] - // is the right child. - // trees holds the entire proof tree of the batchproof in this way, - // sorted by the tuple[0]. + // trees holds the entire proof tree of the batchproof, sorted by parents. trees := make([]miniTree, 0, len(computablePositions)) // initialise the targetNodes for row 0. 
// TODO: this would be more straight forward if bp.Proofs wouldn't diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index a97deeaa..f4428615 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -31,8 +31,8 @@ func (p *Pollard) IngestBatchProof(bp BatchProof) error { return nil } -// populate takes a root and populates it with the nodes of the paritial proof tree that was computed -// in `verifyBatchProof`. +// populate takes a root and populates it with the nodes of the paritial proof +// tree that was computed in `verifyBatchProof`. func (p *Pollard) populate( root *polNode, pos uint64, trees []miniTree, polNodes []polNode) int { // a stack to traverse the pollard @@ -86,7 +86,8 @@ func (p *Pollard) populate( } stack = append(stack, - stackElem{trees[:i], left, leftChild}, stackElem{trees[:i], right, rightChild}) + stackElem{trees[:i], left, leftChild}, + stackElem{trees[:i], right, rightChild}) } return nodesAllocated } From 3db1d2f690165844b2151926044f040c00b392de Mon Sep 17 00:00:00 2001 From: adiabat Date: Thu, 17 Dec 2020 23:39:58 -0500 Subject: [PATCH 06/28] change datadir path (specifies blocks, not bitcoin dir) --- bridgenode/config.go | 24 ++++++++++++++---------- bridgenode/genproofs.go | 8 ++++---- bridgenode/initrestore.go | 7 +++++-- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/bridgenode/config.go b/bridgenode/config.go index b1f9afb3..264553a4 100644 --- a/bridgenode/config.go +++ b/bridgenode/config.go @@ -196,12 +196,11 @@ func Parse(args []string) (*Config, error) { cfg := Config{} var dataDir string - // set dataDir if *dataDirCmd == "" { // No custom datadir given by the user dataDir = btcutil.AppDataDir("bitcoin", true) } else { - dataDir = *dataDirCmd // set dataDir to the one set by the user + cfg.blockDir = *dataDirCmd // use user sepcified as the full blockdir } var bridgeDir string @@ -216,26 +215,31 @@ func Parse(args []string) (*Config, error) { // set network if *netCmd 
== "testnet" { cfg.params = chaincfg.TestNet3Params - cfg.blockDir = filepath.Join( - filepath.Join(dataDir, chaincfg.TestNet3Params.Name), - "blocks") + if *dataDirCmd == "" { + cfg.blockDir = filepath.Join( + filepath.Join(dataDir, chaincfg.TestNet3Params.Name), + "blocks") + } base := filepath.Join(bridgeDir, chaincfg.TestNet3Params.Name) cfg.utreeDir = initUtreeDir(base) } else if *netCmd == "regtest" { cfg.params = chaincfg.RegressionNetParams - cfg.blockDir = filepath.Join( - filepath.Join(dataDir, chaincfg.RegressionNetParams.Name), - "blocks") + if *dataDirCmd == "" { + cfg.blockDir = filepath.Join( + filepath.Join(dataDir, chaincfg.RegressionNetParams.Name), + "blocks") + } base := filepath.Join(bridgeDir, chaincfg.RegressionNetParams.Name) cfg.utreeDir = initUtreeDir(base) } else if *netCmd == "mainnet" { cfg.params = chaincfg.MainNetParams - cfg.blockDir = filepath.Join(dataDir, "blocks") + if *dataDirCmd == "" { + cfg.blockDir = filepath.Join(dataDir, "blocks") + } cfg.utreeDir = initUtreeDir(bridgeDir) } else { return nil, errInvalidNetwork(*netCmd) } - makePaths(cfg.utreeDir) // set profiling diff --git a/bridgenode/genproofs.go b/bridgenode/genproofs.go index 4119bf93..b9fcfdc2 100644 --- a/bridgenode/genproofs.go +++ b/bridgenode/genproofs.go @@ -62,8 +62,8 @@ func BuildProofs(cfg *Config, sig chan bool) error { forest, height, knownTipHeight, err := initBridgeNodeState(cfg, offsetFinished) if err != nil { - fmt.Printf("initialization error. If your .blk and .dat files are ") - fmt.Printf("not in %s, specify alternate path with -datadir\n.", cfg.blockDir) + fmt.Printf("Can't read blocks in %s ; ", cfg.blockDir) + fmt.Printf("specify path with -datadir\n") return err } @@ -74,8 +74,8 @@ func BuildProofs(cfg *Config, sig chan bool) error { } lvdb, err := leveldb.OpenFile(cfg.utreeDir.ttldb, &o) if err != nil { - fmt.Printf("initialization error. 
If your .blk and .dat files are ") - fmt.Printf("not in %s, specify alternate path with -datadir\n.", cfg.blockDir) + fmt.Printf("Can't read blocks in %s ; ", cfg.blockDir) + fmt.Printf("specify path with -datadir\n") return err } defer lvdb.Close() diff --git a/bridgenode/initrestore.go b/bridgenode/initrestore.go index fcb83c9b..6b005a46 100644 --- a/bridgenode/initrestore.go +++ b/bridgenode/initrestore.go @@ -12,7 +12,8 @@ import ( // initBridgeNodeState attempts to load and initialize the chain state from the disk. // If a chain state is not present, chain is initialized to the genesis // returns forest, height, lastIndexOffsetHeight, pOffset and error -func initBridgeNodeState(cfg *Config, offsetFinished chan bool) (forest *accumulator.Forest, +func initBridgeNodeState( + cfg *Config, offsetFinished chan bool) (forest *accumulator.Forest, height int32, knownTipHeight int32, err error) { // Default behavior is that the user should delete all offsetdata @@ -24,7 +25,9 @@ func initBridgeNodeState(cfg *Config, offsetFinished chan bool) (forest *accumul // anew // Check if the offsetfiles for both rev*.dat and blk*.dat are present if util.HasAccess(cfg.utreeDir.offsetDir.offsetFile) { - knownTipHeight, err = restoreLastIndexOffsetHeight(cfg.utreeDir.offsetDir, offsetFinished) + fmt.Printf("has access to %s\n", cfg.utreeDir.offsetDir.offsetFile) + knownTipHeight, err = + restoreLastIndexOffsetHeight(cfg.utreeDir.offsetDir, offsetFinished) if err != nil { err = fmt.Errorf("restoreLastIndexOffsetHeight error: %s", err.Error()) return From bb24cfdb85ac2ce626c1b87142fa4de0d9b8b4c9 Mon Sep 17 00:00:00 2001 From: adiabat Date: Sun, 20 Dec 2020 17:43:25 -0500 Subject: [PATCH 07/28] remove some unused functions; revert proofs to still include target hashes need to clean up some other things first before removing the targets --- accumulator/batchproof.go | 1 + accumulator/forestproofs.go | 27 ++++++--------------------- go.mod | 8 +++----- go.sum | 16 ++++++++++++++++ 
util/types.go | 2 +- util/udata.go | 4 ++-- util/utils.go | 6 ++---- 7 files changed, 31 insertions(+), 33 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index bd211375..8b5522f1 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -174,6 +174,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { // initialise the targetNodes for row 0. // TODO: this would be more straight forward if bp.Proofs wouldn't // contain the targets + // bp.Proofs is now on from ProofPositions() proofHashes := make([]Hash, 0, len(proofPositions)) var targetsMatched uint64 for len(targets) > 0 { diff --git a/accumulator/forestproofs.go b/accumulator/forestproofs.go index c58e7cc8..fa884ca2 100644 --- a/accumulator/forestproofs.go +++ b/accumulator/forestproofs.go @@ -62,19 +62,6 @@ func (f *Forest) Prove(wanted Hash) (Proof, error) { return pr, nil } -// ProveMany : -func (f *Forest) ProveMany(hs []Hash) ([]Proof, error) { - var err error - proofs := make([]Proof, len(hs)) - for i, h := range hs { - proofs[i], err = f.Prove(h) - if err != nil { - return proofs, err - } - } - return proofs, err -} - // Verify checks an inclusion proof. // returns false on any errors func (f *Forest) Verify(p Proof) bool { @@ -151,7 +138,6 @@ func (f *Forest) ProveBatch(hs []Hash) (BatchProof, error) { bp.Targets = make([]uint64, len(hs)) for i, wanted := range hs { - pos, ok := f.positionMap[wanted.Mini()] if !ok { fmt.Print(f.ToString()) @@ -169,18 +155,17 @@ func (f *Forest) ProveBatch(hs []Hash) (BatchProof, error) { } bp.Targets[i] = pos } - // targets need to be sorted because the proof hashes are sorted - // NOTE that this is a big deal -- we lose in-block positional information - // because of this sorting. Does that hurt locality or performance? My - // guess is no, but that's untested. + + // Don't include targets in proof, we don't need them. 
+ sortedTargets := make([]uint64, len(bp.Targets)) copy(sortedTargets, bp.Targets) sortUint64s(sortedTargets) proofPositions, _ := ProofPositions(sortedTargets, f.numLeaves, f.rows) - targetsAndProof := mergeSortedSlices(proofPositions, sortedTargets) - bp.Proof = make([]Hash, len(targetsAndProof)) - for i, proofPos := range targetsAndProof { + // proofPositions = mergeSortedSlices(proofPositions, sortedTargets) + bp.Proof = make([]Hash, len(proofPositions)) + for i, proofPos := range proofPositions { bp.Proof[i] = f.data.read(proofPos) } diff --git a/go.mod b/go.mod index 62c0fd93..ae4ee450 100644 --- a/go.mod +++ b/go.mod @@ -4,12 +4,10 @@ go 1.12 require ( github.com/adiabat/bech32 v0.0.0-20170505011816-6289d404861d - github.com/btcsuite/btcd v0.20.1-beta + github.com/btcsuite/btcd v0.21.0-beta github.com/btcsuite/btcutil v1.0.2 - github.com/golang/snappy v0.0.1 // indirect + github.com/golang/snappy v0.0.2 // indirect github.com/syndtr/goleveldb v1.0.0 - golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 // indirect + golang.org/x/crypto v0.0.0-20201217014255-9d1352758620 // indirect golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 // indirect ) - -replace github.com/btcsuite/btcd => github.com/rjected/btcd v0.0.0-20200718165331-907190b086ba diff --git a/go.sum b/go.sum index 72cab87b..018cfe50 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,20 @@ github.com/adiabat/bech32 v0.0.0-20170505011816-6289d404861d h1:7uzrpmQFgin7GpzfZOqRLNBJB2c2Sjb0TFOJajaPbgw= github.com/adiabat/bech32 v0.0.0-20170505011816-6289d404861d/go.mod h1:NW+G+E7qQb191ngeVCFjpvrWHIYANKkWJYxekITaulc= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd 
v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0 h1:Tvd0BfvqX9o823q1j2UZ/epQo09eJh6dTcRp79ilIN4= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/snappy-go v1.0.0 h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJGQE= github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= @@ -15,6 +22,7 @@ github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46f github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= 
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= @@ -22,8 +30,11 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= @@ -37,11 +48,14 @@ github.com/rjected/btcd v0.0.0-20200718165331-907190b086ba h1:RMaSvSBrULixyGJ8zG github.com/rjected/btcd v0.0.0-20200718165331-907190b086ba/go.mod h1:Yktc19YNjh/Iz2//CX0vfRTS4IJKM/RKO5YZ9Fn+Pgo= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:vEg9joUBmeBcK9iSJftGNf3coIG4HqZElCPehJsfAYM= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201217014255-9d1352758620 h1:3wPMTskHO3+O6jqTEXyFcsnuxMQOqYSaHsDxcbUXpqA= +golang.org/x/crypto v0.0.0-20201217014255-9d1352758620/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= @@ -51,8 +65,10 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 
diff --git a/util/types.go b/util/types.go index fd601a9b..13bc4667 100644 --- a/util/types.go +++ b/util/types.go @@ -41,8 +41,8 @@ The udata comes first, and the height and leafTTLs come first. type UData struct { Height int32 - AccProof accumulator.BatchProof Stxos []LeafData + AccProof accumulator.BatchProof TxoTTLs []int32 } diff --git a/util/udata.go b/util/udata.go index 071fb58f..9b57b70f 100644 --- a/util/udata.go +++ b/util/udata.go @@ -13,13 +13,13 @@ import ( // ProofsProveBlock checks the consistency of a UBlock. Does the proof prove // all the inputs in the block? -func (ub *UBlock) ProofsProveBlock(inputSkipList []uint32) bool { +func (ub *UBlock) LeafDataOutpointsMatchBlock(inputSkipList []uint32) bool { // get the outpoints that need proof proveOPs := blockToDelOPs(&ub.Block, inputSkipList) // ensure that all outpoints are provided in the extradata if len(proveOPs) != len(ub.UtreexoData.Stxos) { - fmt.Printf("height %d %d outpoints need proofs but only %d proven\n", + fmt.Printf("height %d need proofs for %d outpoints but only %d proven\n", ub.UtreexoData.Height, len(proveOPs), len(ub.UtreexoData.Stxos)) return false } diff --git a/util/utils.go b/util/utils.go index 6827e886..96ea1268 100644 --- a/util/utils.go +++ b/util/utils.go @@ -162,8 +162,8 @@ func BlockToAddLeaves(blk wire.MsgBlock, return } -// blockToDelOPs gives all the UTXOs in a block that need proofs in order to be -// deleted. All txinputs except for the coinbase input and utxos created +// blockToDelOPs returns all the outpoints in a block that need proofs in order +// to be deleted. All txinputs except for the coinbase input and utxos created // within the same block (on the skiplist) func blockToDelOPs( blk *wire.MsgBlock, skiplist []uint32) (delOPs []wire.OutPoint) { @@ -174,7 +174,6 @@ func blockToDelOPs( blockInIdx++ // coinbase tx always has 1 input continue } - // loop through inputs for _, txin := range tx.TxIn { // check if on skiplist. 
If so, don't make leaf @@ -184,7 +183,6 @@ func blockToDelOPs( blockInIdx++ continue } - delOPs = append(delOPs, txin.PreviousOutPoint) blockInIdx++ } From 0835f188be7a4e7ca7086fe6df92862e0abf9ff7 Mon Sep 17 00:00:00 2001 From: adiabat Date: Sun, 20 Dec 2020 20:20:23 -0500 Subject: [PATCH 08/28] structure of compact serialization / deserialization for ublocks it all happens without needing to query any csn state (except for the blockhash which we're not using yet) --- util/types.go | 270 ++++++++++++++++++++++++++++++++++++++++++++++++-- util/utils.go | 11 +- 2 files changed, 268 insertions(+), 13 deletions(-) diff --git a/util/types.go b/util/types.go index 13bc4667..5d6245bc 100644 --- a/util/types.go +++ b/util/types.go @@ -56,6 +56,14 @@ type LeafData struct { PkScript []byte } +// compact serialization flags: +// right now just 0xff for full pkscript, 0x01 for p2pkh +// TODO can add p2sh / segwit stuff later +const ( + LeafFlagFullPKScript = 0xff + LeafFlagP2PKH = 0x01 +) + // ToString turns a LeafData into a string func (l *LeafData) ToString() (s string) { s = l.Outpoint.String() @@ -102,6 +110,11 @@ func (l *LeafData) Deserialize(r io.Reader) (err error) { _, err = io.ReadFull(r, l.Outpoint.Hash[:]) err = binary.Read(r, binary.BigEndian, &l.Outpoint.Index) err = binary.Read(r, binary.BigEndian, &l.Height) + if l.Height&1 == 1 { + l.Coinbase = true + } + l.Height >>= 1 + err = binary.Read(r, binary.BigEndian, &l.Amt) var pkSize uint16 @@ -113,18 +126,89 @@ func (l *LeafData) Deserialize(r io.Reader) (err error) { } l.PkScript = make([]byte, pkSize) _, err = io.ReadFull(r, l.PkScript) - if l.Height&1 == 1 { - l.Coinbase = true - } - l.Height >>= 1 + return } // compact serialization for LeafData: // don't need to send BlockHash; figure it out from height // don't need to send outpoint, it's already in the msgBlock -// can use tags for PkScript -// so it's just height, coinbaseness, amt, pkscript tag +// 1 byte tag for PkScript, with more if needed +// so it's 
just height/coinbaseness, amt, pkscript tag + +// TODO can compact the amount too, same way bitcoind does + +// SerializeCompact puts compact LeafData onto a writer +func (l *LeafData) SerializeCompact(w io.Writer) (err error) { + hcb := l.Height << 1 + if l.Coinbase { + hcb |= 1 + } + + // _, err = w.Write(l.BlockHash[:]) + // _, err = w.Write(l.Outpoint.Hash[:]) + // err = binary.Write(w, binary.BigEndian, l.Outpoint.Index) + + err = binary.Write(w, binary.BigEndian, hcb) + err = binary.Write(w, binary.BigEndian, l.Amt) + if len(l.PkScript) > 10000 { + err = fmt.Errorf("pksize too long") + return + } + if IsP2PKH(l.PkScript) { + w.Write([]byte{LeafFlagP2PKH}) + } else { + w.Write([]byte{LeafFlagFullPKScript}) + err = binary.Write(w, binary.BigEndian, uint16(len(l.PkScript))) + _, err = w.Write(l.PkScript) + } + return +} + +// SerializeSize says how big a leafdata is +func (l *LeafData) SerializeCompactSize() int { + var pklen int + if IsP2PKH(l.PkScript) { + pklen = 1 + } else { + pklen = 3 + len(l.PkScript) // 1 byte flag, 2 byte len, pkscript + } + // 4B height, 8B amount, then pkscript + return 12 + pklen +} + +// DeserializeCompact takes the bytes from SerializeCompact and rebuilds +// into a LeafDataPartial. 
Note that this isn't a whole leafdata +func (l *LeafData) DeserializeCompact(r io.Reader) (flag byte, err error) { + err = binary.Read(r, binary.BigEndian, &l.Height) + if l.Height&1 == 1 { + l.Coinbase = true + } + l.Height >>= 1 + + err = binary.Read(r, binary.BigEndian, &l.Amt) + + flagSlice := make([]byte, 1) // this is dumb but only way to read 1 byte + _, err = r.Read(flagSlice) + flag = flagSlice[0] + if err != nil { + return + } + if flag == LeafFlagP2PKH { + return + } + + var pkSize uint16 + err = binary.Read(r, binary.BigEndian, &pkSize) + if pkSize > 10000 { + err = fmt.Errorf("pksize %d byte too long", pkSize) + return + } + l.PkScript = make([]byte, pkSize) + _, err = io.ReadFull(r, l.PkScript) + + return +} // LeafHash turns a LeafData into a LeafHash func (l *LeafData) LeafHash() [32]byte { @@ -181,7 +265,45 @@ func (ud *UData) Serialize(w io.Writer) (err error) { return } -// +// Serialize but use compact encoding for leafData +func (ud *UData) SerializeCompact(w io.Writer) (err error) { + err = binary.Write(w, binary.BigEndian, ud.Height) + if err != nil { // ^ 4B block height + return + } + err = binary.Write(w, binary.BigEndian, uint32(len(ud.TxoTTLs))) + if err != nil { // ^ 4B num ttls + return + } + for _, ttlval := range ud.TxoTTLs { // write all ttls + err = binary.Write(w, binary.BigEndian, ttlval) + if err != nil { + return + } + } + + err = ud.AccProof.Serialize(w) + if err != nil { // ^ batch proof with lengths internal + return + } + + // fmt.Printf("accproof %d bytes\n", ud.AccProof.SerializeSize()) + + // write all the leafdatas + for _, ld := range ud.Stxos { + // fmt.Printf("writing ld %d %s\n", i, ld.ToString()) + err = ld.SerializeCompact(w) + if err != nil { + return + } + // fmt.Printf("h %d leaf %d %s len %d\n", + // ud.Height, i, ld.Outpoint.String(), len(ld.PkScript)) + } + + return +} + +// gives the size of the serialized udata without actually serializing it func (ud *UData) SerializeSize() int { var ldsize int var b 
bytes.Buffer @@ -210,6 +332,35 @@ func (ud *UData) SerializeSize() int { return guess } +// gives the size of the serialized udata with compact leaf data +func (ud *UData) SerializeCompactSize() int { + var ldsize int + var b bytes.Buffer + + // TODO this is slow, can remove double checking once it works reliably + for _, l := range ud.Stxos { + ldsize += l.SerializeSize() + b.Reset() + l.Serialize(&b) + if b.Len() != l.SerializeCompactSize() { + fmt.Printf(" b.Len() %d, l.SerializeSize() %d\n", + b.Len(), l.SerializeCompactSize()) + } + } + + b.Reset() + ud.AccProof.Serialize(&b) + if b.Len() != ud.AccProof.SerializeSize() { + fmt.Printf(" b.Len() %d, AccProof.SerializeSize() %d\n", + b.Len(), ud.AccProof.SerializeSize()) + } + + guess := 8 + (4 * len(ud.TxoTTLs)) + ud.AccProof.SerializeSize() + ldsize + + // 8B height & numTTLs, 4B per TTL, accProof size, leaf sizes + return guess +} + func (ud *UData) Deserialize(r io.Reader) (err error) { err = binary.Read(r, binary.BigEndian, &ud.Height) @@ -264,6 +415,63 @@ func (ud *UData) Deserialize(r io.Reader) (err error) { return } +// Gives a partially filled in UData from compact serialization +// Also gives the "flags" for the leaf data. 
Combine those with +// The data from the regular block to recreate the full leaf data +func (ud *UData) DeserializeCompact(r io.Reader) (flags []byte, err error) { + err = binary.Read(r, binary.BigEndian, &ud.Height) + if err != nil { // ^ 4B block height + fmt.Printf("ud deser Height err %s\n", err.Error()) + return + } + // fmt.Printf("read height %d\n", ud.Height) + + var numTTLs uint32 + err = binary.Read(r, binary.BigEndian, &numTTLs) + if err != nil { // ^ 4B num ttls + fmt.Printf("ud deser numTTLs err %s\n", err.Error()) + return + } + // fmt.Printf("read ttls %d\n", numTTLs) + // fmt.Printf("UData deser read h %d - %d ttls ", ud.Height, numTTLs) + + ud.TxoTTLs = make([]int32, numTTLs) + for i, _ := range ud.TxoTTLs { // write all ttls + err = binary.Read(r, binary.BigEndian, &ud.TxoTTLs[i]) + if err != nil { + fmt.Printf("ud deser LeafTTLs[%d] err %s\n", i, err.Error()) + return + } + // fmt.Printf("read ttl[%d] %d\n", i, ud.TxoTTLs[i]) + } + + err = ud.AccProof.Deserialize(r) + if err != nil { // ^ batch proof with lengths internal + fmt.Printf("ud deser AccProof err %s\n", err.Error()) + return + } + + // fmt.Printf("%d byte accproof, read %d targets\n", + // ud.AccProof.SerializeSize(), len(ud.AccProof.Targets)) + // we've already gotten targets. 1 leafdata per target + ud.Stxos = make([]LeafData, len(ud.AccProof.Targets)) + flags = make([]byte, len(ud.AccProof.Targets)) + var flag byte + for i, _ := range ud.Stxos { + flag, err = ud.Stxos[i].DeserializeCompact(r) + if err != nil { + err = fmt.Errorf( + "ud deser h %d nttl %d targets %d UtxoData[%d] err %s\n", + ud.Height, numTTLs, len(ud.AccProof.Targets), i, err.Error()) + return + } + flags[i] = flag + // fmt.Printf("h %d leaf %d %s len %d\n", + // ud.Height, i, ud.Stxos[i].Outpoint.String(), len(ud.Stxos[i].PkScript)) + } + return +} + // Deserialize a UBlock. It's just a block then udata. 
func (ub *UBlock) Deserialize(r io.Reader) (err error) { err = ub.Block.Deserialize(r) @@ -297,12 +505,52 @@ func (ub *UBlock) SerializeSize() int { // block proof, you've also got the block, so should always be OK to omit the // data that's already in the block. -func UDataFromCompactBytes(b []byte) (UData, error) { - var ud UData +// We don't actually call serialize since from the server side we don't +// serialize, we just glom stuff together from the disk and send it over. +func (ub *UBlock) SerializeCompact(w io.Writer) (err error) { + err = ub.Block.Serialize(w) + if err != nil { + return + } + err = ub.UtreexoData.SerializeCompact(w) + return +} - return ud, nil +// SerializeSize: how big is it, in bytes. +func (ub *UBlock) SerializeCompactSize() int { + return ub.Block.SerializeSize() + ub.UtreexoData.SerializeCompactSize() } -func (ud *UData) ToCompactBytes() (b []byte) { +// Deserialize a compact UBlock. More complex in that the leafdata gets +// rebuilt from the block data. Note that this leaves the blockhash +// empty in the leaf data, so that needs to be filled in by lookup up +// the headers (block height is provided) +func (ub *UBlock) DeserializeCompact(r io.Reader) (err error) { + err = ub.Block.Deserialize(r) + if err != nil { + return err + } + // fmt.Printf("deser'd block %s %d bytes\n", + // ub.Block.Header.BlockHash().String(), ub.Block.SerializeSize()) + flags, err := ub.UtreexoData.DeserializeCompact(r) + + // ensure leaf data & block inputs size match up + if len(flags) != len(ub.UtreexoData.Stxos) { + err = fmt.Errorf("%d flags but %d leaf data", + len(flags), len(ub.UtreexoData.Stxos)) + return + } + proofsRemaining := len(flags) + for i, tx := range ub.Block.Transactions { + if i == 0 { + continue + } + proofsRemaining -= len(tx.TxIn) + } + if proofsRemaining != 0 { + err = fmt.Errorf("%d txos proven but %d inputs in block", + len(flags), len(flags)-proofsRemaining) + } + return } diff --git a/util/utils.go b/util/utils.go index 
96ea1268..ec56295a 100644 --- a/util/utils.go +++ b/util/utils.go @@ -294,8 +294,8 @@ func HasAccess(fileName string) bool { return true } -//IsUnspendable determines whether a tx is spendable or not. -//returns true if spendable, false if unspendable. +// IsUnspendable determines whether a txout is spendable or not. +// returns true if spendable, false if unspendable. func IsUnspendable(o *wire.TxOut) bool { switch { case len(o.PkScript) > 10000: //len 0 is OK, spendable @@ -306,3 +306,10 @@ func IsUnspendable(o *wire.TxOut) bool { return false } } + +// Returns true for p2pkh outputs by checking the opcode bytes +func IsP2PKH(pks []byte) bool { + return len(pks) == 25 && + pks[0] == 0x76 && pks[1] == 0xa9 && pks[2] == 0x14 && + pks[23] == 0x88 && pks[24] == 0xac +} From e0261ea2d5097380b82b21f2676c4b84e4e4581d Mon Sep 17 00:00:00 2001 From: adiabat Date: Mon, 21 Dec 2020 00:10:10 -0500 Subject: [PATCH 09/28] add functions for recovering leaf data from block data --- util/types.go | 32 ++++++++++++++++++++++++++++++++ util/utils.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/util/types.go b/util/types.go index 5d6245bc..e0e810a5 100644 --- a/util/types.go +++ b/util/types.go @@ -195,6 +195,7 @@ func (l *LeafData) DeserializeCompact(r io.Reader) (flag byte, err error) { return } if flag == LeafFlagP2PKH { + // if it's P2PKH the flag alone is enough; no PKH data is given return } @@ -525,6 +526,7 @@ func (ub *UBlock) SerializeCompactSize() int { // rebuilt from the block data. 
Note that this leaves the blockhash // empty in the leaf data, so that needs to be filled in by lookup up // the headers (block height is provided) +// The 2 things to rebuild here are outpoint and pkscript func (ub *UBlock) DeserializeCompact(r io.Reader) (err error) { err = ub.Block.Deserialize(r) if err != nil { @@ -540,6 +542,8 @@ func (ub *UBlock) DeserializeCompact(r io.Reader) (err error) { len(flags), len(ub.UtreexoData.Stxos)) return } + // make sure the number of targets in the proof side matches the + // number of inputs in the block proofsRemaining := len(flags) for i, tx := range ub.Block.Transactions { if i == 0 { @@ -547,9 +551,37 @@ func (ub *UBlock) DeserializeCompact(r io.Reader) (err error) { } proofsRemaining -= len(tx.TxIn) } + // if it doesn't match, fail if proofsRemaining != 0 { err = fmt.Errorf("%d txos proven but %d inputs in block", len(flags), len(flags)-proofsRemaining) + return + } + // we know the leaf data & inputs match up, at least in number, so + // rebuild the leaf data. It could be wrong but we'll find out later + // if the hashes / proofs don't match. 
+ txinInBlock := 0 + for i, tx := range ub.Block.Transactions { + if i == 0 { + continue // skip coinbase + } + for _, in := range tx.TxIn { + // rebuild leaf data from this txin data (OP and PkScript) + // copy outpoint from block into leaf + ub.UtreexoData.Stxos[txinInBlock].Outpoint = in.PreviousOutPoint + // rebuild pkscript based on flag + + // so far only P2PKH are omitted / recovered + if flags[txinInBlock] == LeafFlagP2PKH { + // get pubkey from sigscript + ub.UtreexoData.Stxos[txinInBlock].PkScript, err = + RecoverPkScriptP2PKH(in.SignatureScript) + if err != nil { + return + } + } + txinInBlock++ + } } return diff --git a/util/utils.go b/util/utils.go index ec56295a..300d4796 100644 --- a/util/utils.go +++ b/util/utils.go @@ -10,6 +10,8 @@ import ( "sort" "time" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/wire" "github.com/mit-dci/utreexo/accumulator" @@ -313,3 +315,31 @@ func IsP2PKH(pks []byte) bool { pks[0] == 0x76 && pks[1] == 0xa9 && pks[2] == 0x14 && pks[23] == 0x88 && pks[24] == 0xac } + +// given a P2PKH scriptSig, output the original scriptPubKey +func RecoverPkScriptP2PKH(scriptSig []byte) ([]byte, error) { + if len(scriptSig) == 0 { + return nil, fmt.Errorf("RecoverPkScriptP2PKH give empty scriptSig") + } + siglen := scriptSig[0] + if len(scriptSig)+1 < int(siglen) { + return nil, fmt.Errorf("RecoverPkScriptP2PKH can't pop signature") + } + scriptSig = scriptSig[siglen+1:] + pklen := scriptSig[0] + if len(scriptSig)+1 < int(pklen) { + return nil, fmt.Errorf("RecoverPkScriptP2PKH can't pop pubkey") + } + pkh := btcutil.Hash160(scriptSig[1 : 1+pklen]) + return p2pkhify(pkh), nil +} + +// turns a pubkey hash into a normal bitcoin p2pkh spend script. +// no checks on the input. if it's not 20 bytes, will return a script +// that won't work. +func p2pkhify(pkh []byte) (script []byte) { + script = []byte{0x76, 0xa9, 0x14} + script = append(script, pkh...) 
+ script = append(script, []byte{0x88, 0xac}...) + return +} From d2b99ee180b59db12d9527fd6b89c9b74240a36b Mon Sep 17 00:00:00 2001 From: adiabat Date: Mon, 21 Dec 2020 22:03:14 -0500 Subject: [PATCH 10/28] compact deserialization needs skiplists, so pass those along --- bridgenode/flatfileworker.go | 11 ++++----- csn/ibd.go | 28 +++++++++++------------ util/types.go | 43 +++++++++++++++++++++++++++++++----- util/utils.go | 16 +++++++++----- 4 files changed, 68 insertions(+), 30 deletions(-) diff --git a/bridgenode/flatfileworker.go b/bridgenode/flatfileworker.go index c055d68a..3a937440 100644 --- a/bridgenode/flatfileworker.go +++ b/bridgenode/flatfileworker.go @@ -208,13 +208,14 @@ func (ff *flatFileState) writeProofBlock(ud util.UData) error { } // prefix with size - err = binary.Write(ff.proofFile, binary.BigEndian, uint32(ud.SerializeSize())) + err = binary.Write( + ff.proofFile, binary.BigEndian, uint32(ud.SerializeCompactSize())) if err != nil { return err } // then write the whole proof - err = ud.Serialize(ff.proofFile) + err = ud.SerializeCompact(ff.proofFile) if err != nil { return err } @@ -224,14 +225,14 @@ func (ff *flatFileState) writeProofBlock(ud util.UData) error { if err != nil { return err } - if off != ff.currentOffset+int64(ud.SerializeSize())+8 { + if off != ff.currentOffset+int64(ud.SerializeCompactSize())+8 { return fmt.Errorf("h %d offset %x calculated length %d but observed %d", ff.currentHeight, ff.currentOffset, - int64(ud.SerializeSize())+8, off-ff.currentOffset) + int64(ud.SerializeCompactSize())+8, off-ff.currentOffset) } // 4B magic & 4B size comes first - ff.currentOffset += int64(ud.SerializeSize()) + 8 + ff.currentOffset += int64(ud.SerializeCompactSize()) + 8 ff.currentHeight++ ff.fileWait.Done() diff --git a/csn/ibd.go b/csn/ibd.go index 4b0f4f40..b089292e 100644 --- a/csn/ibd.go +++ b/csn/ibd.go @@ -31,12 +31,12 @@ func (c *Csn) IBDThread(sig chan bool, quitafter int) { // blocks come in and sit in the blockQueue // They 
should come in from the network -- right now they're coming from the // disk but it should be the exact same thing - ublockQueue := make(chan util.UBlock, 10) + ublockSkipQueue := make(chan util.UBlockWithSkiplists, 10) // Reads blocks asynchronously from blk*.dat files, and the proof.dat, and DB // this will be a network reader, with the server sending the same stuff over go util.UblockNetworkReader( - ublockQueue, c.remoteHost, c.CurrentHeight, lookahead) + ublockSkipQueue, c.remoteHost, c.CurrentHeight, lookahead) var plustime time.Duration starttime := time.Now() @@ -46,14 +46,15 @@ func (c *Csn) IBDThread(sig chan bool, quitafter int) { var blockCount int for ; !stop; c.CurrentHeight++ { - blocknproof, open := <-ublockQueue + blocknproof, open := <-ublockSkipQueue if !open { fmt.Printf("ublockQueue channel closed ") sig <- true break } - err := c.putBlockInPollard(blocknproof, &totalTXOAdded, &totalDels, plustime) + err := c.putBlockInPollard( + blocknproof, &totalTXOAdded, &totalDels, plustime) if err != nil { // crash if there's a bad proof or signature, OK for testing panic(err) @@ -139,16 +140,15 @@ func (c *Csn) ScanBlock(b wire.MsgBlock) { // Here we write proofs for all the txs. // All the inputs are saved as 32byte sha256 hashes. // All the outputs are saved as Leaf type. 
-func (c *Csn) putBlockInPollard( - ub util.UBlock, totalTXOAdded, totalDels *int, plustime time.Duration) error { +func (c *Csn) putBlockInPollard(ub util.UBlockWithSkiplists, + totalTXOAdded, totalDels *int, plustime time.Duration) error { plusstart := time.Now() - - inskip, outskip := util.DedupeBlock(&ub.Block) - if !ub.ProofsProveBlock(inskip) { - return fmt.Errorf( - "uData missing utxo data for block %d", ub.UtreexoData.Height) - } + // no need to prove match with compact + // if !ub.ProofsProveBlock(inskip) { + // return fmt.Errorf( + // "uData missing utxo data for block %d", ub.UtreexoData.Height) + // } *totalDels += len(ub.UtreexoData.AccProof.Targets) // for benchmarking @@ -168,7 +168,7 @@ func (c *Csn) putBlockInPollard( // PoW, but the signatures are... if c.CheckSignatures { - if !ub.CheckBlock(outskip, &c.Params) { + if !ub.CheckBlock(ub.Outskip, &c.Params) { return fmt.Errorf("height %d hash %s block invalid", ub.UtreexoData.Height, ub.Block.BlockHash().String()) } @@ -189,7 +189,7 @@ func (c *Csn) putBlockInPollard( // get hashes to add into the accumulator blockAdds := util.BlockToAddLeaves( - ub.Block, remember, outskip, ub.UtreexoData.Height) + ub.Block, remember, ub.Outskip, ub.UtreexoData.Height) *totalTXOAdded += len(blockAdds) // for benchmarking // for i, leaf := range blockAdds { diff --git a/util/types.go b/util/types.go index e0e810a5..7da586e4 100644 --- a/util/types.go +++ b/util/types.go @@ -30,6 +30,13 @@ type UBlock struct { Block wire.MsgBlock } +// Compact deserialization gives you the dedupe skiplists for "free" so +// may as well include them here +type UBlockWithSkiplists struct { + UBlock + Inskip, Outskip []uint32 // really could be 16bit as no block has 65K txos +} + /* Ublock serialization (changed with flatttl branch) @@ -37,6 +44,15 @@ Ublock serialization A "Ublock" is a regular bitcoin block, along with Utreexo-specific data. The udata comes first, and the height and leafTTLs come first. 
+Height: the height of the block in the blockchain +[]LeafData: UTXOs spent in this block +BatchProof: the inclustion proof for all the LeafData +TxoTTLs: for each new output created at this height, how long the utxo lasts + +Note that utxos that are created & destroyed in the same block are not included +as LeafData and not proven in the BatchProof; from utreexo's perspective they +don't exist. + */ type UData struct { @@ -527,11 +543,17 @@ func (ub *UBlock) SerializeCompactSize() int { // empty in the leaf data, so that needs to be filled in by lookup up // the headers (block height is provided) // The 2 things to rebuild here are outpoint and pkscript -func (ub *UBlock) DeserializeCompact(r io.Reader) (err error) { +// Also we need a skiplist here as 0-duration UTXOs don't get proofs, so +// they show up in the ub.Block but not in ub.UtreexoData. +// Return the skiplist so you don't have to calculate it twice. +func (ub *UBlockWithSkiplists) DeserializeCompact(r io.Reader) (err error) { err = ub.Block.Deserialize(r) if err != nil { return err } + // get the skiplists from the block & save them in the ubwsls + ub.Inskip, ub.Outskip = DedupeBlock(&ub.Block) + // fmt.Printf("deser'd block %s %d bytes\n", // ub.Block.Header.BlockHash().String(), ub.Block.SerializeSize()) flags, err := ub.UtreexoData.DeserializeCompact(r) @@ -540,7 +562,6 @@ func (ub *UBlock) DeserializeCompact(r io.Reader) (err error) { if len(flags) != len(ub.UtreexoData.Stxos) { err = fmt.Errorf("%d flags but %d leaf data", len(flags), len(ub.UtreexoData.Stxos)) - return } // make sure the number of targets in the proof side matches the // number of inputs in the block @@ -555,17 +576,29 @@ func (ub *UBlock) DeserializeCompact(r io.Reader) (err error) { if proofsRemaining != 0 { err = fmt.Errorf("%d txos proven but %d inputs in block", len(flags), len(flags)-proofsRemaining) - return } + + // blockToDelOPs() // we know the leaf data & inputs match up, at least in number, so // rebuild the leaf 
data. It could be wrong but we'll find out later // if the hashes / proofs don't match. txinInBlock := 0 + skippos := 0 + skiplen := len(ub.Inskip) for i, tx := range ub.Block.Transactions { if i == 0 { - continue // skip coinbase + txinInBlock++ // coinbase always has 1 input + continue // skip coinbase } + // loop through inputs for _, in := range tx.TxIn { + // skip if on skiplist + if skippos < skiplen && ub.Inskip[skippos] == uint32(txinInBlock) { + skippos++ + txinInBlock++ + continue + } + // rebuild leaf data from this txin data (OP and PkScript) // copy outpoint from block into leaf ub.UtreexoData.Stxos[txinInBlock].Outpoint = in.PreviousOutPoint @@ -584,5 +617,5 @@ func (ub *UBlock) DeserializeCompact(r io.Reader) (err error) { } } - return + return nil } diff --git a/util/utils.go b/util/utils.go index 300d4796..3bf8e5fd 100644 --- a/util/utils.go +++ b/util/utils.go @@ -58,7 +58,7 @@ func GenHashForNet(p chaincfg.Params) (*Hash, error) { // UblockNetworkReader gets Ublocks from the remote host and puts em in the // channel. It'll try to fill the channel buffer. 
func UblockNetworkReader( - blockChan chan UBlock, remoteServer string, + blockChan chan UBlockWithSkiplists, remoteServer string, curHeight, lookahead int32) { d := net.Dialer{Timeout: 2 * time.Second} @@ -69,7 +69,7 @@ func UblockNetworkReader( defer con.Close() defer close(blockChan) - var ub UBlock + var ub UBlockWithSkiplists // var ublen uint32 // request range from curHeight to latest block err = binary.Write(con, binary.BigEndian, curHeight) @@ -91,7 +91,7 @@ func UblockNetworkReader( // Need to sort the blocks though if you're doing that for ; ; curHeight++ { - err = ub.Deserialize(con) + err = ub.DeserializeCompact(con) if err != nil { fmt.Printf("Deserialize error from connection %s %s\n", con.RemoteAddr().String(), err.Error()) @@ -198,7 +198,6 @@ func blockToDelOPs( // So the coinbase tx in & output numbers affect the skip lists even though // the coinbase ins/outs can never be deduped. it's simpler that way. func DedupeBlock(blk *wire.MsgBlock) (inskip []uint32, outskip []uint32) { - var i uint32 // wire.Outpoints are comparable with == which is nice. inmap := make(map[wire.OutPoint]uint32) @@ -220,7 +219,7 @@ func DedupeBlock(blk *wire.MsgBlock) (inskip []uint32, outskip []uint32) { // start over, go through outputs finding skips for cbif0, tx := range blk.Transactions { if cbif0 == 0 { // coinbase tx can't be deduped - i += uint32(len(tx.TxOut)) // coinbase can have multiple inputs + i += uint32(len(tx.TxOut)) // coinbase can have multiple outputs continue } txid := tx.TxHash() @@ -248,6 +247,11 @@ func sortUint32s(s []uint32) { sort.Slice(s, func(a, b int) bool { return s[a] < s[b] }) } +// it'd be cool if you just had .sort() methods on slices of builtin types... 
+func sortUint16s(s []uint16) { + sort.Slice(s, func(a, b int) bool { return s[a] < s[b] }) +} + // PrefixLen16 puts a 2 byte length prefix in front of a byte slice func PrefixLen16(b []byte) []byte { l := uint16(len(b)) @@ -319,7 +323,7 @@ func IsP2PKH(pks []byte) bool { // given a P2PKH scriptSig, output the original scriptPubKey func RecoverPkScriptP2PKH(scriptSig []byte) ([]byte, error) { if len(scriptSig) == 0 { - return nil, fmt.Errorf("RecoverPkScriptP2PKH give empty scriptSig") + return nil, fmt.Errorf("RecoverPkScriptP2PKH given empty scriptSig") } siglen := scriptSig[0] if len(scriptSig)+1 < int(siglen) { From af86dc071b33770feb5aca21d9237f658bfc4812 Mon Sep 17 00:00:00 2001 From: adiabat Date: Mon, 21 Dec 2020 22:12:58 -0500 Subject: [PATCH 11/28] obvious fix in UData SerializeCompactSize() --- util/types.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/util/types.go b/util/types.go index 7da586e4..84d6e8f1 100644 --- a/util/types.go +++ b/util/types.go @@ -356,11 +356,11 @@ func (ud *UData) SerializeCompactSize() int { // TODO this is slow, can remove double checking once it works reliably for _, l := range ud.Stxos { - ldsize += l.SerializeSize() + ldsize += l.SerializeCompactSize() b.Reset() - l.Serialize(&b) + l.SerializeCompact(&b) if b.Len() != l.SerializeCompactSize() { - fmt.Printf(" b.Len() %d, l.SerializeSize() %d\n", + fmt.Printf(" b.Len() %d, l.SerializeCompactSize() %d\n", b.Len(), l.SerializeCompactSize()) } } From f24feb2fe981a6f56ec4f0e6fd26c570c9d41e16 Mon Sep 17 00:00:00 2001 From: adiabat Date: Mon, 21 Dec 2020 22:25:14 -0500 Subject: [PATCH 12/28] compact serialization doesn't crash; but reconstruct wants target hashes --- util/types.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/util/types.go b/util/types.go index 84d6e8f1..e1fac222 100644 --- a/util/types.go +++ b/util/types.go @@ -582,38 +582,40 @@ func (ub *UBlockWithSkiplists) DeserializeCompact(r io.Reader) 
(err error) { // we know the leaf data & inputs match up, at least in number, so // rebuild the leaf data. It could be wrong but we'll find out later // if the hashes / proofs don't match. - txinInBlock := 0 + inputInBlock := 0 skippos := 0 skiplen := len(ub.Inskip) + fmt.Printf("%d h %d txs %d targets inskip %v\n", + ub.UtreexoData.Height, len(ub.Block.Transactions), + len(ub.UtreexoData.Stxos), ub.Inskip) for i, tx := range ub.Block.Transactions { if i == 0 { - txinInBlock++ // coinbase always has 1 input - continue // skip coinbase + continue // skip coinbase, not counted in Stxos } // loop through inputs for _, in := range tx.TxIn { // skip if on skiplist - if skippos < skiplen && ub.Inskip[skippos] == uint32(txinInBlock) { + if skippos < skiplen && ub.Inskip[skippos] == uint32(inputInBlock) { skippos++ - txinInBlock++ + inputInBlock++ continue } // rebuild leaf data from this txin data (OP and PkScript) // copy outpoint from block into leaf - ub.UtreexoData.Stxos[txinInBlock].Outpoint = in.PreviousOutPoint + ub.UtreexoData.Stxos[inputInBlock].Outpoint = in.PreviousOutPoint // rebuild pkscript based on flag // so far only P2PKH are omitted / recovered - if flags[txinInBlock] == LeafFlagP2PKH { + if flags[inputInBlock] == LeafFlagP2PKH { // get pubkey from sigscript - ub.UtreexoData.Stxos[txinInBlock].PkScript, err = + ub.UtreexoData.Stxos[inputInBlock].PkScript, err = RecoverPkScriptP2PKH(in.SignatureScript) if err != nil { return } } - txinInBlock++ + inputInBlock++ } } From 611dfeeace3f8559189c44dae1eb87895f557461 Mon Sep 17 00:00:00 2001 From: adiabat Date: Sat, 2 Jan 2021 15:20:07 -0500 Subject: [PATCH 13/28] remove computablePositions from ProofPositions() functions it was just returning the targets slice given to the function --- accumulator/batchproof.go | 13 ++++++------- accumulator/forestproofs.go | 2 +- accumulator/pollardfull.go | 2 +- accumulator/utils.go | 12 +++++------- 4 files changed, 13 insertions(+), 16 deletions(-) diff --git 
a/accumulator/batchproof.go b/accumulator/batchproof.go index 8b5522f1..9ab27f0e 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -155,9 +155,8 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { sortUint64s(targets) rows := treeRows(p.numLeaves) - proofPositions, computablePositions := - ProofPositions(targets, p.numLeaves, rows) - + proofPositions := ProofPositions(targets, p.numLeaves, rows) + numComputable := len(targets) // The proof should have as many hashes as there are proof positions. if len(proofPositions)+len(bp.Targets) != len(bp.Proof) { return false, nil, nil @@ -170,7 +169,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { targetNodes := make([]node, 0, len(targets)*int(rows)) rootCandidates := make([]node, 0, len(rootHashes)) // trees holds the entire proof tree of the batchproof, sorted by parents. - trees := make([]miniTree, 0, len(computablePositions)) + trees := make([]miniTree, 0, numComputable) // initialise the targetNodes for row 0. 
// TODO: this would be more straight forward if bp.Proofs wouldn't // contain the targets @@ -320,12 +319,12 @@ func (bp *BatchProof) Reconstruct( targets := make([]uint64, len(bp.Targets)) copy(targets, bp.Targets) sortUint64s(targets) - proofPositions, _ := ProofPositions(targets, numleaves, forestRows) + proofPositions := ProofPositions(targets, numleaves, forestRows) proofPositions = mergeSortedSlices(targets, proofPositions) if len(proofPositions) != len(bp.Proof) { - return nil, fmt.Errorf("Reconstruct wants %d hashes, has %d", - len(proofPositions), len(bp.Proof)) + return nil, fmt.Errorf("Reconstruct wants %d hashes, has %d targ:%v pp:%v", + len(proofPositions), len(bp.Proof), bp.Targets, proofPositions) } for i, pos := range proofPositions { diff --git a/accumulator/forestproofs.go b/accumulator/forestproofs.go index fa884ca2..a3dc7b9d 100644 --- a/accumulator/forestproofs.go +++ b/accumulator/forestproofs.go @@ -162,7 +162,7 @@ func (f *Forest) ProveBatch(hs []Hash) (BatchProof, error) { copy(sortedTargets, bp.Targets) sortUint64s(sortedTargets) - proofPositions, _ := ProofPositions(sortedTargets, f.numLeaves, f.rows) + proofPositions := ProofPositions(sortedTargets, f.numLeaves, f.rows) // proofPositions = mergeSortedSlices(proofPositions, sortedTargets) bp.Proof = make([]Hash, len(proofPositions)) for i, proofPos := range proofPositions { diff --git a/accumulator/pollardfull.go b/accumulator/pollardfull.go index 1d2e47bc..bb58e83d 100644 --- a/accumulator/pollardfull.go +++ b/accumulator/pollardfull.go @@ -85,7 +85,7 @@ func (p *Pollard) ProveBatch(hs []Hash) (BatchProof, error) { // guess is no, but that's untested. 
sortUint64s(bp.Targets) - proofPositions, _ := ProofPositions(bp.Targets, p.numLeaves, p.rows()) + proofPositions := ProofPositions(bp.Targets, p.numLeaves, p.rows()) targetsAndProof := mergeSortedSlices(proofPositions, bp.Targets) bp.Proof = make([]Hash, len(targetsAndProof)) for i, proofPos := range targetsAndProof { diff --git a/accumulator/utils.go b/accumulator/utils.go index 8ce92fec..bd2e6ece 100644 --- a/accumulator/utils.go +++ b/accumulator/utils.go @@ -9,16 +9,14 @@ import ( // verbose is a global const to get lots of printfs for debugging var verbose = false -// ProofPositions returns the positions that are needed to prove that the targets exist. +// ProofPositions returns the positions that are needed to prove that the targets +// exist. func ProofPositions( - targets []uint64, numLeaves uint64, forestRows uint8) ([]uint64, []uint64) { + targets []uint64, numLeaves uint64, forestRows uint8) []uint64 { // the proofPositions needed without caching. proofPositions := make([]uint64, 0, len(targets)*int(forestRows)) - // the positions that are computed/not included in the proof. - // (also includes the targets) - computedPositions := make([]uint64, 0, len(targets)*int(forestRows)) + // pre-allocate up to a worst-case targets*height for row := uint8(0); row < forestRows; row++ { - computedPositions = append(computedPositions, targets...) 
if numLeaves&(1< 0 && len(targets) > 0 && targets[len(targets)-1] == rootPosition(numLeaves, row, forestRows) { // remove roots from targets @@ -91,7 +89,7 @@ func ProofPositions( targets = nextTargets } - return proofPositions, computedPositions + return proofPositions } // takes a slice of dels, removes the twins (in place) and returns a slice From 05f9171860f40959df10d4c2b6c8cd029a51e42c Mon Sep 17 00:00:00 2001 From: adiabat Date: Sat, 2 Jan 2021 18:04:14 -0500 Subject: [PATCH 14/28] compiles / fits but proof hashes don't match up --- accumulator/batchproof.go | 24 ++++++++++++++---------- accumulator/pollardproof.go | 10 +++------- csn/ibd.go | 10 ++++++---- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index 9ab27f0e..3ecc318e 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -143,10 +143,10 @@ func (bp *BatchProof) ToString() string { // Takes a BatchProof, the accumulator roots, and the number of leaves in the forest. // Returns wether or not the proof verified correctly, the partial proof tree, // and the subset of roots that was computed. -func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { +func (p *Pollard) verifyBatchProof(bp BatchProof) ([]miniTree, []node, error) { if len(bp.Targets) == 0 { - return true, nil, nil + return nil, nil, nil } rootHashes := p.rootHashesReverse() // copy targets to leave them in original order @@ -158,8 +158,11 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { proofPositions := ProofPositions(targets, p.numLeaves, rows) numComputable := len(targets) // The proof should have as many hashes as there are proof positions. 
- if len(proofPositions)+len(bp.Targets) != len(bp.Proof) { - return false, nil, nil + if len(proofPositions) != len(bp.Proof) { + // fmt.Printf(") + return nil, nil, + fmt.Errorf("verifyBatchProof %d proofPositions but %d proof hashes", + len(proofPositions), len(bp.Proof)) } // targetNodes holds nodes that are known, on the bottom row those @@ -207,7 +210,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { // hashes or less than 2 targets left the proof is invalid because // there is a target without matching proof. if len(bp.Proof) < 2 || len(targets) < 2 { - return false, nil, nil + return nil, nil, fmt.Errorf("verifyBatchProof ran out of proof hashes") } targetNodes = append(targetNodes, @@ -235,7 +238,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { // target should have its sibling in targetNodes if len(targetNodes) == 1 { // sibling not found - return false, nil, nil + return nil, nil, fmt.Errorf("%v sibling not found", targetNodes) } proof = targetNodes[1] @@ -258,7 +261,8 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { (populatedNode != nil && populatedNode.data != empty && hash != populatedNode.data) { // The hash did not match the cached hash - return false, nil, nil + return nil, nil, fmt.Errorf("verifyBatchProof %d have %x calc'd %x", + parentPos, populatedNode.data, hash) } trees = append(trees, @@ -277,7 +281,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { if len(rootCandidates) == 0 { // no roots to verify - return false, nil, nil + return nil, nil, fmt.Errorf("verifyBatchProof no roots") } // `roots` is ordered, therefore to verify that `rootCandidates` @@ -293,10 +297,10 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) (bool, []miniTree, []node) { if len(rootCandidates) != rootMatches { // the proof is invalid because some root candidates were not // included in `roots`. 
- return false, nil, nil + return nil, nil, fmt.Errorf("verifyBatchProof missing roots") } - return true, trees, rootCandidates + return trees, rootCandidates, nil } // Reconstruct takes a number of leaves and rows, and turns a block proof back diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index f4428615..7b613e94 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -1,17 +1,13 @@ package accumulator -import ( - "fmt" -) - // IngestBatchProof populates the Pollard with all needed data to delete the // targets in the block proof func (p *Pollard) IngestBatchProof(bp BatchProof) error { // verify the batch proof. rootHashes := p.rootHashesReverse() - ok, trees, roots := p.verifyBatchProof(bp) - if !ok { - return fmt.Errorf("block proof mismatch") + trees, roots, err := p.verifyBatchProof(bp) + if err != nil { + return err } // preallocating polNodes helps with garbage collection polNodes := make([]polNode, len(trees)*3) diff --git a/csn/ibd.go b/csn/ibd.go index b089292e..8ae704fa 100644 --- a/csn/ibd.go +++ b/csn/ibd.go @@ -152,10 +152,12 @@ func (c *Csn) putBlockInPollard(ub util.UBlockWithSkiplists, *totalDels += len(ub.UtreexoData.AccProof.Targets) // for benchmarking - // derive leafHashes from leafData - if !ub.UtreexoData.Verify(c.pollard.ReconstructStats()) { - return fmt.Errorf("height %d LeafData / Proof mismatch", ub.UtreexoData.Height) - } + // we can no longer verify the proof on its own for self-consistency + // can only check the accproof in relation to the current pollard + + // if !ub.UtreexoData.Verify(c.pollard.ReconstructStats()) { + // return fmt.Errorf("height %d LeafData / Proof mismatch", ub.UtreexoData.Height) + // } // ************************************** // check transactions and signatures here From 1ffe55a2cc2289e124768f00ade14eecf753cabf Mon Sep 17 00:00:00 2001 From: adiabat Date: Sat, 2 Jan 2021 18:13:20 -0500 Subject: [PATCH 15/28] remove stateless reconstruct / verify of block 
proofs doesn't make any sense as the targets aren't included in the proof now so you can only tell if the proof works with respect to a pollard, can't check a proof's self-consistency --- accumulator/batchproof.go | 35 ------------------- util/udata.go | 71 --------------------------------------- 2 files changed, 106 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index 3ecc318e..75dd85a7 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -302,38 +302,3 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) ([]miniTree, []node, error) { return trees, rootCandidates, nil } - -// Reconstruct takes a number of leaves and rows, and turns a block proof back -// into a partial proof tree. Should leave bp intact -func (bp *BatchProof) Reconstruct( - numleaves uint64, forestRows uint8) (map[uint64]Hash, error) { - - if verbose { - fmt.Printf("reconstruct blockproof %d tgts %d hashes nl %d fr %d\n", - len(bp.Targets), len(bp.Proof), numleaves, forestRows) - } - proofTree := make(map[uint64]Hash) - - // If there is nothing to reconstruct, return empty map - if len(bp.Targets) == 0 { - return proofTree, nil - } - - // copy bp.targets and send copy - targets := make([]uint64, len(bp.Targets)) - copy(targets, bp.Targets) - sortUint64s(targets) - proofPositions := ProofPositions(targets, numleaves, forestRows) - proofPositions = mergeSortedSlices(targets, proofPositions) - - if len(proofPositions) != len(bp.Proof) { - return nil, fmt.Errorf("Reconstruct wants %d hashes, has %d targ:%v pp:%v", - len(proofPositions), len(bp.Proof), bp.Targets, proofPositions) - } - - for i, pos := range proofPositions { - proofTree[pos] = bp.Proof[i] - } - - return proofTree, nil -} diff --git a/util/udata.go b/util/udata.go index 9b57b70f..c2d32262 100644 --- a/util/udata.go +++ b/util/udata.go @@ -11,77 +11,6 @@ import ( "github.com/btcsuite/btcutil" ) -// ProofsProveBlock checks the consistency of a UBlock. 
Does the proof prove -// all the inputs in the block? -func (ub *UBlock) LeafDataOutpointsMatchBlock(inputSkipList []uint32) bool { - // get the outpoints that need proof - proveOPs := blockToDelOPs(&ub.Block, inputSkipList) - - // ensure that all outpoints are provided in the extradata - if len(proveOPs) != len(ub.UtreexoData.Stxos) { - fmt.Printf("height %d need proofs for %d outpoints but only %d proven\n", - ub.UtreexoData.Height, len(proveOPs), len(ub.UtreexoData.Stxos)) - return false - } - for i, _ := range ub.UtreexoData.Stxos { - if proveOPs[i] != ub.UtreexoData.Stxos[i].Outpoint { - fmt.Printf("block/utxoData mismatch %s v %s\n", - proveOPs[i].String(), ub.UtreexoData.Stxos[i].Outpoint.String()) - return false - } - } - return true -} - -// Verify checks the consistency of uData: that the utxos are proven in the -// batchproof -func (ud *UData) Verify(nl uint64, h uint8) bool { - - // this is really ugly and basically copies the whole thing to avoid - // destroying it while verifying... 
- - // presort := make([]uint64, len(ud.AccProof.Targets)) - // copy(presort, ud.AccProof.Targets) - - // fmt.Printf(ud.AccProof.ToString()) - - // ud.AccProof.SortTargets() - mp, err := ud.AccProof.Reconstruct(nl, h) - if err != nil { - fmt.Printf("Reconstruct failed %s\n", err.Error()) - return false - } - - // make sure the udata is consistent, with the same number of leafDatas - // as targets in the accumulator batch proof - if len(ud.AccProof.Targets) != len(ud.Stxos) { - fmt.Printf("Verify failed: %d targets but %d leafdatas\n", - len(ud.AccProof.Targets), len(ud.Stxos)) - } - - for i, pos := range ud.AccProof.Targets { - hashInProof, exists := mp[pos] - if !exists { - fmt.Printf("Verify failed: Target %d not in map\n", pos) - return false - } - // check if leafdata hashes to the hash in the proof at the target - if ud.Stxos[i].LeafHash() != hashInProof { - fmt.Printf("Verify failed: txo %s pos %d leafdata %x in proof %x\n", - ud.Stxos[i].Outpoint.String(), pos, - ud.Stxos[i].LeafHash(), hashInProof) - sib, exists := mp[pos^1] - if exists { - fmt.Printf("sib exists, %x\n", sib) - } - return false - } - } - // return to presorted target list - // ud.AccProof.Targets = presort - return true -} - // ToUtxoView converts a UData into a btcd blockchain.UtxoViewpoint // all the data is there, just a bit different format. 
// Note that this needs blockchain.NewUtxoEntry() in btcd From 0fa69645024ba5cecdd3c238bf716f92f5cf2c60 Mon Sep 17 00:00:00 2001 From: adiabat Date: Sun, 3 Jan 2021 17:34:24 -0500 Subject: [PATCH 16/28] remove redundant functions from pollard --- accumulator/batchproof.go | 19 +++++++++++-------- accumulator/pollard.go | 36 +----------------------------------- accumulator/pollardfull.go | 3 +++ 3 files changed, 15 insertions(+), 43 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index 75dd85a7..a8ff5c28 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -144,10 +144,11 @@ func (bp *BatchProof) ToString() string { // Returns wether or not the proof verified correctly, the partial proof tree, // and the subset of roots that was computed. func (p *Pollard) verifyBatchProof(bp BatchProof) ([]miniTree, []node, error) { - if len(bp.Targets) == 0 { return nil, nil, nil } + fmt.Printf("got proof %s\n", bp.ToString()) + rootHashes := p.rootHashesReverse() // copy targets to leave them in original order targets := make([]uint64, len(bp.Targets)) @@ -176,20 +177,22 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) ([]miniTree, []node, error) { // initialise the targetNodes for row 0. // TODO: this would be more straight forward if bp.Proofs wouldn't // contain the targets + // TODO it doesn't now! // bp.Proofs is now on from ProofPositions() proofHashes := make([]Hash, 0, len(proofPositions)) var targetsMatched uint64 for len(targets) > 0 { - // check if the target is the row 0 root. - // this is the case if its the last leaf (pos==numLeaves-1) - // AND the tree has a root at row 0 (numLeaves&1==1) - if targets[0] == p.numLeaves-1 && p.numLeaves&1 == 1 { + + // a row-0 root should never be given, as it can only be a target and + // targets aren't sent + + /*if targets[0] == p.numLeaves-1 && p.numLeaves&1 == 1 { // target is the row 0 root, append it to the root candidates. 
rootCandidates = append(rootCandidates, node{Val: rootHashes[0], Pos: targets[0]}) bp.Proof = bp.Proof[1:] break - } + }*/ // `targets` might contain a target and its sibling or just the target, if // only the target is present the sibling will be in `proofPositions`. @@ -256,12 +259,12 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) ([]miniTree, []node, error) { parentPos := parent(target.Pos, rows) hash := parentHash(left.Val, right.Val) - populatedNode, _, _, err := p.readPos(parentPos) + populatedNode, _, _, err := p.grabPos(parentPos) if err != nil || (populatedNode != nil && populatedNode.data != empty && hash != populatedNode.data) { // The hash did not match the cached hash - return nil, nil, fmt.Errorf("verifyBatchProof %d have %x calc'd %x", + return nil, nil, fmt.Errorf("verifyBatchProof pos %d have %x calc'd %x", parentPos, populatedNode.data, hash) } diff --git a/accumulator/pollard.go b/accumulator/pollard.go index b035c2ad..d951493e 100644 --- a/accumulator/pollard.go +++ b/accumulator/pollard.go @@ -343,40 +343,6 @@ func (p *Pollard) swapNodes(s arrow, row uint8) (*hashableNode, error) { return bhn, nil } -func (p *Pollard) readPos(pos uint64) ( - n, nsib *polNode, hn *hashableNode, err error) { - // Grab the tree that the position is at - tree, branchLen, bits := detectOffset(pos, p.numLeaves) - if tree >= uint8(len(p.roots)) { - err = ErrorStrings[ErrorNotEnoughTrees] - return - } - - n, nsib = p.roots[tree], p.roots[tree] - - if branchLen == 0 { - return - } - - for h := branchLen - 1; h != 0; h-- { // go through branch - lr := uint8(bits>>h) & 1 - // grab the sibling of lr - lrSib := lr ^ 1 - - n, nsib = n.niece[lr], n.niece[lrSib] - if n == nil { - return nil, nil, nil, err - } - } - - lr := uint8(bits) & 1 - // grab the sibling of lr - lrSib := lr ^ 1 - - n, nsib = n.niece[lrSib], n.niece[lr] - return // only happens when returning a root -} - // grabPos returns the thing you asked for, as well as its sibling // and a hashable node for 
the position ABOVE pos // Returns an error if it can't get it. @@ -445,7 +411,7 @@ func (p *Pollard) toFull() (*Forest, error) { if !inForest(i, ff.numLeaves, ff.rows) { continue } - n, _, _, err := p.readPos(i) + n, _, _, err := p.grabPos(i) if err != nil { return nil, err } diff --git a/accumulator/pollardfull.go b/accumulator/pollardfull.go index bb58e83d..f7261e4c 100644 --- a/accumulator/pollardfull.go +++ b/accumulator/pollardfull.go @@ -5,6 +5,9 @@ import ( ) // read is just like forestData read but for pollard +// can't return an error... +// TODO should merge this with grabPos, as they're the same thing & this just +// calls grabPos func (p *Pollard) read(pos uint64) Hash { n, _, _, err := p.grabPos(pos) if err != nil { From 9678cf680a0776144ae1086379716f75c0b25465 Mon Sep 17 00:00:00 2001 From: adiabat Date: Sun, 3 Jan 2021 17:40:00 -0500 Subject: [PATCH 17/28] get rid of pollard full it was a cool idea but wasn't any faster and we have forest more built out now would still be nice to merge a lot of the overlapping accumulator code, but that wasn't how --- accumulator/pollard.go | 16 ++++++ accumulator/pollardfull.go | 103 ------------------------------------- 2 files changed, 16 insertions(+), 103 deletions(-) delete mode 100644 accumulator/pollardfull.go diff --git a/accumulator/pollard.go b/accumulator/pollard.go index d951493e..67a0eaa2 100644 --- a/accumulator/pollard.go +++ b/accumulator/pollard.go @@ -392,6 +392,22 @@ func (p *Pollard) grabPos( return // only happens when returning a root } +// read is just like forestData read but for pollard +// can't return an error... +// TODO should merge this with grabPos, as they're the same thing & this just +// calls grabPos +func (p *Pollard) read(pos uint64) Hash { + n, _, _, err := p.grabPos(pos) + if err != nil { + fmt.Printf("read err %s pos %d\n", err.Error(), pos) + return empty + } + if n == nil { + return empty + } + return n.data +} + // toFull takes a pollard and converts to a forest. 
// For debugging and seeing what pollard is doing since there's already // a good toString method for forest. diff --git a/accumulator/pollardfull.go b/accumulator/pollardfull.go deleted file mode 100644 index f7261e4c..00000000 --- a/accumulator/pollardfull.go +++ /dev/null @@ -1,103 +0,0 @@ -package accumulator - -import ( - "fmt" -) - -// read is just like forestData read but for pollard -// can't return an error... -// TODO should merge this with grabPos, as they're the same thing & this just -// calls grabPos -func (p *Pollard) read(pos uint64) Hash { - n, _, _, err := p.grabPos(pos) - if err != nil { - fmt.Printf("read err %s pos %d\n", err.Error(), pos) - return empty - } - if n == nil { - return empty - } - return n.data -} - -// NewFullPollard gives you a Pollard with an activated -func NewFullPollard() Pollard { - var p Pollard - p.positionMap = make(map[MiniHash]uint64) - return p -} - -// PosMapSanity is costly / slow: check that everything in posMap is correct -func (p *Pollard) PosMapSanity() error { - for i := uint64(0); i < p.numLeaves; i++ { - if p.positionMap[p.read(i).Mini()] != i { - return fmt.Errorf("positionMap error: map says %x @%d but it's @%d", - p.read(i).Prefix(), p.positionMap[p.read(i).Mini()], i) - } - } - return nil -} - -// TODO make interface to reduce code dupe - -// ProveBatch but for pollard. -// Now getting really obvious that forest and pollard should both satisfy some -// kind of utreexo-like interface. And maybe forest shouldn't be called forest. -// Anyway do that after this. -func (p *Pollard) ProveBatch(hs []Hash) (BatchProof, error) { - var bp BatchProof - // skip everything if empty (should this be an error? - if len(hs) == 0 { - return bp, nil - } - if p.numLeaves < 2 { - return bp, nil - } - - // for h, p := range f.positionMap { - // fmt.Printf("%x@%d ", h[:4], p) - // } - - // first get all the leaf positions - // there shouldn't be any duplicates in hs, but if there are I guess - // it's not an error. 
- bp.Targets = make([]uint64, len(hs)) - - for i, wanted := range hs { - - pos, ok := p.positionMap[wanted.Mini()] - if !ok { - fmt.Print(p.ToString()) - return bp, fmt.Errorf("hash %x not found", wanted) - } - - // should never happen - if pos > p.numLeaves { - for m, p := range p.positionMap { - fmt.Printf("%x @%d\t", m[:4], p) - } - return bp, fmt.Errorf( - "ProveBlock: got leaf position %d but only %d leaves exist", - pos, p.numLeaves) - } - bp.Targets[i] = pos - } - // targets need to be sorted because the proof hashes are sorted - // NOTE that this is a big deal -- we lose in-block positional information - // because of this sorting. Does that hurt locality or performance? My - // guess is no, but that's untested. - sortUint64s(bp.Targets) - - proofPositions := ProofPositions(bp.Targets, p.numLeaves, p.rows()) - targetsAndProof := mergeSortedSlices(proofPositions, bp.Targets) - bp.Proof = make([]Hash, len(targetsAndProof)) - for i, proofPos := range targetsAndProof { - bp.Proof[i] = p.read(proofPos) - } - - if verbose { - fmt.Printf("blockproof targets: %v\n", bp.Targets) - } - - return bp, nil -} From 72fe40573f06a117ab6215891adc0506a5b8a84d Mon Sep 17 00:00:00 2001 From: adiabat Date: Mon, 4 Jan 2021 08:58:47 -0500 Subject: [PATCH 18/28] add comments about changes to IngestBatchProof() --- accumulator/batchproof.go | 38 ++++++++++++++++++++++++++++++++----- accumulator/pollardproof.go | 4 ++-- csn/ibd.go | 7 ++++++- util/udata.go | 9 +++++++++ 4 files changed, 50 insertions(+), 8 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index a8ff5c28..b49a5800 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -135,15 +135,41 @@ func (bp *BatchProof) ToString() string { return s } -// TODO OH WAIT -- this is not how to to it! Don't hash all the way up to the -// roots to verify -- just hash up to any populated node! Saves a ton of CPU! 
+// TODO : +/* +several changes needed & maybe easier to do them incrementally but at this +point it's more of a rewrite. +The batchProof no longer contains target hashes; those are obtained separately +from the leaf data. This makes sense as the verifying node will have to +know the preimages anyway to do tx/sig checks, so they can also compute the +hashes themselves instead of receiving them. + +prior to this change: verifyBatchProof() verifies up to the roots, +and then returned all the new stuff it received / computed, so that it +could be populated into the pollard (to allow for subsequent deletion) + +the new way it works: verifyBatchProof() and IngestBatchProof() will be +merged, since really right now IngestBatchProof() is basically just a wrapper +for verifyBatchProof(). It will get a batchProof as well as a slice of +target hashes (the things being proven). It will hash up to known branches, +then not return anything as it's populating as it goes. If the ingestion fails, +we need to undo everything added. It's also ok to trim everything down to +just the roots in that case for now; can add the backtrack later +(it doesn't seem too hard if you just keep track of every new populated position, +then wipe them on an invalid proof. Though... if you want to be really +efficient / DDoS resistant, only wipe the invalid parts and leave the partially +checked stuff that works. + + +*/ // verifyBatchProof verifies a batchproof by checking against the set of known // correct roots. // Takes a BatchProof, the accumulator roots, and the number of leaves in the forest. // Returns wether or not the proof verified correctly, the partial proof tree, // and the subset of roots that was computed. 
-func (p *Pollard) verifyBatchProof(bp BatchProof) ([]miniTree, []node, error) { +func (p *Pollard) verifyBatchProof( + bp BatchProof, targs []Hash) ([]miniTree, []node, error) { if len(bp.Targets) == 0 { return nil, nil, nil } @@ -177,7 +203,7 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) ([]miniTree, []node, error) { // initialise the targetNodes for row 0. // TODO: this would be more straight forward if bp.Proofs wouldn't // contain the targets - // TODO it doesn't now! + // TODO targets are now given in a separate argument // bp.Proofs is now on from ProofPositions() proofHashes := make([]Hash, 0, len(proofPositions)) var targetsMatched uint64 @@ -209,13 +235,15 @@ func (p *Pollard) verifyBatchProof(bp BatchProof) ([]miniTree, []node, error) { } // the sibling is not included in the proof positions, therefore - // it has to be included in targets. if there are less than 2 proof + // it must also be a target. if there are fewer than 2 proof // hashes or less than 2 targets left the proof is invalid because // there is a target without matching proof. if len(bp.Proof) < 2 || len(targets) < 2 { return nil, nil, fmt.Errorf("verifyBatchProof ran out of proof hashes") } + // if we got this far there are 2 targets that are siblings; pop em both + targetNodes = append(targetNodes, node{Pos: targets[0], Val: bp.Proof[0]}, node{Pos: targets[1], Val: bp.Proof[1]}) diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index 7b613e94..6c8d0ae0 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -2,10 +2,10 @@ package accumulator // IngestBatchProof populates the Pollard with all needed data to delete the // targets in the block proof -func (p *Pollard) IngestBatchProof(bp BatchProof) error { +func (p *Pollard) IngestBatchProof(bp BatchProof, targetHashes []Hash) error { // verify the batch proof. 
rootHashes := p.rootHashesReverse() - trees, roots, err := p.verifyBatchProof(bp) + trees, roots, err := p.verifyBatchProof(bp, targetHashes) if err != nil { return err } diff --git a/csn/ibd.go b/csn/ibd.go index 8ae704fa..0d7fae3a 100644 --- a/csn/ibd.go +++ b/csn/ibd.go @@ -150,6 +150,10 @@ func (c *Csn) putBlockInPollard(ub util.UBlockWithSkiplists, // "uData missing utxo data for block %d", ub.UtreexoData.Height) // } + // generate the target leaf hashes from the leaf data + // these are the hashes the server is telling us exist in the accumulator + // and we give these to IngestBatchProof() + *totalDels += len(ub.UtreexoData.AccProof.Targets) // for benchmarking // we can no longer verify the proof on its own for self-consistency @@ -177,7 +181,8 @@ func (c *Csn) putBlockInPollard(ub util.UBlockWithSkiplists, } // Fills in the empty(nil) nieces for verification && deletion - err := c.pollard.IngestBatchProof(ub.UtreexoData.AccProof) + err := c.pollard.IngestBatchProof( + ub.UtreexoData.AccProof, ub.UtreexoData.TargetLeafHashes()) if err != nil { fmt.Printf("height %d ingest error\n", ub.UtreexoData.Height) return err diff --git a/util/udata.go b/util/udata.go index c2d32262..dd1bdee4 100644 --- a/util/udata.go +++ b/util/udata.go @@ -28,6 +28,15 @@ func (ud *UData) ToUtxoView() *blockchain.UtxoViewpoint { return v } +// returns all the target leafhashes +func (ud *UData) TargetLeafHashes() []Hash { + leafHashes := make([]Hash, len(ud.Stxos)) + for i, _ := range ud.Stxos { + leafHashes[i] = ud.Stxos[i].LeafHash() + } + return leafHashes +} + /* blockchain.NewUtxoEntry() looks like this: // NewUtxoEntry returns a new UtxoEntry built from the arguments. 
From 8bb472db1958c9cfc0558e205756bef78dec777a Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 29 Jan 2021 15:40:06 -0500 Subject: [PATCH 19/28] add better printfs for debugging looks like there's an off by 1 error somewhere --- accumulator/forest.go | 10 +++++++--- accumulator/pollard.go | 1 - accumulator/types.go | 5 +++++ bridgenode/genproofs.go | 5 +++-- csn/ibd.go | 5 ++++- util/udata.go | 6 ++++-- 6 files changed, 23 insertions(+), 9 deletions(-) diff --git a/accumulator/forest.go b/accumulator/forest.go index d73b2b7c..64666c1e 100644 --- a/accumulator/forest.go +++ b/accumulator/forest.go @@ -747,9 +747,13 @@ func (f *Forest) Stats() string { func (f *Forest) ToString() string { fh := f.rows - // tree rows should be 6 or less - if fh > 6 { - return "forest too big to print " + // tree rows should be 3 or less to print, otherwise gives stats / roots + if fh > 3 { + s := fmt.Sprintf("%d leaves, roots: ", f.numLeaves) + for _, r := range f.getRoots() { + s += r.PrefixString() + " " + } + return s } output := make([]string, (fh*2)+1) diff --git a/accumulator/pollard.go b/accumulator/pollard.go index 67a0eaa2..2ca33187 100644 --- a/accumulator/pollard.go +++ b/accumulator/pollard.go @@ -25,7 +25,6 @@ func (p *Pollard) Modify(adds []Leaf, delsUn []uint64) error { if err != nil { return err } - // fmt.Printf("pol pre add %s", p.toString()) err = p.add(adds) if err != nil { diff --git a/accumulator/types.go b/accumulator/types.go index b6d7cf95..9bb03b8d 100644 --- a/accumulator/types.go +++ b/accumulator/types.go @@ -15,6 +15,11 @@ func (h Hash) Prefix() []byte { return h[:4] } +// Prefix string for printing +func (h Hash) PrefixString() string { + return fmt.Sprintf("%x", h[:4]) +} + // Mini : func (h Hash) Mini() (m MiniHash) { copy(m[:], h[:12]) diff --git a/bridgenode/genproofs.go b/bridgenode/genproofs.go index b9fcfdc2..ec2ee99b 100644 --- a/bridgenode/genproofs.go +++ b/bridgenode/genproofs.go @@ -156,8 +156,9 @@ func BuildProofs(cfg *Config, sig chan 
bool) error { return err } - if bnr.Height%100 == 0 { - fmt.Println("On block :", bnr.Height+1) + if bnr.Height%100 == 0 || bnr.Height < 150 { + fmt.Printf("On block %d, forest: %s\n", + bnr.Height+1, forest.ToString()) } // Check if stopSig is no longer false diff --git a/csn/ibd.go b/csn/ibd.go index 0d7fae3a..3c8d1831 100644 --- a/csn/ibd.go +++ b/csn/ibd.go @@ -69,6 +69,10 @@ func (c *Csn) IBDThread(sig chan bool, quitafter int) { c.CurrentHeight, totalTXOAdded, totalDels, c.pollard.Stats(), plustime.Seconds(), time.Since(starttime).Seconds()) } + if c.CurrentHeight < 150 { + fmt.Printf("on block %d, pollard: %s\n", + c.CurrentHeight, c.pollard.ToString()) + } // quit after `quitafter` blocks if the -quitafter option is set blockCount++ @@ -208,7 +212,6 @@ func (c *Csn) putBlockInPollard(ub util.UBlockWithSkiplists, err = c.pollard.Modify(blockAdds, ub.UtreexoData.AccProof.Targets) if err != nil { - return fmt.Errorf("csn h %d modify %s", c.CurrentHeight, err.Error()) } diff --git a/util/udata.go b/util/udata.go index dd1bdee4..3d2ef144 100644 --- a/util/udata.go +++ b/util/udata.go @@ -4,6 +4,8 @@ import ( "fmt" "sync" + "github.com/mit-dci/utreexo/accumulator" + "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/txscript" @@ -29,8 +31,8 @@ func (ud *UData) ToUtxoView() *blockchain.UtxoViewpoint { } // returns all the target leafhashes -func (ud *UData) TargetLeafHashes() []Hash { - leafHashes := make([]Hash, len(ud.Stxos)) +func (ud *UData) TargetLeafHashes() []accumulator.Hash { + leafHashes := make([]accumulator.Hash, len(ud.Stxos)) for i, _ := range ud.Stxos { leafHashes[i] = ud.Stxos[i].LeafHash() } From 248d86b8a47e5e43d7a0d316eefd017ede84b828 Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 29 Jan 2021 17:50:15 -0500 Subject: [PATCH 20/28] make tests compile (they don't pass) --- accumulator/pollard_test.go | 30 ++++++---- accumulator/pollardfull_test.go | 102 -------------------------------- 
bridgenode/genproofs.go | 5 +- csn/ibd.go | 4 -- util/types.go | 6 +- 5 files changed, 24 insertions(+), 123 deletions(-) delete mode 100644 accumulator/pollardfull_test.go diff --git a/accumulator/pollard_test.go b/accumulator/pollard_test.go index b7444e85..5e552b9c 100644 --- a/accumulator/pollard_test.go +++ b/accumulator/pollard_test.go @@ -48,13 +48,17 @@ func TestPollardSimpleIngest(t *testing.T) { hashes[i] = adds[i].Hash } - bp, _ := f.ProveBatch(hashes) + bp, err := f.ProveBatch(hashes) + if err != nil { + t.Fatal(err.Error()) + } var p Pollard p.Modify(adds, nil) // Modify the proof so that the verification should fail. - bp.Proof[0][0] = 0xFF - err := p.IngestBatchProof(bp) + bp.Proof[0] = empty + + err = p.IngestBatchProof(bp, hashes) if err == nil { t.Fatal("BatchProof valid after modification. Accumulator validation failing") } @@ -87,7 +91,7 @@ func pollardRandomRemember(blocks int32) error { return err } // verify proofs on rad node - err = p.IngestBatchProof(bp) + err = p.IngestBatchProof(bp, delHashes) if err != nil { return err } @@ -241,7 +245,7 @@ func TestCache(t *testing.T) { t.Fatal("Modify failed", err) } - err = p.IngestBatchProof(proof) + err = p.IngestBatchProof(proof, delHashes) if err != nil { t.Fatal("IngestBatchProof failed", err) } @@ -269,7 +273,7 @@ func TestCache(t *testing.T) { pos := leafProof.Targets[0] fmt.Println(pos, l) - _, nsib, _, err := p.readPos(pos) + _, nsib, _, err := p.grabPos(pos) if pos == p.numLeaves-1 { // roots are always cached @@ -278,14 +282,16 @@ func TestCache(t *testing.T) { siblingDoesNotExists := nsib == nil || nsib.data == empty || err != nil if l.Remember && siblingDoesNotExists { - // the proof for l is not cached even though it should have been because it - // was added with remember=true. - t.Fatal("proof for leaf at", pos, "does not exist but it was added with remember=true") + // the proof for l is not cached even though it should have been + // because it was added with remember=true. 
+ t.Fatal("proof for leaf at", pos, + "does not exist but it was added with remember=true") } else if !l.Remember && !siblingDoesNotExists { - // the proof for l was cached even though it should not have been because it - // was added with remember = false. + // the proof for l was cached even though it should not have + // been because it was added with remember = false. fmt.Println(p.ToString()) - t.Fatal("proof for leaf at", pos, "does exist but it was added with remember=false") + t.Fatal("proof for leaf at", pos, + "does exist but it was added with remember=false") } } } diff --git a/accumulator/pollardfull_test.go b/accumulator/pollardfull_test.go deleted file mode 100644 index c6761108..00000000 --- a/accumulator/pollardfull_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package accumulator - -import ( - "fmt" - "math/rand" - "testing" -) - -func TestPollardFullRand(t *testing.T) { - for z := 0; z < 30; z++ { - // z := 1 - rand.Seed(int64(z)) - fmt.Printf("randseed %d\n", z) - err := pollardFullRandomRemember(20) - if err != nil { - fmt.Printf("randseed %d\n", z) - t.Fatal(err) - } - } -} - -func pollardFullRandomRemember(blocks int32) error { - - // ffile, err := os.Create("/dev/shm/forfile") - // if err != nil { - // return err - // } - - var fp, p Pollard - fp = NewFullPollard() - - // p.Minleaves = 0 - - sn := NewSimChain(0x07) - sn.lookahead = 400 - for b := int32(0); b < blocks; b++ { - adds, _, delHashes := sn.NextBlock(rand.Uint32() & 0x03) - - fmt.Printf("\t\t\tstart block %d del %d add %d - %s\n", - sn.blockHeight, len(delHashes), len(adds), p.Stats()) - - // get proof for these deletions (with respect to prev block) - bp, err := fp.ProveBatch(delHashes) - if err != nil { - return err - } - - // verify proofs on rad node - err = p.IngestBatchProof(bp) - if err != nil { - return err - } - fmt.Printf("del %v\n", bp.Targets) - - // apply adds and deletes to the bridge node (could do this whenever) - err = fp.Modify(adds, bp.Targets) - if err != nil { - 
return err - } - // TODO fix: there is a leak in forest.Modify where sometimes - // the position map doesn't clear out and a hash that doesn't exist - // any more will be stuck in the positionMap. Wastes a bit of memory - // and seems to happen when there are moves to and from a location - // Should fix but can leave it for now. - - err = fp.PosMapSanity() - if err != nil { - fmt.Print(fp.ToString()) - return err - } - - // apply adds / dels to pollard - err = p.Modify(adds, bp.Targets) - if err != nil { - return err - } - - fmt.Printf("pol postadd %s", p.ToString()) - - fmt.Printf("fulpol postadd %s", fp.ToString()) - - fullTops := fp.rootHashesReverse() - polTops := p.rootHashesReverse() - - // check that tops match - if len(fullTops) != len(polTops) { - return fmt.Errorf("block %d fulpol %d tops, pol %d tops", - sn.blockHeight, len(fullTops), len(polTops)) - } - fmt.Printf("top matching: ") - for i, ft := range fullTops { - fmt.Printf("fp %04x p %04x ", ft[:4], polTops[i][:4]) - if ft != polTops[i] { - return fmt.Errorf("block %d top %d mismatch, fulpol %x pol %x", - sn.blockHeight, i, ft[:4], polTops[i][:4]) - } - } - fmt.Printf("\n") - } - - return nil -} diff --git a/bridgenode/genproofs.go b/bridgenode/genproofs.go index ec2ee99b..12ffc1cc 100644 --- a/bridgenode/genproofs.go +++ b/bridgenode/genproofs.go @@ -144,6 +144,7 @@ func BuildProofs(cfg *Config, sig chan bool) error { if err != nil { return err } + // We don't know the TTL values, but know how many spots to allocate ud.TxoTTLs = make([]int32, len(blockAdds)) // send proof udata to channel to be written to disk @@ -157,8 +158,8 @@ func BuildProofs(cfg *Config, sig chan bool) error { } if bnr.Height%100 == 0 || bnr.Height < 150 { - fmt.Printf("On block %d, forest: %s\n", - bnr.Height+1, forest.ToString()) + fmt.Printf("finished block %d (%d), forest: %s\n", + bnr.Height, height, forest.ToString()) } // Check if stopSig is no longer false diff --git a/csn/ibd.go b/csn/ibd.go index 3c8d1831..750b8a4d 
100644 --- a/csn/ibd.go +++ b/csn/ibd.go @@ -69,10 +69,6 @@ func (c *Csn) IBDThread(sig chan bool, quitafter int) { c.CurrentHeight, totalTXOAdded, totalDels, c.pollard.Stats(), plustime.Seconds(), time.Since(starttime).Seconds()) } - if c.CurrentHeight < 150 { - fmt.Printf("on block %d, pollard: %s\n", - c.CurrentHeight, c.pollard.ToString()) - } // quit after `quitafter` blocks if the -quitafter option is set blockCount++ diff --git a/util/types.go b/util/types.go index e1fac222..1623317e 100644 --- a/util/types.go +++ b/util/types.go @@ -585,9 +585,9 @@ func (ub *UBlockWithSkiplists) DeserializeCompact(r io.Reader) (err error) { inputInBlock := 0 skippos := 0 skiplen := len(ub.Inskip) - fmt.Printf("%d h %d txs %d targets inskip %v\n", - ub.UtreexoData.Height, len(ub.Block.Transactions), - len(ub.UtreexoData.Stxos), ub.Inskip) + // fmt.Printf("%d h %d txs %d targets inskip %v\n", + // ub.UtreexoData.Height, len(ub.Block.Transactions), + // len(ub.UtreexoData.Stxos), ub.Inskip) for i, tx := range ub.Block.Transactions { if i == 0 { continue // skip coinbase, not counted in Stxos From 4739809b5c015c6f59eadd90ff149a4b4bdb9bc0 Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 29 Jan 2021 18:07:27 -0500 Subject: [PATCH 21/28] aha the test is now wrong It chokes if the proof is empty (no hashes) But that's correct now; if you're proving everything, there is no proof needed! 
--- accumulator/pollard_test.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/accumulator/pollard_test.go b/accumulator/pollard_test.go index 5e552b9c..16e0796c 100644 --- a/accumulator/pollard_test.go +++ b/accumulator/pollard_test.go @@ -37,10 +37,13 @@ func TestPollardSimpleIngest(t *testing.T) { f := NewForest(nil, false, "", 0) adds := make([]Leaf, 15) for i := 0; i < len(adds); i++ { - adds[i].Hash[0] = uint8(i + 1) + adds[i].Hash = HashFromString(fmt.Sprintf("%d", i)) } - f.Modify(adds, []uint64{}) + _, err := f.Modify(adds, []uint64{}) + if err != nil { + t.Fatal(err.Error()) + } fmt.Println(f.ToString()) hashes := make([]Hash, len(adds)) @@ -52,9 +55,17 @@ func TestPollardSimpleIngest(t *testing.T) { if err != nil { t.Fatal(err.Error()) } + fmt.Printf("prove %d leaves (total %d)\n", len(hashes), len(bp.Proof)) + if len(bp.Proof) == 0 { + t.Fatal("no hashes in proof") + } var p Pollard - p.Modify(adds, nil) + + err = p.Modify(adds, nil) + if err != nil { + t.Fatal(err.Error()) + } // Modify the proof so that the verification should fail. bp.Proof[0] = empty From 939f69255938adcd0b1224d4b7852c17197fa639 Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 29 Jan 2021 23:47:46 -0500 Subject: [PATCH 22/28] start rewrite of ingest --- accumulator/batchproof.go | 22 ++++++-------- accumulator/pollard_test.go | 31 +++++++++++++++----- accumulator/pollardproof.go | 57 ++++++++++++++++++++++++++----------- 3 files changed, 73 insertions(+), 37 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index b49a5800..011d2634 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -212,19 +212,12 @@ func (p *Pollard) verifyBatchProof( // a row-0 root should never be given, as it can only be a target and // targets aren't sent - /*if targets[0] == p.numLeaves-1 && p.numLeaves&1 == 1 { - // target is the row 0 root, append it to the root candidates. 
- rootCandidates = append(rootCandidates, - node{Val: rootHashes[0], Pos: targets[0]}) - bp.Proof = bp.Proof[1:] - break - }*/ - // `targets` might contain a target and its sibling or just the target, if // only the target is present the sibling will be in `proofPositions`. + if uint64(len(proofPositions)) > targetsMatched && targets[0]^1 == proofPositions[targetsMatched] { - // the sibling of the target is included in the proof positions. + // target's sibling is in proof positions. lr := targets[0] & 1 targetNodes = append(targetNodes, node{Pos: targets[0], Val: bp.Proof[lr]}) proofHashes = append(proofHashes, bp.Proof[lr^1]) @@ -234,7 +227,7 @@ func (p *Pollard) verifyBatchProof( continue } - // the sibling is not included in the proof positions, therefore + // the sibling is not included in proof positions, therefore // it must also be a target. if there are fewer than 2 proof // hashes or less than 2 targets left the proof is invalid because // there is a target without matching proof. 
@@ -243,7 +236,6 @@ func (p *Pollard) verifyBatchProof( } // if we got this far there are 2 targets that are siblings; pop em both - targetNodes = append(targetNodes, node{Pos: targets[0], Val: bp.Proof[0]}, node{Pos: targets[1], Val: bp.Proof[1]}) @@ -288,9 +280,11 @@ func (p *Pollard) verifyBatchProof( hash := parentHash(left.Val, right.Val) populatedNode, _, _, err := p.grabPos(parentPos) - if err != nil || - (populatedNode != nil && populatedNode.data != empty && - hash != populatedNode.data) { + if err != nil { + return nil, nil, fmt.Errorf("verify grabPos error %s", err.Error()) + } + if populatedNode != nil && populatedNode.data != empty && + hash != populatedNode.data { // The hash did not match the cached hash return nil, nil, fmt.Errorf("verifyBatchProof pos %d have %x calc'd %x", parentPos, populatedNode.data, hash) diff --git a/accumulator/pollard_test.go b/accumulator/pollard_test.go index 16e0796c..693ce9fc 100644 --- a/accumulator/pollard_test.go +++ b/accumulator/pollard_test.go @@ -35,7 +35,7 @@ func TestPollardFixed(t *testing.T) { func TestPollardSimpleIngest(t *testing.T) { f := NewForest(nil, false, "", 0) - adds := make([]Leaf, 15) + adds := make([]Leaf, 8) for i := 0; i < len(adds); i++ { adds[i].Hash = HashFromString(fmt.Sprintf("%d", i)) } @@ -51,28 +51,45 @@ func TestPollardSimpleIngest(t *testing.T) { hashes[i] = adds[i].Hash } - bp, err := f.ProveBatch(hashes) + targets := hashes[1:3] + + bp, err := f.ProveBatch(targets) if err != nil { t.Fatal(err.Error()) } - fmt.Printf("prove %d leaves (total %d)\n", len(hashes), len(bp.Proof)) + + fmt.Printf("%d targets, proof is %d hashes\n", len(targets), len(bp.Proof)) if len(bp.Proof) == 0 { t.Fatal("no hashes in proof") } - var p Pollard + fmt.Println(f.ToString()) + + var p, p2 Pollard err = p.Modify(adds, nil) if err != nil { t.Fatal(err.Error()) } + err = p2.Modify(adds, nil) + if err != nil { + t.Fatal(err.Error()) + } + + fmt.Println(p.ToString()) + + err = p.IngestBatchProof(bp, targets) + 
if err != nil { + t.Fatalf("BatchProof should work, but got error: %s", err.Error()) + } // Modify the proof so that the verification should fail. - bp.Proof[0] = empty + copy(bp.Proof[0][:16], empty[:16]) - err = p.IngestBatchProof(bp, hashes) + err = p2.IngestBatchProof(bp, targets) if err == nil { - t.Fatal("BatchProof valid after modification. Accumulator validation failing") + t.Fatal("BatchProof still valid after modification.") } + fmt.Printf("Expected an error, got one: %s\n", err.Error()) } func pollardRandomRemember(blocks int32) error { diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index 6c8d0ae0..ca5ce7c0 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -1,26 +1,51 @@ package accumulator +import ( + "bytes" + "fmt" +) + // IngestBatchProof populates the Pollard with all needed data to delete the // targets in the block proof func (p *Pollard) IngestBatchProof(bp BatchProof, targetHashes []Hash) error { - // verify the batch proof. - rootHashes := p.rootHashesReverse() - trees, roots, err := p.verifyBatchProof(bp, targetHashes) + + // first, save the rootHashes. If ingestAndCheck fails, the pollard + // will be messed up / invalid, and we can wipe everything and restore + // to the roots before we ingested. 
(not idea but works for now) + // TODO: cleaner failure mode for ingesting a bad proof + + var buf bytes.Buffer + p.WritePollard(&buf) + + err := p.ingestAndCheck(bp, targetHashes) if err != nil { - return err + fmt.Printf("ingest proof failure: %s restoring pollard\n", err.Error()) + p.RestorePollard(&buf) + return fmt.Errorf("Invalid proof, pollard wiped down to roots") } - // preallocating polNodes helps with garbage collection - polNodes := make([]polNode, len(trees)*3) - i := 0 - nodesAllocated := 0 - for _, root := range roots { - for root.Val != rootHashes[i] { - i++ - } - // populate the pollard - nodesAllocated += p.populate( - p.roots[len(p.roots)-i-1], - root.Pos, trees, polNodes[nodesAllocated:]) + return nil +} + +// ingestAndCheck puts the targets and proofs from the BatchProof into the +// pollard, and computes parents as needed up to already populated nodes. +func (p *Pollard) ingestAndCheck(bp BatchProof, targs []Hash) error { + if len(targs) == 0 { + return nil + } + fmt.Printf("got proof %s\n", bp.ToString()) + + // the main thing ingestAndCheck does is write hashes to the pollard. + // the hashes can come from 2 places: arguments or hashing. + // for arguments, proofs and targets are treated pretty much the same; + // read em off the slice and write em in. + // any time you're writing somthing that's already there, check to make + // sure it matches. if it doesn't, return an error. + // if it does, you don't need to hash any parents above that. + + // first range through targets, populating / matching, and placing proof + // hashes if the targets are not twins + + for i, targpos := range bp.Targets { } From 337c75abb988b014f8c15392bffc9fe673687e49 Mon Sep 17 00:00:00 2001 From: adiabat Date: Sat, 30 Jan 2021 01:00:15 -0500 Subject: [PATCH 23/28] building ingestAndCheck but need to change grabPos this will eliminate populate, verifyBatchProof, and ProofPositions, which is getting rid of a lot of complex code. So that's good. 
It does need a grabPos variant that returns a slice of the whole branch. Then we can dedupe the nodes in those slices when inserting them into a 2d slice. modify grabPos, new function, or inline it all into ingestAndCheck..? --- accumulator/batchproof.go | 2 +- accumulator/pollard.go | 38 ++++++++++++++++------------ accumulator/pollard_test.go | 3 ++- accumulator/pollardproof.go | 49 ++++++++++++++++++++++++++++++++++++- 4 files changed, 74 insertions(+), 18 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index 011d2634..dbe8c496 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -279,7 +279,7 @@ func (p *Pollard) verifyBatchProof( parentPos := parent(target.Pos, rows) hash := parentHash(left.Val, right.Val) - populatedNode, _, _, err := p.grabPos(parentPos) + populatedNode, _, _, err := p.grabPos(parentPos, false) if err != nil { return nil, nil, fmt.Errorf("verify grabPos error %s", err.Error()) } diff --git a/accumulator/pollard.go b/accumulator/pollard.go index 2ca33187..d9c3567b 100644 --- a/accumulator/pollard.go +++ b/accumulator/pollard.go @@ -250,7 +250,7 @@ func (p *Pollard) rem2(dels []uint64) error { nextRootPositions, _ := getRootsReverse(nextNumLeaves, ph) nextRoots := make([]*polNode, len(nextRootPositions)) for i, _ := range nextRoots { - nt, ntsib, _, err := p.grabPos(nextRootPositions[i]) + nt, ntsib, _, err := p.grabPos(nextRootPositions[i], false) if err != nil { return err } @@ -277,7 +277,7 @@ func (p *Pollard) hnFromPos(pos uint64) (*hashableNode, error) { // fmt.Printf("HnFromPos %d out of forest\n", pos) return nil, nil } - _, _, hn, err := p.grabPos(pos) + _, _, hn, err := p.grabPos(pos, false) if err != nil { return nil, err } @@ -313,11 +313,11 @@ func (p *Pollard) swapNodes(s arrow, row uint8) (*hashableNode, error) { // TODO could be improved by getting the highest common ancestor // and then splitting instead of doing 2 full descents - a, asib, _, err := p.grabPos(s.from) + a, 
asib, _, err := p.grabPos(s.from, false) if err != nil { return nil, err } - b, bsib, bhn, err := p.grabPos(s.to) + b, bsib, bhn, err := p.grabPos(s.to, false) if err != nil { return nil, err } @@ -346,8 +346,8 @@ func (p *Pollard) swapNodes(s arrow, row uint8) (*hashableNode, error) { // and a hashable node for the position ABOVE pos // Returns an error if it can't get it. // NOTE errors are not exhaustive; could return garbage without an error -func (p *Pollard) grabPos( - pos uint64) (n, nsib *polNode, hn *hashableNode, err error) { +func (p *Pollard) grabPos(pos uint64, build bool) ( + n, nsib *polNode, hn *hashableNode, err error) { // Grab the tree that the position is at tree, branchLen, bits := detectOffset(pos, p.numLeaves) if tree >= uint8(len(p.roots)) { @@ -373,11 +373,19 @@ func (p *Pollard) grabPos( } n, nsib = n.niece[lr], n.niece[lrSib] if n == nil { - // if a node doesn't exist, crash - // no niece in this case - // TODO error message could be better - err = ErrorStrings[ErrorNoPollardNode] - return + if !build { + // if a node doesn't exist, crash + // no niece in this case + // TODO error message could be better + err = ErrorStrings[ErrorNoPollardNode] + return + } + // build is set, so make a new empty node here + n = &polNode{} + // also build empty siblings when build is set + if nsib == nil { + nsib = &polNode{} + } } } @@ -396,7 +404,7 @@ func (p *Pollard) grabPos( // TODO should merge this with grabPos, as they're the same thing & this just // calls grabPos func (p *Pollard) read(pos uint64) Hash { - n, _, _, err := p.grabPos(pos) + n, _, _, err := p.grabPos(pos, false) if err != nil { fmt.Printf("read err %s pos %d\n", err.Error(), pos) return empty @@ -426,7 +434,7 @@ func (p *Pollard) toFull() (*Forest, error) { if !inForest(i, ff.numLeaves, ff.rows) { continue } - n, _, _, err := p.grabPos(i) + n, _, _, err := p.grabPos(i, false) if err != nil { return nil, err } @@ -454,7 +462,7 @@ func (p *Pollard) equalToForest(f *Forest) bool { } 
for leafpos := uint64(0); leafpos < f.numLeaves; leafpos++ { - n, _, _, err := p.grabPos(leafpos) + n, _, _, err := p.grabPos(leafpos, false) if err != nil { return false } @@ -476,7 +484,7 @@ func (p *Pollard) equalToForestIfThere(f *Forest) bool { } for leafpos := uint64(0); leafpos < f.numLeaves; leafpos++ { - n, _, _, err := p.grabPos(leafpos) + n, _, _, err := p.grabPos(leafpos, false) if err != nil || n == nil { continue // ignore grabPos errors / nils } diff --git a/accumulator/pollard_test.go b/accumulator/pollard_test.go index 693ce9fc..2d25a5c0 100644 --- a/accumulator/pollard_test.go +++ b/accumulator/pollard_test.go @@ -71,6 +71,7 @@ func TestPollardSimpleIngest(t *testing.T) { if err != nil { t.Fatal(err.Error()) } + err = p2.Modify(adds, nil) if err != nil { t.Fatal(err.Error()) @@ -301,7 +302,7 @@ func TestCache(t *testing.T) { pos := leafProof.Targets[0] fmt.Println(pos, l) - _, nsib, _, err := p.grabPos(pos) + _, nsib, _, err := p.grabPos(pos, false) if pos == p.numLeaves-1 { // roots are always cached diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index ca5ce7c0..4c486555 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -32,6 +32,13 @@ func (p *Pollard) ingestAndCheck(bp BatchProof, targs []Hash) error { if len(targs) == 0 { return nil } + // if bp targs and targs have different length, this will crash. + // they shouldn't though, make sure there are previous checks for that + + maxpp := len(bp.Proof) + pp := 0 // proof pointer; where we are in the pointer slice + // instead of popping like bp.proofs = bp.proofs[1:] + fmt.Printf("got proof %s\n", bp.ToString()) // the main thing ingestAndCheck does is write hashes to the pollard. 
@@ -45,13 +52,53 @@ func (p *Pollard) ingestAndCheck(bp BatchProof, targs []Hash) error { // first range through targets, populating / matching, and placing proof // hashes if the targets are not twins - for i, targpos := range bp.Targets { + for i := 0; i < len(bp.Targets); i++ { + targpos := bp.Targets[i] + + n, nsib, _, err := p.grabPos(targpos, true) + if err != nil { + return err + } + err = matchPop(n, targs[i]) + if err != nil { + return err + } + // see if current target is a twin target + if i+1 < len(targs) && bp.Targets[i]|1 == bp.Targets[i+1] { + err = matchPop(nsib, targs[i+1]) + if err != nil { + return err + } + i++ // dealt with an extra target + } else { // non-twin, needs proof + if pp == maxpp { + return fmt.Errorf("need more proofs") + } + err = matchPop(nsib, bp.Proof[pp]) + if err != nil { + return err + } + pp++ + } } return nil } +// quick function to populate, or match/fail +func matchPop(n *polNode, h Hash) error { + if n.data == empty { + n.data = h + return nil + } + if n.data == h { + return nil + } + + return fmt.Errorf("Proof doesn't match; expect %x, got %x", n.data, h) +} + // populate takes a root and populates it with the nodes of the paritial proof // tree that was computed in `verifyBatchProof`. func (p *Pollard) populate( From a0150470a57893d6aaeb500c496a40ee48b1fbbc Mon Sep 17 00:00:00 2001 From: adiabat Date: Sat, 30 Jan 2021 01:40:41 -0500 Subject: [PATCH 24/28] revert grabPos, new function to build 2d slice like ProofPositions but with nodes should be more efficient since we don't need to calculate all the way up to roots tricky part will be skipping proof hashes that we don't need though. 
--- accumulator/batchproof.go | 2 +- accumulator/pollard.go | 36 ++++++++++++++---------------------- accumulator/pollardproof.go | 27 +++++++++++++++++++++++---- 3 files changed, 38 insertions(+), 27 deletions(-) diff --git a/accumulator/batchproof.go b/accumulator/batchproof.go index dbe8c496..011d2634 100644 --- a/accumulator/batchproof.go +++ b/accumulator/batchproof.go @@ -279,7 +279,7 @@ func (p *Pollard) verifyBatchProof( parentPos := parent(target.Pos, rows) hash := parentHash(left.Val, right.Val) - populatedNode, _, _, err := p.grabPos(parentPos, false) + populatedNode, _, _, err := p.grabPos(parentPos) if err != nil { return nil, nil, fmt.Errorf("verify grabPos error %s", err.Error()) } diff --git a/accumulator/pollard.go b/accumulator/pollard.go index d9c3567b..c34c4414 100644 --- a/accumulator/pollard.go +++ b/accumulator/pollard.go @@ -250,7 +250,7 @@ func (p *Pollard) rem2(dels []uint64) error { nextRootPositions, _ := getRootsReverse(nextNumLeaves, ph) nextRoots := make([]*polNode, len(nextRootPositions)) for i, _ := range nextRoots { - nt, ntsib, _, err := p.grabPos(nextRootPositions[i], false) + nt, ntsib, _, err := p.grabPos(nextRootPositions[i]) if err != nil { return err } @@ -277,7 +277,7 @@ func (p *Pollard) hnFromPos(pos uint64) (*hashableNode, error) { // fmt.Printf("HnFromPos %d out of forest\n", pos) return nil, nil } - _, _, hn, err := p.grabPos(pos, false) + _, _, hn, err := p.grabPos(pos) if err != nil { return nil, err } @@ -313,11 +313,11 @@ func (p *Pollard) swapNodes(s arrow, row uint8) (*hashableNode, error) { // TODO could be improved by getting the highest common ancestor // and then splitting instead of doing 2 full descents - a, asib, _, err := p.grabPos(s.from, false) + a, asib, _, err := p.grabPos(s.from) if err != nil { return nil, err } - b, bsib, bhn, err := p.grabPos(s.to, false) + b, bsib, bhn, err := p.grabPos(s.to) if err != nil { return nil, err } @@ -346,7 +346,7 @@ func (p *Pollard) swapNodes(s arrow, row uint8) 
(*hashableNode, error) { // and a hashable node for the position ABOVE pos // Returns an error if it can't get it. // NOTE errors are not exhaustive; could return garbage without an error -func (p *Pollard) grabPos(pos uint64, build bool) ( +func (p *Pollard) grabPos(pos uint64) ( n, nsib *polNode, hn *hashableNode, err error) { // Grab the tree that the position is at tree, branchLen, bits := detectOffset(pos, p.numLeaves) @@ -373,19 +373,11 @@ func (p *Pollard) grabPos(pos uint64, build bool) ( } n, nsib = n.niece[lr], n.niece[lrSib] if n == nil { - if !build { - // if a node doesn't exist, crash - // no niece in this case - // TODO error message could be better - err = ErrorStrings[ErrorNoPollardNode] - return - } - // build is set, so make a new empty node here - n = &polNode{} - // also build empty siblings when build is set - if nsib == nil { - nsib = &polNode{} - } + // if a node doesn't exist, crash + // no niece in this case + // TODO error message could be better + err = ErrorStrings[ErrorNoPollardNode] + return } } @@ -404,7 +396,7 @@ func (p *Pollard) grabPos(pos uint64, build bool) ( // TODO should merge this with grabPos, as they're the same thing & this just // calls grabPos func (p *Pollard) read(pos uint64) Hash { - n, _, _, err := p.grabPos(pos, false) + n, _, _, err := p.grabPos(pos) if err != nil { fmt.Printf("read err %s pos %d\n", err.Error(), pos) return empty @@ -434,7 +426,7 @@ func (p *Pollard) toFull() (*Forest, error) { if !inForest(i, ff.numLeaves, ff.rows) { continue } - n, _, _, err := p.grabPos(i, false) + n, _, _, err := p.grabPos(i) if err != nil { return nil, err } @@ -462,7 +454,7 @@ func (p *Pollard) equalToForest(f *Forest) bool { } for leafpos := uint64(0); leafpos < f.numLeaves; leafpos++ { - n, _, _, err := p.grabPos(leafpos, false) + n, _, _, err := p.grabPos(leafpos) if err != nil { return false } @@ -484,7 +476,7 @@ func (p *Pollard) equalToForestIfThere(f *Forest) bool { } for leafpos := uint64(0); leafpos < f.numLeaves; 
leafpos++ { - n, _, _, err := p.grabPos(leafpos, false) + n, _, _, err := p.grabPos(leafpos) if err != nil || n == nil { continue // ignore grabPos errors / nils } diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index 4c486555..0122803f 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -55,7 +55,7 @@ func (p *Pollard) ingestAndCheck(bp BatchProof, targs []Hash) error { for i := 0; i < len(bp.Targets); i++ { targpos := bp.Targets[i] - n, nsib, _, err := p.grabPos(targpos, true) + n, nsib, _, err := p.grabPos(targpos) if err != nil { return err } @@ -86,16 +86,35 @@ func (p *Pollard) ingestAndCheck(bp BatchProof, targs []Hash) error { return nil } +// proofNodes is like ProofPositions but gets node pointers instead of positions, +// and doesn't go above populated nodes +func (p *Pollard) proofNodes(targetPositins []uint64) ([][]*polNode, error) { + // descend, like grabpos does, building a 2d slice of polnodes that you can + // then run matchPop on, ideally just by running throught linearly... 
+ // I think it'll all match up and work ok, then ingestAndCheck can look + // something like this: + + var activeNodes [][]*polNode + for _, row := range activeNodes { + for _, n := range row { + matchPop(n, empty) + // do stuff, call parent match with row+1 + } + } + + return nil, nil +} + // quick function to populate, or match/fail func matchPop(n *polNode, h Hash) error { - if n.data == empty { + if n.data == empty { // node was empty; populate n.data = h return nil } - if n.data == h { + if n.data == h { // node was full & matches; OK return nil } - + // didn't match return fmt.Errorf("Proof doesn't match; expect %x, got %x", n.data, h) } From 2e152f41dc09d3240c921d06837ce2aff677e3ae Mon Sep 17 00:00:00 2001 From: adiabat Date: Mon, 1 Feb 2021 00:38:04 -0500 Subject: [PATCH 25/28] add notes --- accumulator/notes.txt | 51 ++++++++++++++++++++++++++++++++++++++++- accumulator/pollard.go | 52 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 1 deletion(-) diff --git a/accumulator/notes.txt b/accumulator/notes.txt index f98083d6..71cd1fc9 100644 --- a/accumulator/notes.txt +++ b/accumulator/notes.txt @@ -1,4 +1,53 @@ -things to try: +hash to known + +Not quite there, but the basic design: + +You start with a pollard, and a proof / target list. +What's important in the pollard is what's there and what's not. + +Example: +start with a tree like this: + +14 +|---------------\ +12 13 +|-------\ |-------\ +08 09 10 11 +|---\ |---\ |---\ |---\ +00 01 02 03 04 05 06 07 + +You have 12, 10, 6. (a proof for 7) +You get targets: 0, 1, 2 +You get proof: 3, 13 + +You start at the root and start building a slice of nodes, top to bottom and left to right. + +... really you get a list of targets, which means if you go through each target you go top to bottom each time, but... there's probably a way to only make one pass top to bottom. +There might be a fun bit shifty way to do this; given a list of leaves, give a list of parents. 
(deduped) There might be something in the code for this already? If not, maybe make it. + +ok starting at the root and going down, build a list of node-pairs, and also a list of bools, which mean "target / target parent". I'll put a * for those + +[12* 13] +[8* 9*] +[0* 1*] [2* 3] + +I think the bool list can be a separate bitmap, in this case 10111110. (or 10; 11; 1110 for rows) + Then go bottom to top. +Bottom row: * means match/pop with target, no * means match/pop with proof. +Upper rows: * means hash then match/pop, no * means match/pop with proof. + +In this case, populate target 0, then 1, then 2, and poplate 3 with the first proof. + +Then move up. 8 gets hashed from 0, 1. We have the pointers to 0, 1 since nodes always come in pairs so a node's sibling will always be next to them in this list. +9 gets hashed from 2, 3. These hashes are populated, since there's nothing in 8 or 9 yet. + +12 gets hashed from 8, 9, and it's not populated, it's matched. 13 comes from the proof. + +Since 12 matched and was the only * of this row, we're done. We only need to keep going when there are * nodes that got populated. If all * nodes matched, we can finish without further hashing. (But maybe still populate the rest of the row with proofs.) + +I'm not 100% sure this always works but feels like it does. + +-------- Make moving / swapping nodes happen by changing the pointers of their parents, not by copying values .. that might help with some of this? Then if you tag a node as dirty, you don't care where it ends up; you have a pointer diff --git a/accumulator/pollard.go b/accumulator/pollard.go index c34c4414..58ed5732 100644 --- a/accumulator/pollard.go +++ b/accumulator/pollard.go @@ -342,6 +342,58 @@ func (p *Pollard) swapNodes(s arrow, row uint8) (*hashableNode, error) { return bhn, nil } +// blazeLeaves blazes a trail to all positions, creating empty nodes and +// siblings along the way if needed. 
Returns a 2d slice of nodes +func (p *Pollard) blazeLeaves(positions []uint64) (pslice [][]*polNode, err error) { + + // Determine the tree that the position is at + tree, branchLen, bits := detectOffset(pos, p.numLeaves) + if tree >= uint8(len(p.roots)) { + err = ErrorStrings[ErrorNotEnoughTrees] + return + } + // not sure about this + if branchLen == 0 { + return + } + // allocate branches + nbranch, sibbranch = make([]*polNode, branchLen), make([]*polNode, branchLen) + // start with the tree's root + n, nsib := p.roots[tree], p.roots[tree] + + // loop downward from the root, building nodes as we go, copying pointers + for h := branchLen - 1; h != 0; h-- { // go through branch + // copy pointers into slice + nbranch[h], sibbranch[h] = n, nsib + + // left / right determined by the bits of position number + lr := uint8(bits>>h) & 1 + // follow the sibling of lr + lrSib := lr ^ 1 + + // if a sib doesn't exist, need to create it and hook it in + if n.niece[lrSib] == nil { + n.niece[lrSib] = &polNode{} + } + n, nsib = n.niece[lr], n.niece[lrSib] + + if n == nil { + // if a node doesn't exist, crash + // no niece in this case + // TODO error message could be better + err = ErrorStrings[ErrorNoPollardNode] + return + } + } + + lr := uint8(bits) & 1 + // grab the sibling of lr + lrSib := lr ^ 1 + + n, nsib = n.niece[lrSib], n.niece[lr] + return // only happens when returning a root +} + // grabPos returns the thing you asked for, as well as its sibling // and a hashable node for the position ABOVE pos // Returns an error if it can't get it. 
From 86c7ad0b990243a29a8ea1ff753e619f4b072e4f Mon Sep 17 00:00:00 2001 From: adiabat Date: Fri, 16 Apr 2021 01:04:58 -0400 Subject: [PATCH 26/28] more notes on ways to change blockproofs --- accumulator/notes.txt | 48 ++++++++++++++++++++++++++++++- accumulator/pollard.go | 52 --------------------------------- accumulator/pollardproof.go | 57 +++++++++++++++++++++++++++++++++++++ 3 files changed, 104 insertions(+), 53 deletions(-) diff --git a/accumulator/notes.txt b/accumulator/notes.txt index 71cd1fc9..cb014552 100644 --- a/accumulator/notes.txt +++ b/accumulator/notes.txt @@ -1,4 +1,50 @@ -hash to known +You get a slice of targets. They're in order. You've also got a pollard with at least the roots. +You need to ingest the targets & proofs. All the targets and all the proofs need to be in the pollard at the end of the ingestion, because those hashes need to be there for the Modify() call +
+so we have 2 phases: blaze and matchPop +
+blaze builds trails down to the target leaves. It would be faster to not start at the root each time, and branch off instead, but that can be optimized later, not a big deal. Following pointers is pretty quick. +
+As it builds empty nodes down to the targets, it also builds a slice of pointers to those nodes. The pointers should match up right to the proof items. So the proof verification step is just two slices of equal length: one of node pointers, and one of hashes from the proof. Then you can just go through and matchPop each. +
+Of course it's not that simple since you also have to hash if you populated, or flag parents as skippable if you matched. +
+For now, keep matching all hashes in the proof. We're *hashing* to known, but we receive the whole proof and check the whole thing, so even if there are proofs we don't need as that data is already in the pollard, we will fail the ingest. +
+
+------ +
+change how ProofPositions works, maybe simpler. Up to the root first, then on to the next target. 
+for example
+
+
+30
+|-------------------------------\
+28 29
+|---------------\ |---------------\
+24 25 26 27
+|-------\ |-------\ |-------\ |-------\
+16 17 18 19 20 21 22 23
+|---\ |---\ |---\ |---\ |---\ |---\ |---\ |---\
+00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
+
+targets are 03, 09, 11. Instead of going row by row, go all the way up first. proofs would be
+
+02, 16, 25, 29 (done with proof for 03)
+08, 21, 27 (done with proof for 09; intersects at 29)
+12, 23 (done with proof for 11; intersects at 27)
+
+This might be simpler. ProofPositions and really just all the batchproof stuff is way too complicated and hard to understand. I don't think this is even any slower.
+
+This feels closer to how hashing to known should work; for each target we can descend to the leaf, keeping the whole branch down. The proof will populate that branch. Just need to keep track of what's been populated by the proof and what's already there.
+Like the 08, 21, 27 proof branch; how do we know to stop at 27, and not go to 29? Because 29 was already given. But that's not as obvious as you go up the branch. Just keep some kind of hash map based set? That's kind of an ugly way to do it. Keeping an extra bit on the polNode would do it, it seems. Or just get a function that, given a list of targets, generates a list of proofs, and breaks them up in the way I just did there with the (done with proof for) parts. That might not be so bad..? There's got to be a bit flippy way to do that without maps.
+
+Given a list of leaf positions, return an equal length list of heights to intersection. So in this case 03, 09, 11 returns 4, 3, 2.
+
+Also this is suboptimal because 29 shouldn't be in the proof. Nor 27. So pop off the proof when you intersect. Anyway I can try this, maybe gives a simpler proof position sequence. 
+ +----- + Not quite there, but the basic design: diff --git a/accumulator/pollard.go b/accumulator/pollard.go index 58ed5732..c34c4414 100644 --- a/accumulator/pollard.go +++ b/accumulator/pollard.go @@ -342,58 +342,6 @@ func (p *Pollard) swapNodes(s arrow, row uint8) (*hashableNode, error) { return bhn, nil } -// blazeLeaves blazes a trail to all positions, creating empty nodes and -// siblings along the way if needed. Returns a 2d slice of nodes -func (p *Pollard) blazeLeaves(positions []uint64) (pslice [][]*polNode, err error) { - - // Determine the tree that the position is at - tree, branchLen, bits := detectOffset(pos, p.numLeaves) - if tree >= uint8(len(p.roots)) { - err = ErrorStrings[ErrorNotEnoughTrees] - return - } - // not sure about this - if branchLen == 0 { - return - } - // allocate branches - nbranch, sibbranch = make([]*polNode, branchLen), make([]*polNode, branchLen) - // start with the tree's root - n, nsib := p.roots[tree], p.roots[tree] - - // loop downward from the root, building nodes as we go, copying pointers - for h := branchLen - 1; h != 0; h-- { // go through branch - // copy pointers into slice - nbranch[h], sibbranch[h] = n, nsib - - // left / right determined by the bits of position number - lr := uint8(bits>>h) & 1 - // follow the sibling of lr - lrSib := lr ^ 1 - - // if a sib doesn't exist, need to create it and hook it in - if n.niece[lrSib] == nil { - n.niece[lrSib] = &polNode{} - } - n, nsib = n.niece[lr], n.niece[lrSib] - - if n == nil { - // if a node doesn't exist, crash - // no niece in this case - // TODO error message could be better - err = ErrorStrings[ErrorNoPollardNode] - return - } - } - - lr := uint8(bits) & 1 - // grab the sibling of lr - lrSib := lr ^ 1 - - n, nsib = n.niece[lrSib], n.niece[lr] - return // only happens when returning a root -} - // grabPos returns the thing you asked for, as well as its sibling // and a hashable node for the position ABOVE pos // Returns an error if it can't get it. 
diff --git a/accumulator/pollardproof.go b/accumulator/pollardproof.go index 0122803f..8a74219d 100644 --- a/accumulator/pollardproof.go +++ b/accumulator/pollardproof.go @@ -118,6 +118,63 @@ func matchPop(n *polNode, h Hash) error { return fmt.Errorf("Proof doesn't match; expect %x, got %x", n.data, h) } +// blazeLeaves blazes a trail to all positions, creating empty nodes and +// siblings along the way if needed. Returns a slice of nodes that should +// correspond to the proof +func (p *Pollard) blazeLeaves(targets []uint64) (pslice []*polNode, err error) { + + // run through each position + for _, pos := range targets { + // Determine the tree that the position is at + tree, branchLen, bits := detectOffset(pos, p.numLeaves) + if tree >= uint8(len(p.roots)) { + err = ErrorStrings[ErrorNotEnoughTrees] + return + } + // not sure about this + if branchLen == 0 { + return + } + // allocate branches + nbranch, sibbranch := make([]*polNode, branchLen), make([]*polNode, branchLen) + // start with the tree's root + n, nsib := p.roots[tree], p.roots[tree] + + // loop downward from the root, building nodes as we go, copying pointers + for h := branchLen - 1; h != 0; h-- { // go through branch + // copy pointers into slice + nbranch[h], sibbranch[h] = n, nsib + + // left / right determined by the bits of position number + lr := uint8(bits>>h) & 1 + // follow the sibling of lr + lrSib := lr ^ 1 + + // if a sib doesn't exist, need to create it and hook it in + if n.niece[lrSib] == nil { + n.niece[lrSib] = &polNode{} + } + n, nsib = n.niece[lr], n.niece[lrSib] + + if n == nil { + // if a node doesn't exist, crash + // no niece in this case + // TODO error message could be better + err = ErrorStrings[ErrorNoPollardNode] + return + } + } + + lr := uint8(bits) & 1 + // grab the sibling of lr + lrSib := lr ^ 1 + + n, nsib = n.niece[lrSib], n.niece[lr] + + } + return // only happens when returning a root +} + // populate takes a root and populates it with the nodes of the paritial 
proof // tree that was computed in `verifyBatchProof`. func (p *Pollard) populate( From a4b40e6a67e86a229be9f9a4f285dc9ded48aa1d Mon Sep 17 00:00:00 2001 From: adiabat Date: Sat, 17 Apr 2021 00:36:58 -0400 Subject: [PATCH 27/28] add proofPositions2 Gives exact same output as previous proofPositions but shorter, and a bit easier to understand. Also it can output branch-at-a-time proofs which feel more suited to hash-to-known --- accumulator/pollard_test.go | 2 +- accumulator/utils.go | 75 ++++++++++++++++++++++++++++++++++++- accumulator/utils_test.go | 15 ++++++++ 3 files changed, 90 insertions(+), 2 deletions(-) diff --git a/accumulator/pollard_test.go b/accumulator/pollard_test.go index 2d25a5c0..d0b4ecf8 100644 --- a/accumulator/pollard_test.go +++ b/accumulator/pollard_test.go @@ -302,7 +302,7 @@ func TestCache(t *testing.T) { pos := leafProof.Targets[0] fmt.Println(pos, l) - _, nsib, _, err := p.grabPos(pos, false) + _, nsib, _, err := p.grabPos(pos) if pos == p.numLeaves-1 { // roots are always cached diff --git a/accumulator/utils.go b/accumulator/utils.go index bd2e6ece..49b00412 100644 --- a/accumulator/utils.go +++ b/accumulator/utils.go @@ -92,6 +92,60 @@ func ProofPositions( return proofPositions } +/* +Proof positions are computed by building a branch up to the root for every leaf, +but stopping early if that branch intersects a previously built branch. +In addition to stoping early, we delete the position where the intersection +occurs. +*/ + +// ProofPositions2 returns the positions that are needed to prove that +// targets exist. In top to bottom, then left to right ordering. +func ProofPositions2( + targets []uint64, numLeaves uint64, forestRows uint8) (prpos []uint64) { + edges, toprows := treeEdges(numLeaves) + maxRow := toprows[0] // top row we should make proofs up to + // each target gets its own proof slice. 
branches[targetnum][height] = pos + branches := make([][]uint64, len(targets)) + for i := range branches { + branches[i] = make([]uint64, forestRows) + } + // I think we don't have to worry that it starts out filled with 0s. + // We should always fill in entries before comparing them... probably OK +nextBranch: + for i, _ := range branches { + pos := targets[i] ^ 1 // position starts at the target's sibling + for len(edges) > 1 && pos > edges[0] { + edges = edges[1:] + toprows = toprows[1:] + maxRow = toprows[0] + } + for h, _ := range branches[i] { + if uint8(h) >= maxRow { + continue nextBranch + } + // check prior target branches to see if we collide positions + for k := i - 1; k >= 0; k-- { + // if branches have a sibling collision, remove it + if pos>>1 == branches[k][h]>>1 { + branches[k][h] = 0 + continue nextBranch // done with this branch + } + } + branches[i][h] = pos + pos = parent(pos, forestRows) ^ 1 // next pos is parent's sibling + } + } + for h, _ := range branches[0] { + for i, _ := range branches { + if branches[i][h] != 0 { + prpos = append(prpos, branches[i][h]) + } + } + } + return +} + // takes a slice of dels, removes the twins (in place) and returns a slice // of parents of twins // @@ -148,6 +202,25 @@ func detectRow(position uint64, forestRows uint8) uint8 { return h } +// treeEdges tells you the right edge of every tree, and the number of +// rows in that tree +func treeEdges(numLeaves uint64) (edge []uint64, toprow []uint8) { + maxRow := uint8(64 - bits.LeadingZeros64(numLeaves-1)) + edge = make([]uint64, numRoots(numLeaves)) + toprow = make([]uint8, numRoots(numLeaves)) + treenum := uint8(0) + treeedge := uint64(0) + for h := maxRow; h < 64; h-- { + if numLeaves&(1< Date: Tue, 20 Apr 2021 01:16:10 -0400 Subject: [PATCH 28/28] add notes about hashing to known hash to known isn't enough, you have to hash past known values... 
--- accumulator/notes.txt | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/accumulator/notes.txt b/accumulator/notes.txt index cb014552..559905c6 100644 --- a/accumulator/notes.txt +++ b/accumulator/notes.txt @@ -1,3 +1,25 @@ +Problems with hash to known: + +You can't JUST hash to known. That's not enough. Example: + +28 +|---------------\ +24 25* 26 +|-------\ |-------\ |-------\ +16 17* 18 19 20 21 22 +|---\ |---\ |---\ |---\ |---\ |---\ |---\ +00 01* 02 03 04d 05d 06d 07d 08 09 10 11 12 13 14 + +A proof for 00 is already known: 01, 17, 25. 04, 05, 06, 07 are deleted. +The proof for this deletion is just 24. If you only hash to known, you'll hash to find 18 and 19, then hash those to get 25 and see that it's a match. So you're done, right? +No... because you haven't populated 24. And when the deletion happens, 26 moves to 25, and then you'll need 24 to compute the new 28. +Also you can't just populate 24 without hashing up to 28, because what if the 24 they give you as a proof is wrong? Then you'd have a bad accumulator state. + +It seems like you can never populate without matching above it. That makes sense; you need to verify every hash you're populating in to the forest. +If 24 was already present then you could skip the populate and hash steps. + + + You get a slice of targets. They're in order. You've also got a pollard with at least the roots. You need to ingest the targets & proofs. All the targets and all the proofs need to be in the pollard at the end of the ingestion, because those hashes need to be there for the Modify() call