diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index e933db9ea19..6a6d6e02653 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -8,6 +8,7 @@ on: paths: - gnovm/**/*.gno - examples/**/*.gno + - examples/**/gno.mod concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/examples/gno.land/p/demo/avl/list/gno.mod b/examples/gno.land/p/demo/avl/list/gno.mod new file mode 100644 index 00000000000..c05923b7708 --- /dev/null +++ b/examples/gno.land/p/demo/avl/list/gno.mod @@ -0,0 +1 @@ +module gno.land/p/demo/avl/list diff --git a/examples/gno.land/p/demo/avl/list/list.gno b/examples/gno.land/p/demo/avl/list/list.gno new file mode 100644 index 00000000000..0875eb66e01 --- /dev/null +++ b/examples/gno.land/p/demo/avl/list/list.gno @@ -0,0 +1,298 @@ +// Package list implements a dynamic list data structure backed by an AVL tree. +// It provides O(log n) operations for most list operations while maintaining +// order stability. +// +// The list supports various operations including append, get, set, delete, +// range queries, and iteration. It can store values of any type. 
+// +// Example usage: +// +// // Create a new list and add elements +// var l list.List +// l.Append(1, 2, 3) +// +// // Get and set elements +// value := l.Get(1) // returns 2 +// l.Set(1, 42) // updates index 1 to 42 +// +// // Delete elements +// l.Delete(0) // removes first element +// +// // Iterate over elements +// l.ForEach(func(index int, value interface{}) bool { +// ufmt.Printf("index %d: %v\n", index, value) +// return false // continue iteration +// }) +// // Output: +// // index 0: 42 +// // index 1: 3 +// +// // Create a list of specific size +// l = list.Make(3, "default") // creates [default, default, default] +// +// // Create a list using a variable declaration +// var l2 list.List +// l2.Append(4, 5, 6) +// println(l2.Len()) // Output: 3 +package list + +import ( + "gno.land/p/demo/avl" + "gno.land/p/demo/seqid" +) + +// List represents an ordered sequence of items backed by an AVL tree +type List struct { + tree avl.Tree + idGen seqid.ID +} + +// Len returns the number of elements in the list. +// +// Example: +// +// l := list.New() +// l.Append(1, 2, 3) +// println(l.Len()) // Output: 3 +func (l *List) Len() int { + return l.tree.Size() +} + +// Append adds one or more values to the end of the list. +// +// Example: +// +// l := list.New() +// l.Append(1) // adds single value +// l.Append(2, 3, 4) // adds multiple values +// println(l.Len()) // Output: 4 +func (l *List) Append(values ...interface{}) { + for _, v := range values { + l.tree.Set(l.idGen.Next().String(), v) + } +} + +// Get returns the value at the specified index. +// Returns nil if index is out of bounds. 
+// +// Example: +// +// l := list.New() +// l.Append(1, 2, 3) +// println(l.Get(1)) // Output: 2 +// println(l.Get(-1)) // Output: nil +// println(l.Get(999)) // Output: nil +func (l *List) Get(index int) interface{} { + if index < 0 || index >= l.tree.Size() { + return nil + } + _, value := l.tree.GetByIndex(index) + return value +} + +// Set updates or appends a value at the specified index. +// Returns true if the operation was successful, false otherwise. +// For empty lists, only index 0 is valid (append case). +// +// Example: +// +// l := list.New() +// l.Append(1, 2, 3) +// +// l.Set(1, 42) // updates existing index +// println(l.Get(1)) // Output: 42 +// +// l.Set(3, 4) // appends at end +// println(l.Get(3)) // Output: 4 +// +// l.Set(-1, 5) // invalid index +// println(l.Len()) // Output: 4 (list unchanged) +func (l *List) Set(index int, value interface{}) bool { + size := l.tree.Size() + + // Handle empty list case - only allow index 0 + if size == 0 { + if index == 0 { + l.Append(value) + return true + } + return false + } + + if index < 0 || index > size { + return false + } + + // If setting at the end (append case) + if index == size { + l.Append(value) + return true + } + + // Get the key at the specified index + key, _ := l.tree.GetByIndex(index) + if key == "" { + return false + } + + // Update the value at the existing key + l.tree.Set(key, value) + return true +} + +// Delete removes the element at the specified index. +// Returns the deleted value and true if successful, nil and false otherwise. 
+// +// Example: +// +// l := list.New() +// l.Append(1, 2, 3) +// +// val, ok := l.Delete(1) +// println(val, ok) // Output: 2 true +// println(l.Len()) // Output: 2 +// +// val, ok = l.Delete(-1) +// println(val, ok) // Output: nil false +func (l *List) Delete(index int) (interface{}, bool) { + size := l.tree.Size() + // Always return nil, false for empty list + if size == 0 { + return nil, false + } + + if index < 0 || index >= size { + return nil, false + } + + key, value := l.tree.GetByIndex(index) + if key == "" { + return nil, false + } + + l.tree.Remove(key) + return value, true +} + +// Slice returns a slice of values from startIndex (inclusive) to endIndex (exclusive). +// Returns nil if the range is invalid. +// +// Example: +// +// l := list.New() +// l.Append(1, 2, 3, 4, 5) +// +// println(l.Slice(1, 4)) // Output: [2 3 4] +// println(l.Slice(-1, 2)) // Output: [1 2] +// println(l.Slice(3, 999)) // Output: [4 5] +// println(l.Slice(3, 2)) // Output: nil +func (l *List) Slice(startIndex, endIndex int) []interface{} { + size := l.tree.Size() + + // Normalize bounds + if startIndex < 0 { + startIndex = 0 + } + if endIndex > size { + endIndex = size + } + if startIndex >= endIndex { + return nil + } + + count := endIndex - startIndex + result := make([]interface{}, count) + + i := 0 + l.tree.IterateByOffset(startIndex, count, func(_ string, value interface{}) bool { + result[i] = value + i++ + return false + }) + return result +} + +// ForEach iterates through all elements in the list. +func (l *List) ForEach(fn func(index int, value interface{}) bool) { + if l.tree.Size() == 0 { + return + } + + index := 0 + l.tree.IterateByOffset(0, l.tree.Size(), func(_ string, value interface{}) bool { + result := fn(index, value) + index++ + return result + }) +} + +// Clone creates a shallow copy of the list. 
+// +// Example: +// +// l := list.New() +// l.Append(1, 2, 3) +// +// clone := l.Clone() +// clone.Set(0, 42) +// +// println(l.Get(0)) // Output: 1 +// println(clone.Get(0)) // Output: 42 +func (l *List) Clone() *List { + newList := &List{ + tree: avl.Tree{}, + idGen: l.idGen, + } + + size := l.tree.Size() + if size == 0 { + return newList + } + + l.tree.IterateByOffset(0, size, func(_ string, value interface{}) bool { + newList.Append(value) + return false + }) + + return newList +} + +// DeleteRange removes elements from startIndex (inclusive) to endIndex (exclusive). +// Returns the number of elements deleted. +// +// Example: +// +// l := list.New() +// l.Append(1, 2, 3, 4, 5) +// +// deleted := l.DeleteRange(1, 4) +// println(deleted) // Output: 3 +// println(l.Range(0, l.Len())) // Output: [1 5] +func (l *List) DeleteRange(startIndex, endIndex int) int { + size := l.tree.Size() + + // Normalize bounds + if startIndex < 0 { + startIndex = 0 + } + if endIndex > size { + endIndex = size + } + if startIndex >= endIndex { + return 0 + } + + // Collect keys to delete + keysToDelete := make([]string, 0, endIndex-startIndex) + l.tree.IterateByOffset(startIndex, endIndex-startIndex, func(key string, _ interface{}) bool { + keysToDelete = append(keysToDelete, key) + return false + }) + + // Delete collected keys + for _, key := range keysToDelete { + l.tree.Remove(key) + } + + return len(keysToDelete) +} diff --git a/examples/gno.land/p/demo/avl/list/list_test.gno b/examples/gno.land/p/demo/avl/list/list_test.gno new file mode 100644 index 00000000000..265fbdb5eb1 --- /dev/null +++ b/examples/gno.land/p/demo/avl/list/list_test.gno @@ -0,0 +1,409 @@ +package list + +import ( + "testing" +) + +func TestList_Basic(t *testing.T) { + var l List + + // Test empty list + if l.Len() != 0 { + t.Errorf("new list should be empty, got len %d", l.Len()) + } + + // Test append and length + l.Append(1, 2, 3) + if l.Len() != 3 { + t.Errorf("expected len 3, got %d", l.Len()) + } + + 
// Test get + if v := l.Get(0); v != 1 { + t.Errorf("expected 1 at index 0, got %v", v) + } + if v := l.Get(1); v != 2 { + t.Errorf("expected 2 at index 1, got %v", v) + } + if v := l.Get(2); v != 3 { + t.Errorf("expected 3 at index 2, got %v", v) + } + + // Test out of bounds + if v := l.Get(-1); v != nil { + t.Errorf("expected nil for negative index, got %v", v) + } + if v := l.Get(3); v != nil { + t.Errorf("expected nil for out of bounds index, got %v", v) + } +} + +func TestList_Set(t *testing.T) { + var l List + l.Append(1, 2, 3) + + // Test valid set within bounds + if ok := l.Set(1, 42); !ok { + t.Error("Set should return true for valid index") + } + if v := l.Get(1); v != 42 { + t.Errorf("expected 42 after Set, got %v", v) + } + + // Test set at size (append) + if ok := l.Set(3, 4); !ok { + t.Error("Set should return true when appending at size") + } + if v := l.Get(3); v != 4 { + t.Errorf("expected 4 after Set at size, got %v", v) + } + + // Test invalid sets + if ok := l.Set(-1, 10); ok { + t.Error("Set should return false for negative index") + } + if ok := l.Set(5, 10); ok { + t.Error("Set should return false for index > size") + } + + // Verify list state hasn't changed after invalid operations + expected := []interface{}{1, 42, 3, 4} + for i, want := range expected { + if got := l.Get(i); got != want { + t.Errorf("index %d = %v; want %v", i, got, want) + } + } +} + +func TestList_Delete(t *testing.T) { + var l List + l.Append(1, 2, 3) + + // Test valid delete + if v, ok := l.Delete(1); !ok || v != 2 { + t.Errorf("Delete(1) = %v, %v; want 2, true", v, ok) + } + if l.Len() != 2 { + t.Errorf("expected len 2 after delete, got %d", l.Len()) + } + if v := l.Get(1); v != 3 { + t.Errorf("expected 3 at index 1 after delete, got %v", v) + } + + // Test invalid delete + if v, ok := l.Delete(-1); ok || v != nil { + t.Errorf("Delete(-1) = %v, %v; want nil, false", v, ok) + } + if v, ok := l.Delete(2); ok || v != nil { + t.Errorf("Delete(2) = %v, %v; want nil, 
false", v, ok) + } +} + +func TestList_Slice(t *testing.T) { + var l List + l.Append(1, 2, 3, 4, 5) + + // Test valid ranges + values := l.Slice(1, 4) + expected := []interface{}{2, 3, 4} + if !sliceEqual(values, expected) { + t.Errorf("Slice(1,4) = %v; want %v", values, expected) + } + + // Test edge cases + if values := l.Slice(-1, 2); !sliceEqual(values, []interface{}{1, 2}) { + t.Errorf("Slice(-1,2) = %v; want [1 2]", values) + } + if values := l.Slice(3, 10); !sliceEqual(values, []interface{}{4, 5}) { + t.Errorf("Slice(3,10) = %v; want [4 5]", values) + } + if values := l.Slice(3, 2); values != nil { + t.Errorf("Slice(3,2) = %v; want nil", values) + } +} + +func TestList_ForEach(t *testing.T) { + var l List + l.Append(1, 2, 3) + + sum := 0 + l.ForEach(func(index int, value interface{}) bool { + sum += value.(int) + return false + }) + + if sum != 6 { + t.Errorf("ForEach sum = %d; want 6", sum) + } + + // Test early termination + count := 0 + l.ForEach(func(index int, value interface{}) bool { + count++ + return true // stop after first item + }) + + if count != 1 { + t.Errorf("ForEach early termination count = %d; want 1", count) + } +} + +func TestList_Clone(t *testing.T) { + var l List + l.Append(1, 2, 3) + + clone := l.Clone() + + // Test same length + if clone.Len() != l.Len() { + t.Errorf("clone.Len() = %d; want %d", clone.Len(), l.Len()) + } + + // Test same values + for i := 0; i < l.Len(); i++ { + if clone.Get(i) != l.Get(i) { + t.Errorf("clone.Get(%d) = %v; want %v", i, clone.Get(i), l.Get(i)) + } + } + + // Test independence + l.Set(0, 42) + if clone.Get(0) == l.Get(0) { + t.Error("clone should be independent of original") + } +} + +func TestList_DeleteRange(t *testing.T) { + var l List + l.Append(1, 2, 3, 4, 5) + + // Test valid range delete + deleted := l.DeleteRange(1, 4) + if deleted != 3 { + t.Errorf("DeleteRange(1,4) deleted %d elements; want 3", deleted) + } + if l.Len() != 2 { + t.Errorf("after DeleteRange(1,4) len = %d; want 2", l.Len()) + } 
+ expected := []interface{}{1, 5} + for i, want := range expected { + if got := l.Get(i); got != want { + t.Errorf("after DeleteRange(1,4) index %d = %v; want %v", i, got, want) + } + } + + // Test edge cases + l = List{} + l.Append(1, 2, 3) + + // Delete with negative start + if deleted := l.DeleteRange(-1, 2); deleted != 2 { + t.Errorf("DeleteRange(-1,2) deleted %d elements; want 2", deleted) + } + + // Delete with end > length + l = List{} + l.Append(1, 2, 3) + if deleted := l.DeleteRange(1, 5); deleted != 2 { + t.Errorf("DeleteRange(1,5) deleted %d elements; want 2", deleted) + } + + // Delete invalid range + if deleted := l.DeleteRange(2, 1); deleted != 0 { + t.Errorf("DeleteRange(2,1) deleted %d elements; want 0", deleted) + } + + // Delete empty range + if deleted := l.DeleteRange(1, 1); deleted != 0 { + t.Errorf("DeleteRange(1,1) deleted %d elements; want 0", deleted) + } +} + +func TestList_EmptyOperations(t *testing.T) { + var l List + + // Operations on empty list + if v := l.Get(0); v != nil { + t.Errorf("Get(0) on empty list = %v; want nil", v) + } + + // Set should work at index 0 for empty list (append case) + if ok := l.Set(0, 1); !ok { + t.Error("Set(0,1) on empty list = false; want true") + } + if v := l.Get(0); v != 1 { + t.Errorf("Get(0) after Set = %v; want 1", v) + } + + l = List{} // Reset to empty list + if v, ok := l.Delete(0); ok || v != nil { + t.Errorf("Delete(0) on empty list = %v, %v; want nil, false", v, ok) + } + if values := l.Slice(0, 1); values != nil { + t.Errorf("Range(0,1) on empty list = %v; want nil", values) + } +} + +func TestList_DifferentTypes(t *testing.T) { + var l List + + // Test with different types + l.Append(42, "hello", true, 3.14) + + if v := l.Get(0).(int); v != 42 { + t.Errorf("Get(0) = %v; want 42", v) + } + if v := l.Get(1).(string); v != "hello" { + t.Errorf("Get(1) = %v; want 'hello'", v) + } + if v := l.Get(2).(bool); !v { + t.Errorf("Get(2) = %v; want true", v) + } + if v := l.Get(3).(float64); v != 3.14 
{ + t.Errorf("Get(3) = %v; want 3.14", v) + } +} + +func TestList_LargeOperations(t *testing.T) { + var l List + + // Test with larger number of elements + n := 1000 + for i := 0; i < n; i++ { + l.Append(i) + } + + if l.Len() != n { + t.Errorf("Len() = %d; want %d", l.Len(), n) + } + + // Test range on large list + values := l.Slice(n-3, n) + expected := []interface{}{n - 3, n - 2, n - 1} + if !sliceEqual(values, expected) { + t.Errorf("Range(%d,%d) = %v; want %v", n-3, n, values, expected) + } + + // Test large range deletion + deleted := l.DeleteRange(100, 900) + if deleted != 800 { + t.Errorf("DeleteRange(100,900) = %d; want 800", deleted) + } + if l.Len() != 200 { + t.Errorf("Len() after large delete = %d; want 200", l.Len()) + } +} + +func TestList_ChainedOperations(t *testing.T) { + var l List + + // Test sequence of operations + l.Append(1, 2, 3) + l.Delete(1) + l.Append(4) + l.Set(1, 5) + + expected := []interface{}{1, 5, 4} + for i, want := range expected { + if got := l.Get(i); got != want { + t.Errorf("index %d = %v; want %v", i, got, want) + } + } +} + +func TestList_RangeEdgeCases(t *testing.T) { + var l List + l.Append(1, 2, 3, 4, 5) + + // Test various edge cases for Range + cases := []struct { + start, end int + want []interface{} + }{ + {-10, 2, []interface{}{1, 2}}, + {3, 10, []interface{}{4, 5}}, + {0, 0, nil}, + {5, 5, nil}, + {4, 3, nil}, + {-1, -1, nil}, + } + + for _, tc := range cases { + got := l.Slice(tc.start, tc.end) + if !sliceEqual(got, tc.want) { + t.Errorf("Slice(%d,%d) = %v; want %v", tc.start, tc.end, got, tc.want) + } + } +} + +func TestList_IndexConsistency(t *testing.T) { + var l List + + // Initial additions + l.Append(1, 2, 3, 4, 5) // [1,2,3,4,5] + + // Delete from middle + l.Delete(2) // [1,2,4,5] + + // Add more elements + l.Append(6, 7) // [1,2,4,5,6,7] + + // Delete range from middle + l.DeleteRange(1, 4) // [1,6,7] + + // Add more elements + l.Append(8, 9, 10) // [1,6,7,8,9,10] + + // Verify sequence is continuous + 
expected := []interface{}{1, 6, 7, 8, 9, 10} + for i, want := range expected { + if got := l.Get(i); got != want { + t.Errorf("index %d = %v; want %v", i, got, want) + } + } + + // Verify no extra elements exist + if l.Len() != len(expected) { + t.Errorf("length = %d; want %d", l.Len(), len(expected)) + } + + // Verify all indices are accessible + allValues := l.Slice(0, l.Len()) + if !sliceEqual(allValues, expected) { + t.Errorf("Slice(0, Len()) = %v; want %v", allValues, expected) + } + + // Verify no gaps in iteration + var iteratedValues []interface{} + var indices []int + l.ForEach(func(index int, value interface{}) bool { + iteratedValues = append(iteratedValues, value) + indices = append(indices, index) + return false + }) + + // Check values from iteration + if !sliceEqual(iteratedValues, expected) { + t.Errorf("ForEach values = %v; want %v", iteratedValues, expected) + } + + // Check indices are sequential + for i, idx := range indices { + if idx != i { + t.Errorf("ForEach index %d = %d; want %d", i, idx, i) + } + } +} + +// Helper function to compare slices +func sliceEqual(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/examples/gno.land/p/demo/avl/node.gno b/examples/gno.land/p/demo/avl/node.gno index 7308e163768..7d4ddffff02 100644 --- a/examples/gno.land/p/demo/avl/node.gno +++ b/examples/gno.land/p/demo/avl/node.gno @@ -384,7 +384,7 @@ func (node *Node) TraverseInRange(start, end string, ascending bool, leavesOnly // TraverseByOffset traverses all nodes, including inner nodes. // A limit of math.MaxInt means no limit. 
-func (node *Node) TraverseByOffset(offset, limit int, descending bool, leavesOnly bool, cb func(*Node) bool) bool { +func (node *Node) TraverseByOffset(offset, limit int, ascending bool, leavesOnly bool, cb func(*Node) bool) bool { if node == nil { return false } @@ -401,21 +401,21 @@ func (node *Node) TraverseByOffset(offset, limit int, descending bool, leavesOnl } // go to the actual recursive function. - return node.traverseByOffset(offset, limit, descending, leavesOnly, cb) + return node.traverseByOffset(offset, limit, ascending, leavesOnly, cb) } // TraverseByOffset traverses the subtree rooted at the node by offset and limit, // in either ascending or descending order, and applies the callback function to each traversed node. // If leavesOnly is true, only leaf nodes are visited. -func (node *Node) traverseByOffset(offset, limit int, descending bool, leavesOnly bool, cb func(*Node) bool) bool { +func (node *Node) traverseByOffset(offset, limit int, ascending bool, leavesOnly bool, cb func(*Node) bool) bool { // caller guarantees: offset < node.size; limit > 0. 
if !leavesOnly { if cb(node) { - return true + return true // Stop traversal if callback returns true } } first, second := node.getLeftNode(), node.getRightNode() - if descending { + if !ascending { first, second = second, first } if first.IsLeaf() { @@ -423,10 +423,12 @@ func (node *Node) traverseByOffset(offset, limit int, descending bool, leavesOnl if offset > 0 { offset-- } else { - cb(first) + if cb(first) { + return true // Stop traversal if callback returns true + } limit-- if limit <= 0 { - return false + return true // Stop traversal when limit is reached } } } else { @@ -437,7 +439,7 @@ func (node *Node) traverseByOffset(offset, limit int, descending bool, leavesOnl if offset >= first.size { offset -= first.size // 1 } else { - if first.traverseByOffset(offset, limit, descending, leavesOnly, cb) { + if first.traverseByOffset(offset, limit, ascending, leavesOnly, cb) { return true } // number of leaves which could actually be called from inside @@ -460,7 +462,7 @@ func (node *Node) traverseByOffset(offset, limit int, descending bool, leavesOnl } // => if it is not a leaf, it will still be enough to recursively call this // function with the updated offset and limit - return second.traverseByOffset(offset, limit, descending, leavesOnly, cb) + return second.traverseByOffset(offset, limit, ascending, leavesOnly, cb) } // Only used in testing... 
diff --git a/examples/gno.land/p/demo/avl/node_test.gno b/examples/gno.land/p/demo/avl/node_test.gno index f24217625ea..3682cbc7c80 100644 --- a/examples/gno.land/p/demo/avl/node_test.gno +++ b/examples/gno.land/p/demo/avl/node_test.gno @@ -17,36 +17,34 @@ Book Browser` tt := []struct { name string - desc bool + asc bool }{ - {"ascending", false}, - {"descending", true}, + {"ascending", true}, + {"descending", false}, } for _, tt := range tt { t.Run(tt.name, func(t *testing.T) { + // use sl to insert the values, and reversed to match the values + // we do this to ensure that the order of TraverseByOffset is independent + // from the insertion order sl := strings.Split(testStrings, "\n") - - // sort a first time in the order opposite to how we'll be traversing - // the tree, to ensure that we are not just iterating through with - // insertion order. sort.Strings(sl) - if !tt.desc { - reverseSlice(sl) + reversed := append([]string{}, sl...) + reverseSlice(reversed) + + if !tt.asc { + sl, reversed = reversed, sl } - r := NewNode(sl[0], nil) - for _, v := range sl[1:] { + r := NewNode(reversed[0], nil) + for _, v := range reversed[1:] { r, _ = r.Set(v, nil) } - // then sort sl in the order we'll be traversing it, so that we can - // compare the result with sl. 
- reverseSlice(sl) - var result []string for i := 0; i < len(sl); i++ { - r.TraverseByOffset(i, 1, tt.desc, true, func(n *Node) bool { + r.TraverseByOffset(i, 1, tt.asc, true, func(n *Node) bool { result = append(result, n.Key()) return false }) @@ -66,7 +64,7 @@ Browser` exp := sl[i:max] actual := []string{} - r.TraverseByOffset(i, l, tt.desc, true, func(tr *Node) bool { + r.TraverseByOffset(i, l, tt.asc, true, func(tr *Node) bool { actual = append(actual, tr.Key()) return false }) @@ -422,6 +420,30 @@ func TestTraverse(t *testing.T) { t.Errorf("want %v got %v", expected, result) } }) + + t.Run("early termination", func(t *testing.T) { + if len(tt.input) == 0 { + return // Skip for empty tree + } + + var result []string + var count int + tree.Iterate("", "", func(n *Node) bool { + count++ + result = append(result, n.Key()) + return true // Stop after first item + }) + + if count != 1 { + t.Errorf("Expected callback to be called exactly once, got %d calls", count) + } + if len(result) != 1 { + t.Errorf("Expected exactly one result, got %d items", len(result)) + } + if len(result) > 0 && result[0] != tt.expected[0] { + t.Errorf("Expected first item to be %v, got %v", tt.expected[0], result[0]) + } + }) }) } } @@ -435,7 +457,7 @@ func TestRotateWhenHeightDiffers(t *testing.T) { { "right rotation when left subtree is higher", []string{"E", "C", "A", "B", "D"}, - []string{"A", "B", "C", "E", "D"}, + []string{"A", "B", "C", "D", "E"}, }, { "left rotation when right subtree is higher", @@ -445,12 +467,12 @@ func TestRotateWhenHeightDiffers(t *testing.T) { { "left-right rotation", []string{"E", "A", "C", "B", "D"}, - []string{"A", "B", "C", "E", "D"}, + []string{"A", "B", "C", "D", "E"}, }, { "right-left rotation", []string{"A", "E", "C", "B", "D"}, - []string{"A", "B", "C", "E", "D"}, + []string{"A", "B", "C", "D", "E"}, }, } @@ -533,7 +555,7 @@ func slicesEqual(w1, w2 []string) bool { return false } for i := 0; i < len(w1); i++ { - if w1[0] != w2[0] { + if w1[i] != 
w2[i] { return false } } diff --git a/examples/gno.land/p/demo/avl/pager/pager.gno b/examples/gno.land/p/demo/avl/pager/pager.gno index ca3eadde032..f5f909a473d 100644 --- a/examples/gno.land/p/demo/avl/pager/pager.gno +++ b/examples/gno.land/p/demo/avl/pager/pager.gno @@ -90,12 +90,12 @@ func (p *Pager) GetPageWithSize(pageNumber, pageSize int) *Page { items := []Item{} if p.Reversed { - p.Tree.IterateByOffset(startIndex, endIndex-startIndex, func(key string, value interface{}) bool { + p.Tree.ReverseIterateByOffset(startIndex, endIndex-startIndex, func(key string, value interface{}) bool { items = append(items, Item{Key: key, Value: value}) return false }) } else { - p.Tree.ReverseIterateByOffset(startIndex, endIndex-startIndex, func(key string, value interface{}) bool { + p.Tree.IterateByOffset(startIndex, endIndex-startIndex, func(key string, value interface{}) bool { items = append(items, Item{Key: key, Value: value}) return false }) diff --git a/examples/gno.land/p/demo/grc/grc20/token.gno b/examples/gno.land/p/demo/grc/grc20/token.gno index 4634bae933b..3ab3abc63a3 100644 --- a/examples/gno.land/p/demo/grc/grc20/token.gno +++ b/examples/gno.land/p/demo/grc/grc20/token.gno @@ -65,6 +65,14 @@ func (tok *Token) RenderHome() string { return str } +// Getter returns a TokenGetter function that returns this token. This allows +// storing indirect pointers to a token in a remote realm. +func (tok *Token) Getter() TokenGetter { + return func() *Token { + return tok + } +} + // SpendAllowance decreases the allowance of the specified owner and spender. 
func (led *PrivateLedger) SpendAllowance(owner, spender std.Address, amount uint64) error { if !owner.IsValid() { diff --git a/examples/gno.land/p/moul/typeutil/gno.mod b/examples/gno.land/p/moul/typeutil/gno.mod new file mode 100644 index 00000000000..4f9c432456b --- /dev/null +++ b/examples/gno.land/p/moul/typeutil/gno.mod @@ -0,0 +1 @@ +module gno.land/p/moul/typeutil diff --git a/examples/gno.land/p/moul/typeutil/typeutil.gno b/examples/gno.land/p/moul/typeutil/typeutil.gno new file mode 100644 index 00000000000..1fa79b94549 --- /dev/null +++ b/examples/gno.land/p/moul/typeutil/typeutil.gno @@ -0,0 +1,715 @@ +// Package typeutil provides utility functions for converting between different types +// and checking their states. It aims to provide consistent behavior across different +// types while remaining lightweight and dependency-free. +package typeutil + +import ( + "errors" + "sort" + "std" + "strconv" + "strings" + "time" +) + +// stringer is the interface that wraps the String method. +type stringer interface { + String() string +} + +// ToString converts any value to its string representation. 
+// It supports a wide range of Go types including: +// - Basic: string, bool +// - Numbers: int, int8-64, uint, uint8-64, float32, float64 +// - Special: time.Time, std.Address, []byte +// - Slices: []T for most basic types +// - Maps: map[string]string, map[string]interface{} +// - Interface: types implementing String() string +// +// Example usage: +// +// str := typeutil.ToString(42) // "42" +// str = typeutil.ToString([]int{1, 2}) // "[1 2]" +// str = typeutil.ToString(map[string]string{ // "map[a:1 b:2]" +// "a": "1", +// "b": "2", +// }) +func ToString(val interface{}) string { + if val == nil { + return "" + } + + // First check if value implements Stringer interface + if s, ok := val.(interface{ String() string }); ok { + return s.String() + } + + switch v := val.(type) { + // Pointer types - dereference and recurse + case *string: + if v == nil { + return "" + } + return *v + case *int: + if v == nil { + return "" + } + return strconv.Itoa(*v) + case *bool: + if v == nil { + return "" + } + return strconv.FormatBool(*v) + case *time.Time: + if v == nil { + return "" + } + return v.String() + case *std.Address: + if v == nil { + return "" + } + return string(*v) + + // String types + case string: + return v + case stringer: + return v.String() + + // Special types + case time.Time: + return v.String() + case std.Address: + return string(v) + case []byte: + return string(v) + case struct{}: + return "{}" + + // Integer types + case int: + return strconv.Itoa(v) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(v, 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + + // Float 
types + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + + // Boolean + case bool: + if v { + return "true" + } + return "false" + + // Slice types + case []string: + return join(v) + case []int: + return join(v) + case []int32: + return join(v) + case []int64: + return join(v) + case []float32: + return join(v) + case []float64: + return join(v) + case []interface{}: + return join(v) + case []time.Time: + return joinTimes(v) + case []stringer: + return join(v) + case []std.Address: + return joinAddresses(v) + case [][]byte: + return joinBytes(v) + + // Map types with various key types + case map[interface{}]interface{}, map[string]interface{}, map[string]string, map[string]int: + var b strings.Builder + b.WriteString("map[") + first := true + + switch m := v.(type) { + case map[interface{}]interface{}: + // Convert all keys to strings for consistent ordering + keys := make([]string, 0) + keyMap := make(map[string]interface{}) + + for k := range m { + keyStr := ToString(k) + keys = append(keys, keyStr) + keyMap[keyStr] = k + } + sort.Strings(keys) + + for _, keyStr := range keys { + if !first { + b.WriteString(" ") + } + origKey := keyMap[keyStr] + b.WriteString(keyStr) + b.WriteString(":") + b.WriteString(ToString(m[origKey])) + first = false + } + + case map[string]interface{}: + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + if !first { + b.WriteString(" ") + } + b.WriteString(k) + b.WriteString(":") + b.WriteString(ToString(m[k])) + first = false + } + + case map[string]string: + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + if !first { + b.WriteString(" ") + } + b.WriteString(k) + b.WriteString(":") + b.WriteString(m[k]) + first = false + } + + case map[string]int: + keys := make([]string, 0) + for k := range m { + keys 
= append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + if !first { + b.WriteString(" ") + } + b.WriteString(k) + b.WriteString(":") + b.WriteString(strconv.Itoa(m[k])) + first = false + } + } + b.WriteString("]") + return b.String() + + // Default + default: + return "" + } +} + +func join(slice interface{}) string { + if IsZero(slice) { + return "[]" + } + + items := ToInterfaceSlice(slice) + if items == nil { + return "[]" + } + + var b strings.Builder + b.WriteString("[") + for i, item := range items { + if i > 0 { + b.WriteString(" ") + } + b.WriteString(ToString(item)) + } + b.WriteString("]") + return b.String() +} + +func joinTimes(slice []time.Time) string { + if len(slice) == 0 { + return "[]" + } + var b strings.Builder + b.WriteString("[") + for i, t := range slice { + if i > 0 { + b.WriteString(" ") + } + b.WriteString(t.String()) + } + b.WriteString("]") + return b.String() +} + +func joinAddresses(slice []std.Address) string { + if len(slice) == 0 { + return "[]" + } + var b strings.Builder + b.WriteString("[") + for i, addr := range slice { + if i > 0 { + b.WriteString(" ") + } + b.WriteString(string(addr)) + } + b.WriteString("]") + return b.String() +} + +func joinBytes(slice [][]byte) string { + if len(slice) == 0 { + return "[]" + } + var b strings.Builder + b.WriteString("[") + for i, bytes := range slice { + if i > 0 { + b.WriteString(" ") + } + b.WriteString(string(bytes)) + } + b.WriteString("]") + return b.String() +} + +// ToBool converts any value to a boolean based on common programming conventions. 
+// For example: +// - Numbers: 0 is false, any other number is true +// - Strings: "", "0", "false", "f", "no", "n", "off" are false, others are true +// - Slices/Maps: empty is false, non-empty is true +// - nil: always false +// - bool: direct value +func ToBool(val interface{}) bool { + if IsZero(val) { + return false + } + + // Handle special string cases + if str, ok := val.(string); ok { + str = strings.ToLower(strings.TrimSpace(str)) + return str != "" && str != "0" && str != "false" && str != "f" && str != "no" && str != "n" && str != "off" + } + + return true +} + +// IsZero returns true if the value represents a "zero" or "empty" state for its type. +// For example: +// - Numbers: 0 +// - Strings: "" +// - Slices/Maps: empty +// - nil: true +// - bool: false +// - time.Time: IsZero() +// - std.Address: empty string +func IsZero(val interface{}) bool { + if val == nil { + return true + } + + switch v := val.(type) { + // Pointer types - nil pointer is zero, otherwise check pointed value + case *bool: + return v == nil || !*v + case *string: + return v == nil || *v == "" + case *int: + return v == nil || *v == 0 + case *time.Time: + return v == nil || v.IsZero() + case *std.Address: + return v == nil || string(*v) == "" + + // Bool + case bool: + return !v + + // String types + case string: + return v == "" + case stringer: + return v.String() == "" + + // Integer types + case int: + return v == 0 + case int8: + return v == 0 + case int16: + return v == 0 + case int32: + return v == 0 + case int64: + return v == 0 + case uint: + return v == 0 + case uint8: + return v == 0 + case uint16: + return v == 0 + case uint32: + return v == 0 + case uint64: + return v == 0 + + // Float types + case float32: + return v == 0 + case float64: + return v == 0 + + // Special types + case []byte: + return len(v) == 0 + case time.Time: + return v.IsZero() + case std.Address: + return string(v) == "" + + // Slices (check if empty) + case []string: + return len(v) == 0 + case 
[]int: + return len(v) == 0 + case []int32: + return len(v) == 0 + case []int64: + return len(v) == 0 + case []float32: + return len(v) == 0 + case []float64: + return len(v) == 0 + case []interface{}: + return len(v) == 0 + case []time.Time: + return len(v) == 0 + case []std.Address: + return len(v) == 0 + case [][]byte: + return len(v) == 0 + case []stringer: + return len(v) == 0 + + // Maps (check if empty) + case map[string]string: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + + default: + return false // non-nil unknown types are considered non-zero + } +} + +// ToInterfaceSlice converts various slice types to []interface{} +func ToInterfaceSlice(val interface{}) []interface{} { + switch v := val.(type) { + case []interface{}: + return v + case []string: + result := make([]interface{}, len(v)) + for i, s := range v { + result[i] = s + } + return result + case []int: + result := make([]interface{}, len(v)) + for i, n := range v { + result[i] = n + } + return result + case []int32: + result := make([]interface{}, len(v)) + for i, n := range v { + result[i] = n + } + return result + case []int64: + result := make([]interface{}, len(v)) + for i, n := range v { + result[i] = n + } + return result + case []float32: + result := make([]interface{}, len(v)) + for i, n := range v { + result[i] = n + } + return result + case []float64: + result := make([]interface{}, len(v)) + for i, n := range v { + result[i] = n + } + return result + case []bool: + result := make([]interface{}, len(v)) + for i, b := range v { + result[i] = b + } + return result + default: + return nil + } +} + +// ToMapStringInterface converts a map with string keys and any value type to map[string]interface{} +func ToMapStringInterface(m interface{}) (map[string]interface{}, error) { + result := make(map[string]interface{}) + + switch v := m.(type) { + case map[string]interface{}: + return v, nil + case map[string]string: + for k, val := range v { + result[k] = val + } + 
case map[string]int: + for k, val := range v { + result[k] = val + } + case map[string]int64: + for k, val := range v { + result[k] = val + } + case map[string]float64: + for k, val := range v { + result[k] = val + } + case map[string]bool: + for k, val := range v { + result[k] = val + } + case map[string][]string: + for k, val := range v { + result[k] = ToInterfaceSlice(val) + } + case map[string][]int: + for k, val := range v { + result[k] = ToInterfaceSlice(val) + } + case map[string][]interface{}: + for k, val := range v { + result[k] = val + } + case map[string]map[string]interface{}: + for k, val := range v { + result[k] = val + } + case map[string]map[string]string: + for k, val := range v { + if converted, err := ToMapStringInterface(val); err == nil { + result[k] = converted + } else { + return nil, errors.New("failed to convert nested map at key: " + k) + } + } + default: + return nil, errors.New("unsupported map type: " + ToString(m)) + } + + return result, nil +} + +// ToMapIntInterface converts a map with int keys and any value type to map[int]interface{} +func ToMapIntInterface(m interface{}) (map[int]interface{}, error) { + result := make(map[int]interface{}) + + switch v := m.(type) { + case map[int]interface{}: + return v, nil + case map[int]string: + for k, val := range v { + result[k] = val + } + case map[int]int: + for k, val := range v { + result[k] = val + } + case map[int]int64: + for k, val := range v { + result[k] = val + } + case map[int]float64: + for k, val := range v { + result[k] = val + } + case map[int]bool: + for k, val := range v { + result[k] = val + } + case map[int][]string: + for k, val := range v { + result[k] = ToInterfaceSlice(val) + } + case map[int][]int: + for k, val := range v { + result[k] = ToInterfaceSlice(val) + } + case map[int][]interface{}: + for k, val := range v { + result[k] = val + } + case map[int]map[string]interface{}: + for k, val := range v { + result[k] = val + } + case map[int]map[int]interface{}: + for 
k, val := range v { + result[k] = val + } + default: + return nil, errors.New("unsupported map type: " + ToString(m)) + } + + return result, nil +} + +// ToStringSlice converts various slice types to []string +func ToStringSlice(val interface{}) []string { + switch v := val.(type) { + case []string: + return v + case []interface{}: + result := make([]string, len(v)) + for i, item := range v { + result[i] = ToString(item) + } + return result + case []int: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.Itoa(n) + } + return result + case []int32: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.FormatInt(int64(n), 10) + } + return result + case []int64: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.FormatInt(n, 10) + } + return result + case []float32: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.FormatFloat(float64(n), 'f', -1, 32) + } + return result + case []float64: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.FormatFloat(n, 'f', -1, 64) + } + return result + case []bool: + result := make([]string, len(v)) + for i, b := range v { + result[i] = strconv.FormatBool(b) + } + return result + case []time.Time: + result := make([]string, len(v)) + for i, t := range v { + result[i] = t.String() + } + return result + case []std.Address: + result := make([]string, len(v)) + for i, addr := range v { + result[i] = string(addr) + } + return result + case [][]byte: + result := make([]string, len(v)) + for i, b := range v { + result[i] = string(b) + } + return result + case []stringer: + result := make([]string, len(v)) + for i, s := range v { + result[i] = s.String() + } + return result + case []uint: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.FormatUint(uint64(n), 10) + } + return result + case []uint8: + result := make([]string, len(v)) + for i, n := range v { + 
result[i] = strconv.FormatUint(uint64(n), 10) + } + return result + case []uint16: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.FormatUint(uint64(n), 10) + } + return result + case []uint32: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.FormatUint(uint64(n), 10) + } + return result + case []uint64: + result := make([]string, len(v)) + for i, n := range v { + result[i] = strconv.FormatUint(n, 10) + } + return result + default: + // Try to convert using reflection if it's a slice + if slice := ToInterfaceSlice(val); slice != nil { + result := make([]string, len(slice)) + for i, item := range slice { + result[i] = ToString(item) + } + return result + } + return nil + } +} diff --git a/examples/gno.land/p/moul/typeutil/typeutil_test.gno b/examples/gno.land/p/moul/typeutil/typeutil_test.gno new file mode 100644 index 00000000000..543ea1deec4 --- /dev/null +++ b/examples/gno.land/p/moul/typeutil/typeutil_test.gno @@ -0,0 +1,1075 @@ +package typeutil + +import ( + "std" + "strings" + "testing" + "time" +) + +type testStringer struct { + value string +} + +func (t testStringer) String() string { + return "test:" + t.value +} + +func TestToString(t *testing.T) { + // setup test data + str := "hello" + num := 42 + b := true + now := time.Now() + addr := std.Address("g1jg8mtutu9khhfwc4nxmuhcpftf0pajdhfvsqf5") + stringer := testStringer{value: "hello"} + + type testCase struct { + name string + input interface{} + expected string + } + + tests := []testCase{ + // basic types + {"string", "hello", "hello"}, + {"empty_string", "", ""}, + {"nil", nil, ""}, + + // integer types + {"int", 42, "42"}, + {"int8", int8(8), "8"}, + {"int16", int16(16), "16"}, + {"int32", int32(32), "32"}, + {"int64", int64(64), "64"}, + {"uint", uint(42), "42"}, + {"uint8", uint8(8), "8"}, + {"uint16", uint16(16), "16"}, + {"uint32", uint32(32), "32"}, + {"uint64", uint64(64), "64"}, + + // float types + {"float32", float32(3.14), 
"3.14"}, + {"float64", 3.14159, "3.14159"}, + + // boolean + {"bool_true", true, "true"}, + {"bool_false", false, "false"}, + + // special types + {"time", now, now.String()}, + {"address", addr, string(addr)}, + {"bytes", []byte("hello"), "hello"}, + {"stringer", stringer, "test:hello"}, + + // slices + {"empty_slice", []string{}, "[]"}, + {"string_slice", []string{"a", "b"}, "[a b]"}, + {"int_slice", []int{1, 2}, "[1 2]"}, + {"int32_slice", []int32{1, 2}, "[1 2]"}, + {"int64_slice", []int64{1, 2}, "[1 2]"}, + {"float32_slice", []float32{1.1, 2.2}, "[1.1 2.2]"}, + {"float64_slice", []float64{1.1, 2.2}, "[1.1 2.2]"}, + {"bytes_slice", [][]byte{[]byte("a"), []byte("b")}, "[a b]"}, + {"time_slice", []time.Time{now, now}, "[" + now.String() + " " + now.String() + "]"}, + {"address_slice", []std.Address{addr, addr}, "[" + string(addr) + " " + string(addr) + "]"}, + {"interface_slice", []interface{}{1, "a", true}, "[1 a true]"}, + + // empty slices + {"empty_string_slice", []string{}, "[]"}, + {"empty_int_slice", []int{}, "[]"}, + {"empty_int32_slice", []int32{}, "[]"}, + {"empty_int64_slice", []int64{}, "[]"}, + {"empty_float32_slice", []float32{}, "[]"}, + {"empty_float64_slice", []float64{}, "[]"}, + {"empty_bytes_slice", [][]byte{}, "[]"}, + {"empty_time_slice", []time.Time{}, "[]"}, + {"empty_address_slice", []std.Address{}, "[]"}, + {"empty_interface_slice", []interface{}{}, "[]"}, + + // maps + {"empty_string_map", map[string]string{}, "map[]"}, + {"string_map", map[string]string{"a": "1", "b": "2"}, "map[a:1 b:2]"}, + {"empty_interface_map", map[string]interface{}{}, "map[]"}, + {"interface_map", map[string]interface{}{"a": 1, "b": "2"}, "map[a:1 b:2]"}, + + // edge cases + {"empty_bytes", []byte{}, ""}, + {"nil_interface", interface{}(nil), ""}, + {"empty_struct", struct{}{}, "{}"}, + {"unknown_type", struct{ foo string }{}, ""}, + + // pointer types + {"nil_string_ptr", (*string)(nil), ""}, + {"string_ptr", &str, "hello"}, + {"nil_int_ptr", (*int)(nil), ""}, + 
{"int_ptr", &num, "42"}, + {"nil_bool_ptr", (*bool)(nil), ""}, + {"bool_ptr", &b, "true"}, + // {"nil_time_ptr", (*time.Time)(nil), ""}, // TODO: fix this + {"time_ptr", &now, now.String()}, + // {"nil_address_ptr", (*std.Address)(nil), ""}, // TODO: fix this + {"address_ptr", &addr, string(addr)}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ToString(tt.input) + if got != tt.expected { + t.Errorf("%s: ToString(%v) = %q, want %q", tt.name, tt.input, got, tt.expected) + } + }) + } +} + +func TestToBool(t *testing.T) { + str := "true" + num := 42 + b := true + now := time.Now() + addr := std.Address("g1jg8mtutu9khhfwc4nxmuhcpftf0pajdhfvsqf5") + zero := 0 + empty := "" + falseVal := false + + type testCase struct { + name string + input interface{} + expected bool + } + + tests := []testCase{ + // basic types + {"true", true, true}, + {"false", false, false}, + {"nil", nil, false}, + + // strings + {"empty_string", "", false}, + {"zero_string", "0", false}, + {"false_string", "false", false}, + {"f_string", "f", false}, + {"no_string", "no", false}, + {"n_string", "n", false}, + {"off_string", "off", false}, + {"space_string", " ", false}, + {"true_string", "true", true}, + {"yes_string", "yes", true}, + {"random_string", "hello", true}, + + // numbers + {"zero_int", 0, false}, + {"positive_int", 1, true}, + {"negative_int", -1, true}, + {"zero_float", 0.0, false}, + {"positive_float", 0.1, true}, + {"negative_float", -0.1, true}, + + // special types + {"empty_bytes", []byte{}, false}, + {"non_empty_bytes", []byte{1}, true}, + /*{"zero_time", time.Time{}, false},*/ // TODO: fix this + {"empty_address", std.Address(""), false}, + + // slices + {"empty_slice", []string{}, false}, + {"non_empty_slice", []string{"a"}, true}, + + // maps + {"empty_map", map[string]string{}, false}, + {"non_empty_map", map[string]string{"a": "b"}, true}, + + // pointer types + {"nil_bool_ptr", (*bool)(nil), false}, + {"true_ptr", &b, true}, + 
{"false_ptr", &falseVal, false}, + {"nil_string_ptr", (*string)(nil), false}, + {"string_ptr", &str, true}, + {"empty_string_ptr", &empty, false}, + {"nil_int_ptr", (*int)(nil), false}, + {"int_ptr", &num, true}, + {"zero_int_ptr", &zero, false}, + // {"nil_time_ptr", (*time.Time)(nil), false}, // TODO: fix this + {"time_ptr", &now, true}, + // {"nil_address_ptr", (*std.Address)(nil), false}, // TODO: fix this + {"address_ptr", &addr, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ToBool(tt.input) + if got != tt.expected { + t.Errorf("%s: ToBool(%v) = %v, want %v", tt.name, tt.input, got, tt.expected) + } + }) + } +} + +func TestIsZero(t *testing.T) { + str := "hello" + num := 42 + b := true + now := time.Now() + addr := std.Address("g1jg8mtutu9khhfwc4nxmuhcpftf0pajdhfvsqf5") + zero := 0 + empty := "" + falseVal := false + + type testCase struct { + name string + input interface{} + expected bool + } + + tests := []testCase{ + // basic types + {"true", true, false}, + {"false", false, true}, + {"nil", nil, true}, + + // strings + {"empty_string", "", true}, + {"non_empty_string", "hello", false}, + + // numbers + {"zero_int", 0, true}, + {"non_zero_int", 1, false}, + {"zero_float", 0.0, true}, + {"non_zero_float", 0.1, false}, + + // special types + {"empty_bytes", []byte{}, true}, + {"non_empty_bytes", []byte{1}, false}, + /*{"zero_time", time.Time{}, true},*/ // TODO: fix this + {"empty_address", std.Address(""), true}, + + // slices + {"empty_slice", []string{}, true}, + {"non_empty_slice", []string{"a"}, false}, + + // maps + {"empty_map", map[string]string{}, true}, + {"non_empty_map", map[string]string{"a": "b"}, false}, + + // pointer types + {"nil_bool_ptr", (*bool)(nil), true}, + {"false_ptr", &falseVal, true}, + {"true_ptr", &b, false}, + {"nil_string_ptr", (*string)(nil), true}, + {"empty_string_ptr", &empty, true}, + {"string_ptr", &str, false}, + {"nil_int_ptr", (*int)(nil), true}, + {"zero_int_ptr", &zero, 
true}, + {"int_ptr", &num, false}, + // {"nil_time_ptr", (*time.Time)(nil), true}, // TODO: fix this + {"time_ptr", &now, false}, + // {"nil_address_ptr", (*std.Address)(nil), true}, // TODO: fix this + {"address_ptr", &addr, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsZero(tt.input) + if got != tt.expected { + t.Errorf("%s: IsZero(%v) = %v, want %v", tt.name, tt.input, got, tt.expected) + } + }) + } +} + +func TestToInterfaceSlice(t *testing.T) { + now := time.Now() + addr := std.Address("g1jg8mtutu9khhfwc4nxmuhcpftf0pajdhfvsqf5") + str := testStringer{value: "hello"} + + tests := []struct { + name string + input interface{} + expected []interface{} + compare func([]interface{}, []interface{}) bool + }{ + { + name: "nil", + input: nil, + expected: nil, + compare: compareNil, + }, + { + name: "empty_interface_slice", + input: []interface{}{}, + expected: []interface{}{}, + compare: compareEmpty, + }, + { + name: "interface_slice", + input: []interface{}{1, "two", true}, + expected: []interface{}{1, "two", true}, + compare: compareInterfaces, + }, + { + name: "string_slice", + input: []string{"a", "b", "c"}, + expected: []interface{}{"a", "b", "c"}, + compare: compareStrings, + }, + { + name: "int_slice", + input: []int{1, 2, 3}, + expected: []interface{}{1, 2, 3}, + compare: compareInts, + }, + { + name: "int32_slice", + input: []int32{1, 2, 3}, + expected: []interface{}{int32(1), int32(2), int32(3)}, + compare: compareInt32s, + }, + { + name: "int64_slice", + input: []int64{1, 2, 3}, + expected: []interface{}{int64(1), int64(2), int64(3)}, + compare: compareInt64s, + }, + { + name: "float32_slice", + input: []float32{1.1, 2.2, 3.3}, + expected: []interface{}{float32(1.1), float32(2.2), float32(3.3)}, + compare: compareFloat32s, + }, + { + name: "float64_slice", + input: []float64{1.1, 2.2, 3.3}, + expected: []interface{}{1.1, 2.2, 3.3}, + compare: compareFloat64s, + }, + { + name: "bool_slice", + input: []bool{true, 
false, true}, + expected: []interface{}{true, false, true}, + compare: compareBools, + }, + /* { + name: "time_slice", + input: []time.Time{now}, + expected: []interface{}{now}, + compare: compareTimes, + }, */ // TODO: fix this + /* { + name: "address_slice", + input: []std.Address{addr}, + expected: []interface{}{addr}, + compare: compareAddresses, + },*/ // TODO: fix this + /* { + name: "bytes_slice", + input: [][]byte{[]byte("hello"), []byte("world")}, + expected: []interface{}{[]byte("hello"), []byte("world")}, + compare: compareBytes, + },*/ // TODO: fix this + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ToInterfaceSlice(tt.input) + if !tt.compare(got, tt.expected) { + t.Errorf("ToInterfaceSlice() = %v, want %v", got, tt.expected) + } + }) + } +} + +func compareNil(a, b []interface{}) bool { + return a == nil && b == nil +} + +func compareEmpty(a, b []interface{}) bool { + return len(a) == 0 && len(b) == 0 +} + +func compareInterfaces(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func compareStrings(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + as, ok1 := a[i].(string) + bs, ok2 := b[i].(string) + if !ok1 || !ok2 || as != bs { + return false + } + } + return true +} + +func compareInts(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + ai, ok1 := a[i].(int) + bi, ok2 := b[i].(int) + if !ok1 || !ok2 || ai != bi { + return false + } + } + return true +} + +func compareInt32s(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + ai, ok1 := a[i].(int32) + bi, ok2 := b[i].(int32) + if !ok1 || !ok2 || ai != bi { + return false + } + } + return true +} + +func compareInt64s(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + ai, ok1 := a[i].(int64) + bi, ok2 
:= b[i].(int64) + if !ok1 || !ok2 || ai != bi { + return false + } + } + return true +} + +func compareFloat32s(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + ai, ok1 := a[i].(float32) + bi, ok2 := b[i].(float32) + if !ok1 || !ok2 || ai != bi { + return false + } + } + return true +} + +func compareFloat64s(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + ai, ok1 := a[i].(float64) + bi, ok2 := b[i].(float64) + if !ok1 || !ok2 || ai != bi { + return false + } + } + return true +} + +func compareBools(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + ab, ok1 := a[i].(bool) + bb, ok2 := b[i].(bool) + if !ok1 || !ok2 || ab != bb { + return false + } + } + return true +} + +func compareTimes(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + at, ok1 := a[i].(time.Time) + bt, ok2 := b[i].(time.Time) + if !ok1 || !ok2 || !at.Equal(bt) { + return false + } + } + return true +} + +func compareAddresses(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + aa, ok1 := a[i].(std.Address) + ba, ok2 := b[i].(std.Address) + if !ok1 || !ok2 || aa != ba { + return false + } + } + return true +} + +func compareBytes(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + for i := range a { + ab, ok1 := a[i].([]byte) + bb, ok2 := b[i].([]byte) + if !ok1 || !ok2 || string(ab) != string(bb) { + return false + } + } + return true +} + +// compareStringInterfaceMaps compares two map[string]interface{} for equality +func compareStringInterfaceMaps(a, b map[string]interface{}) bool { + if len(a) != len(b) { + return false + } + for k, v1 := range a { + v2, ok := b[k] + if !ok { + return false + } + // Compare values based on their type + switch val1 := v1.(type) { + case string: + val2, ok := v2.(string) + if !ok || val1 != val2 { + return false + } + case int: + 
val2, ok := v2.(int) + if !ok || val1 != val2 { + return false + } + case float64: + val2, ok := v2.(float64) + if !ok || val1 != val2 { + return false + } + case bool: + val2, ok := v2.(bool) + if !ok || val1 != val2 { + return false + } + case []interface{}: + val2, ok := v2.([]interface{}) + if !ok || len(val1) != len(val2) { + return false + } + for i := range val1 { + if val1[i] != val2[i] { + return false + } + } + case map[string]interface{}: + val2, ok := v2.(map[string]interface{}) + if !ok || !compareStringInterfaceMaps(val1, val2) { + return false + } + default: + return false + } + } + return true +} + +func TestToMapStringInterface(t *testing.T) { + tests := []struct { + name string + input interface{} + expected map[string]interface{} + wantErr bool + }{ + { + name: "map[string]interface{}", + input: map[string]interface{}{ + "key1": "value1", + "key2": 42, + }, + expected: map[string]interface{}{ + "key1": "value1", + "key2": 42, + }, + wantErr: false, + }, + { + name: "map[string]string", + input: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + expected: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + wantErr: false, + }, + { + name: "map[string]int", + input: map[string]int{ + "key1": 1, + "key2": 2, + }, + expected: map[string]interface{}{ + "key1": 1, + "key2": 2, + }, + wantErr: false, + }, + { + name: "map[string]float64", + input: map[string]float64{ + "key1": 1.1, + "key2": 2.2, + }, + expected: map[string]interface{}{ + "key1": 1.1, + "key2": 2.2, + }, + wantErr: false, + }, + { + name: "map[string]bool", + input: map[string]bool{ + "key1": true, + "key2": false, + }, + expected: map[string]interface{}{ + "key1": true, + "key2": false, + }, + wantErr: false, + }, + { + name: "map[string][]string", + input: map[string][]string{ + "key1": {"a", "b"}, + "key2": {"c", "d"}, + }, + expected: map[string]interface{}{ + "key1": []interface{}{"a", "b"}, + "key2": []interface{}{"c", "d"}, + }, + wantErr: false, 
+ }, + { + name: "nested map[string]map[string]string", + input: map[string]map[string]string{ + "key1": {"nested1": "value1"}, + "key2": {"nested2": "value2"}, + }, + expected: map[string]interface{}{ + "key1": map[string]interface{}{"nested1": "value1"}, + "key2": map[string]interface{}{"nested2": "value2"}, + }, + wantErr: false, + }, + { + name: "unsupported type", + input: 42, // not a map + expected: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ToMapStringInterface(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("ToMapStringInterface() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr { + if !compareStringInterfaceMaps(got, tt.expected) { + t.Errorf("ToMapStringInterface() = %v, expected %v", got, tt.expected) + } + } + }) + } +} + +// Test error messages +func TestToMapStringInterfaceErrors(t *testing.T) { + _, err := ToMapStringInterface(42) + if err == nil || !strings.Contains(err.Error(), "unsupported map type") { + t.Errorf("Expected error containing 'unsupported map type', got %v", err) + } +} + +// compareIntInterfaceMaps compares two map[int]interface{} for equality +func compareIntInterfaceMaps(a, b map[int]interface{}) bool { + if len(a) != len(b) { + return false + } + for k, v1 := range a { + v2, ok := b[k] + if !ok { + return false + } + // Compare values based on their type + switch val1 := v1.(type) { + case string: + val2, ok := v2.(string) + if !ok || val1 != val2 { + return false + } + case int: + val2, ok := v2.(int) + if !ok || val1 != val2 { + return false + } + case float64: + val2, ok := v2.(float64) + if !ok || val1 != val2 { + return false + } + case bool: + val2, ok := v2.(bool) + if !ok || val1 != val2 { + return false + } + case []interface{}: + val2, ok := v2.([]interface{}) + if !ok || len(val1) != len(val2) { + return false + } + for i := range val1 { + if val1[i] != val2[i] { + return false + } + } + case map[string]interface{}: + 
val2, ok := v2.(map[string]interface{}) + if !ok || !compareStringInterfaceMaps(val1, val2) { + return false + } + default: + return false + } + } + return true +} + +func TestToMapIntInterface(t *testing.T) { + tests := []struct { + name string + input interface{} + expected map[int]interface{} + wantErr bool + }{ + { + name: "map[int]interface{}", + input: map[int]interface{}{ + 1: "value1", + 2: 42, + }, + expected: map[int]interface{}{ + 1: "value1", + 2: 42, + }, + wantErr: false, + }, + { + name: "map[int]string", + input: map[int]string{ + 1: "value1", + 2: "value2", + }, + expected: map[int]interface{}{ + 1: "value1", + 2: "value2", + }, + wantErr: false, + }, + { + name: "map[int]int", + input: map[int]int{ + 1: 10, + 2: 20, + }, + expected: map[int]interface{}{ + 1: 10, + 2: 20, + }, + wantErr: false, + }, + { + name: "map[int]float64", + input: map[int]float64{ + 1: 1.1, + 2: 2.2, + }, + expected: map[int]interface{}{ + 1: 1.1, + 2: 2.2, + }, + wantErr: false, + }, + { + name: "map[int]bool", + input: map[int]bool{ + 1: true, + 2: false, + }, + expected: map[int]interface{}{ + 1: true, + 2: false, + }, + wantErr: false, + }, + { + name: "map[int][]string", + input: map[int][]string{ + 1: {"a", "b"}, + 2: {"c", "d"}, + }, + expected: map[int]interface{}{ + 1: []interface{}{"a", "b"}, + 2: []interface{}{"c", "d"}, + }, + wantErr: false, + }, + { + name: "map[int]map[string]interface{}", + input: map[int]map[string]interface{}{ + 1: {"nested1": "value1"}, + 2: {"nested2": "value2"}, + }, + expected: map[int]interface{}{ + 1: map[string]interface{}{"nested1": "value1"}, + 2: map[string]interface{}{"nested2": "value2"}, + }, + wantErr: false, + }, + { + name: "unsupported type", + input: 42, // not a map + expected: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ToMapIntInterface(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("ToMapIntInterface() error = %v, wantErr %v", err, 
tt.wantErr) + return + } + if !tt.wantErr { + if !compareIntInterfaceMaps(got, tt.expected) { + t.Errorf("ToMapIntInterface() = %v, expected %v", got, tt.expected) + } + } + }) + } +} + +func TestToStringSlice(t *testing.T) { + tests := []struct { + name string + input interface{} + expected []string + }{ + { + name: "nil input", + input: nil, + expected: nil, + }, + { + name: "empty slice", + input: []string{}, + expected: []string{}, + }, + { + name: "string slice", + input: []string{"a", "b", "c"}, + expected: []string{"a", "b", "c"}, + }, + { + name: "int slice", + input: []int{1, 2, 3}, + expected: []string{"1", "2", "3"}, + }, + { + name: "int32 slice", + input: []int32{1, 2, 3}, + expected: []string{"1", "2", "3"}, + }, + { + name: "int64 slice", + input: []int64{1, 2, 3}, + expected: []string{"1", "2", "3"}, + }, + { + name: "uint slice", + input: []uint{1, 2, 3}, + expected: []string{"1", "2", "3"}, + }, + { + name: "uint8 slice", + input: []uint8{1, 2, 3}, + expected: []string{"1", "2", "3"}, + }, + { + name: "uint16 slice", + input: []uint16{1, 2, 3}, + expected: []string{"1", "2", "3"}, + }, + { + name: "uint32 slice", + input: []uint32{1, 2, 3}, + expected: []string{"1", "2", "3"}, + }, + { + name: "uint64 slice", + input: []uint64{1, 2, 3}, + expected: []string{"1", "2", "3"}, + }, + { + name: "float32 slice", + input: []float32{1.1, 2.2, 3.3}, + expected: []string{"1.1", "2.2", "3.3"}, + }, + { + name: "float64 slice", + input: []float64{1.1, 2.2, 3.3}, + expected: []string{"1.1", "2.2", "3.3"}, + }, + { + name: "bool slice", + input: []bool{true, false, true}, + expected: []string{"true", "false", "true"}, + }, + { + name: "[]byte slice", + input: [][]byte{[]byte("hello"), []byte("world")}, + expected: []string{"hello", "world"}, + }, + { + name: "interface slice", + input: []interface{}{1, "hello", true}, + expected: []string{"1", "hello", "true"}, + }, + { + name: "time slice", + input: []time.Time{time.Time{}, time.Time{}}, + expected: 
[]string{"0001-01-01 00:00:00 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC"}, + }, + { + name: "address slice", + input: []std.Address{"addr1", "addr2"}, + expected: []string{"addr1", "addr2"}, + }, + { + name: "non-slice input", + input: 42, + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ToStringSlice(tt.input) + if !slicesEqual(result, tt.expected) { + t.Errorf("ToStringSlice(%v) = %v, want %v", tt.input, result, tt.expected) + } + }) + } +} + +// Helper function to compare string slices +func slicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func TestToStringAdvanced(t *testing.T) { + tests := []struct { + name string + input interface{} + expected string + }{ + { + name: "slice with mixed basic types", + input: []interface{}{ + 42, + "hello", + true, + 3.14, + }, + expected: "[42 hello true 3.14]", + }, + { + name: "map with basic types", + input: map[string]interface{}{ + "int": 42, + "str": "hello", + "bool": true, + "float": 3.14, + }, + expected: "map[bool:true float:3.14 int:42 str:hello]", + }, + { + name: "mixed types map", + input: map[interface{}]interface{}{ + 42: "number", + "string": 123, + true: []int{1, 2, 3}, + struct{}{}: "empty", + }, + expected: "map[42:number string:123 true:[1 2 3] {}:empty]", + }, + { + name: "nested maps", + input: map[string]interface{}{ + "a": map[string]int{ + "x": 1, + "y": 2, + }, + "b": []interface{}{1, "two", true}, + }, + expected: "map[a:map[x:1 y:2] b:[1 two true]]", + }, + { + name: "empty struct", + input: struct{}{}, + expected: "{}", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ToString(tt.input) + if result != tt.expected { + t.Errorf("\nToString(%v) =\n%v\nwant:\n%v", tt.input, result, tt.expected) + } + }) + } +} diff --git a/examples/gno.land/p/moul/ulist/gno.mod 
b/examples/gno.land/p/moul/ulist/gno.mod
new file mode 100644
index 00000000000..077f8c556f3
--- /dev/null
+++ b/examples/gno.land/p/moul/ulist/gno.mod
@@ -0,0 +1 @@
+module gno.land/p/moul/ulist
diff --git a/examples/gno.land/p/moul/ulist/ulist.gno b/examples/gno.land/p/moul/ulist/ulist.gno
new file mode 100644
index 00000000000..507a02a4e45
--- /dev/null
+++ b/examples/gno.land/p/moul/ulist/ulist.gno
@@ -0,0 +1,437 @@
+// Package ulist provides an append-only list implementation using a binary tree structure,
+// optimized for scenarios requiring sequential inserts with auto-incrementing indices.
+//
+// The implementation uses a binary tree where new elements are added by following a path
+// determined by the binary representation of the index. This keeps the tree balanced
+// under append workloads without requiring any explicit rebalancing logic.
+//
+// Unlike the AVL tree-based list implementation (p/demo/avl/list), ulist is specifically
+// designed for append-only operations and does not require rebalancing. This makes it more
+// efficient for sequential inserts but less flexible for general-purpose list operations.
+//
+// Key differences from AVL list:
+// * Append-only design (no arbitrary inserts)
+// * No tree rebalancing needed
+// * Simpler implementation
+// * More memory efficient for sequential operations
+// * Less flexible than AVL (existing elements cannot be reordered)
+//
+// Key characteristics:
+// * O(log n) append and access operations
+// * Perfect balance for power-of-2 sizes
+// * No balancing needed
+// * Memory efficient
+// * Natural support for range queries
+// * Support for soft deletion of elements
+// * Forward and reverse iteration capabilities
+// * Offset-based iteration with count control
+package ulist
+
+// TODO: Use this ulist in moul/collection for the primary index.
+// TODO: Benchmarks.
+ +import ( + "errors" +) + +// List represents an append-only binary tree list +type List struct { + root *treeNode + totalSize int + activeSize int +} + +// Entry represents a key-value pair in the list, where Index is the position +// and Value is the stored data +type Entry struct { + Index int + Value interface{} +} + +// treeNode represents a node in the binary tree +type treeNode struct { + data interface{} + left *treeNode + right *treeNode +} + +// Error variables +var ( + ErrOutOfBounds = errors.New("index out of bounds") + ErrDeleted = errors.New("element already deleted") +) + +// New creates a new empty List instance +func New() *List { + return &List{} +} + +// Append adds one or more values to the end of the list. +// Values are added sequentially, and the list grows automatically. +func (l *List) Append(values ...interface{}) { + for _, value := range values { + index := l.totalSize + node := l.findNode(index, true) + node.data = value + l.totalSize++ + l.activeSize++ + } +} + +// Get retrieves the value at the specified index. +// Returns nil if the index is out of bounds or if the element was deleted. +func (l *List) Get(index int) interface{} { + node := l.findNode(index, false) + if node == nil { + return nil + } + return node.data +} + +// Delete marks the elements at the specified indices as deleted. +// Returns ErrOutOfBounds if any index is invalid or ErrDeleted if +// the element was already deleted. 
+func (l *List) Delete(indices ...int) error { + if len(indices) == 0 { + return nil + } + if l == nil || l.totalSize == 0 { + return ErrOutOfBounds + } + + for _, index := range indices { + if index < 0 || index >= l.totalSize { + return ErrOutOfBounds + } + + node := l.findNode(index, false) + if node == nil || node.data == nil { + return ErrDeleted + } + node.data = nil + l.activeSize-- + } + + return nil +} + +// Set updates or restores a value at the specified index if within bounds +// Returns ErrOutOfBounds if the index is invalid +func (l *List) Set(index int, value interface{}) error { + if l == nil || index < 0 || index >= l.totalSize { + return ErrOutOfBounds + } + + node := l.findNode(index, false) + if node == nil { + return ErrOutOfBounds + } + + // If this is restoring a deleted element + if value != nil && node.data == nil { + l.activeSize++ + } + + // If this is deleting an element + if value == nil && node.data != nil { + l.activeSize-- + } + + node.data = value + return nil +} + +// Size returns the number of active (non-deleted) elements in the list +func (l *List) Size() int { + if l == nil { + return 0 + } + return l.activeSize +} + +// TotalSize returns the total number of elements ever added to the list, +// including deleted elements +func (l *List) TotalSize() int { + if l == nil { + return 0 + } + return l.totalSize +} + +// IterCbFn is a callback function type used in iteration methods. +// Return true to stop iteration, false to continue. +type IterCbFn func(index int, value interface{}) bool + +// Iterator performs iteration between start and end indices, calling cb for each entry. +// If start > end, iteration is performed in reverse order. +// Returns true if iteration was stopped early by the callback returning true. +// Skips deleted elements. 
+func (l *List) Iterator(start, end int, cb IterCbFn) bool { + // For empty list or invalid range + if l == nil || l.totalSize == 0 { + return false + } + if start < 0 && end < 0 { + return false + } + if start >= l.totalSize && end >= l.totalSize { + return false + } + + // Normalize indices + if start < 0 { + start = 0 + } + if end < 0 { + end = 0 + } + if end >= l.totalSize { + end = l.totalSize - 1 + } + if start >= l.totalSize { + start = l.totalSize - 1 + } + + // Handle reverse iteration + if start > end { + for i := start; i >= end; i-- { + val := l.Get(i) + if val != nil { + if cb(i, val) { + return true + } + } + } + return false + } + + // Handle forward iteration + for i := start; i <= end; i++ { + val := l.Get(i) + if val != nil { + if cb(i, val) { + return true + } + } + } + return false +} + +// IteratorByOffset performs iteration starting from offset for count elements. +// If count is positive, iterates forward; if negative, iterates backward. +// The iteration stops after abs(count) elements or when reaching list bounds. +// Skips deleted elements. 
+func (l *List) IteratorByOffset(offset int, count int, cb IterCbFn) bool { + if count == 0 || l == nil || l.totalSize == 0 { + return false + } + + // Normalize offset + if offset < 0 { + offset = 0 + } + if offset >= l.totalSize { + offset = l.totalSize - 1 + } + + // Determine end based on count direction + var end int + if count > 0 { + end = l.totalSize - 1 + } else { + end = 0 + } + + wrapperReturned := false + + // Wrap the callback to limit iterations + remaining := abs(count) + wrapper := func(index int, value interface{}) bool { + if remaining <= 0 { + wrapperReturned = true + return true + } + remaining-- + return cb(index, value) + } + ret := l.Iterator(offset, end, wrapper) + if wrapperReturned { + return false + } + return ret +} + +// abs returns the absolute value of x +func abs(x int) int { + if x < 0 { + return -x + } + return x +} + +// findNode locates or creates a node at the given index in the binary tree. +// The tree is structured such that the path to a node is determined by the binary +// representation of the index. For example, a tree with 15 elements would look like: +// +// 0 +// / \ +// 1 2 +// / \ / \ +// 3 4 5 6 +// / \ / \ / \ / \ +// 7 8 9 10 11 12 13 14 +// +// To find index 13 (binary 1101): +// 1. Start at root (0) +// 2. Calculate bits needed (4 bits for index 13) +// 3. Skip the highest bit position and start from bits-2 +// 4. 
Read bits from left to right: +// - 1 -> go right +// - 0 -> go left +// - 1 -> go right, reaching the node that stores index 13 (the bits of 13 are 1101; after skipping the highest bit the path is 1,0,1 — node positions are assigned by this bit rule rather than strict level-order numbering) +// +// Special cases: +// - Index 0 always returns the root node +// - For create=true, missing nodes are created along the path +// - For create=false, returns nil if any node is missing +func (l *List) findNode(index int, create bool) *treeNode { + // For read operations, check bounds strictly + if !create && (l == nil || index < 0 || index >= l.totalSize) { + return nil + } + + // For create operations, allow index == totalSize for append + if create && (l == nil || index < 0 || index > l.totalSize) { + return nil + } + + // Initialize root if needed + if l.root == nil { + if !create { + return nil + } + l.root = &treeNode{} + return l.root + } + + node := l.root + + // Special case for root node + if index == 0 { + return node + } + + // Calculate the number of bits needed (inline highestBit logic) + bits := 0 + n := index + 1 + for n > 0 { + n >>= 1 + bits++ + } + + // Start from the second highest bit + for level := bits - 2; level >= 0; level-- { + bit := (index & (1 << uint(level))) != 0 + + if bit { + if node.right == nil { + if !create { + return nil + } + node.right = &treeNode{} + } + node = node.right + } else { + if node.left == nil { + if !create { + return nil + } + node.left = &treeNode{} + } + node = node.left + } + } + + return node +} + +// MustDelete deletes elements at the specified indices. +// Panics if any index is invalid or if any element was already deleted. +func (l *List) MustDelete(indices ...int) { + if err := l.Delete(indices...); err != nil { + panic(err) + } +} + +// MustGet retrieves the value at the specified index. +// Panics if the index is out of bounds or if the element was deleted. 
+func (l *List) MustGet(index int) interface{} { + if l == nil || index < 0 || index >= l.totalSize { + panic(ErrOutOfBounds) + } + value := l.Get(index) + if value == nil { + panic(ErrDeleted) + } + return value +} + +// MustSet updates or restores a value at the specified index. +// Panics if the index is out of bounds. +func (l *List) MustSet(index int, value interface{}) { + if err := l.Set(index, value); err != nil { + panic(err) + } +} + +// GetRange returns a slice of Entry containing elements between start and end indices. +// If start > end, elements are returned in reverse order. +// Deleted elements are skipped. +func (l *List) GetRange(start, end int) []Entry { + var entries []Entry + l.Iterator(start, end, func(index int, value interface{}) bool { + entries = append(entries, Entry{Index: index, Value: value}) + return false + }) + return entries +} + +// GetByOffset returns a slice of Entry starting from offset for count elements. +// If count is positive, returns elements forward; if negative, returns elements backward. +// The operation stops after abs(count) elements or when reaching list bounds. +// Deleted elements are skipped. +func (l *List) GetByOffset(offset int, count int) []Entry { + var entries []Entry + l.IteratorByOffset(offset, count, func(index int, value interface{}) bool { + entries = append(entries, Entry{Index: index, Value: value}) + return false + }) + return entries +} + +// IList defines the interface for an ulist.List compatible structure. 
+type IList interface { + // Basic operations + Append(values ...interface{}) + Get(index int) interface{} + Delete(indices ...int) error + Size() int + TotalSize() int + Set(index int, value interface{}) error + + // Must variants that panic instead of returning errors + MustDelete(indices ...int) + MustGet(index int) interface{} + MustSet(index int, value interface{}) + + // Range operations + GetRange(start, end int) []Entry + GetByOffset(offset int, count int) []Entry + + // Iterator operations + Iterator(start, end int, cb IterCbFn) bool + IteratorByOffset(offset int, count int, cb IterCbFn) bool +} + +// Verify that List implements IList +var _ IList = (*List)(nil) diff --git a/examples/gno.land/p/moul/ulist/ulist_test.gno b/examples/gno.land/p/moul/ulist/ulist_test.gno new file mode 100644 index 00000000000..f098731a7db --- /dev/null +++ b/examples/gno.land/p/moul/ulist/ulist_test.gno @@ -0,0 +1,1422 @@ +package ulist + +import ( + "testing" + + "gno.land/p/demo/uassert" + "gno.land/p/demo/ufmt" + "gno.land/p/moul/typeutil" +) + +func TestNew(t *testing.T) { + l := New() + uassert.Equal(t, 0, l.Size()) + uassert.Equal(t, 0, l.TotalSize()) +} + +func TestListAppendAndGet(t *testing.T) { + tests := []struct { + name string + setup func() *List + index int + expected interface{} + }{ + { + name: "empty list", + setup: func() *List { + return New() + }, + index: 0, + expected: nil, + }, + { + name: "single append and get", + setup: func() *List { + l := New() + l.Append(42) + return l + }, + index: 0, + expected: 42, + }, + { + name: "multiple appends and get first", + setup: func() *List { + l := New() + l.Append(1) + l.Append(2) + l.Append(3) + return l + }, + index: 0, + expected: 1, + }, + { + name: "multiple appends and get last", + setup: func() *List { + l := New() + l.Append(1) + l.Append(2) + l.Append(3) + return l + }, + index: 2, + expected: 3, + }, + { + name: "get with invalid index", + setup: func() *List { + l := New() + l.Append(1) + return l + 
}, + index: 1, + expected: nil, + }, + { + name: "31 items get first", + setup: func() *List { + l := New() + for i := 0; i < 31; i++ { + l.Append(i) + } + return l + }, + index: 0, + expected: 0, + }, + { + name: "31 items get last", + setup: func() *List { + l := New() + for i := 0; i < 31; i++ { + l.Append(i) + } + return l + }, + index: 30, + expected: 30, + }, + { + name: "31 items get middle", + setup: func() *List { + l := New() + for i := 0; i < 31; i++ { + l.Append(i) + } + return l + }, + index: 15, + expected: 15, + }, + { + name: "values around power of 2 boundary", + setup: func() *List { + l := New() + for i := 0; i < 18; i++ { + l.Append(i) + } + return l + }, + index: 15, + expected: 15, + }, + { + name: "values at power of 2", + setup: func() *List { + l := New() + for i := 0; i < 18; i++ { + l.Append(i) + } + return l + }, + index: 16, + expected: 16, + }, + { + name: "values after power of 2", + setup: func() *List { + l := New() + for i := 0; i < 18; i++ { + l.Append(i) + } + return l + }, + index: 17, + expected: 17, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := tt.setup() + got := l.Get(tt.index) + if got != tt.expected { + t.Errorf("List.Get() = %v, want %v", got, tt.expected) + } + }) + } +} + +// generateSequence creates a slice of integers from 0 to n-1 +func generateSequence(n int) []interface{} { + result := make([]interface{}, n) + for i := 0; i < n; i++ { + result[i] = i + } + return result +} + +func TestListDelete(t *testing.T) { + tests := []struct { + name string + setup func() *List + deleteIndices []int + expectedErr error + expectedSize int + }{ + { + name: "delete single element", + setup: func() *List { + l := New() + l.Append(1, 2, 3) + return l + }, + deleteIndices: []int{1}, + expectedErr: nil, + expectedSize: 2, + }, + { + name: "delete multiple elements", + setup: func() *List { + l := New() + l.Append(1, 2, 3, 4, 5) + return l + }, + deleteIndices: []int{0, 2, 4}, + expectedErr: 
nil, + expectedSize: 2, + }, + { + name: "delete with negative index", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + deleteIndices: []int{-1}, + expectedErr: ErrOutOfBounds, + expectedSize: 1, + }, + { + name: "delete beyond size", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + deleteIndices: []int{1}, + expectedErr: ErrOutOfBounds, + expectedSize: 1, + }, + { + name: "delete already deleted element", + setup: func() *List { + l := New() + l.Append(1) + l.Delete(0) + return l + }, + deleteIndices: []int{0}, + expectedErr: ErrDeleted, + expectedSize: 0, + }, + { + name: "delete multiple elements in reverse", + setup: func() *List { + l := New() + l.Append(1, 2, 3, 4, 5) + return l + }, + deleteIndices: []int{4, 2, 0}, + expectedErr: nil, + expectedSize: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := tt.setup() + initialSize := l.Size() + err := l.Delete(tt.deleteIndices...) + if err != nil && tt.expectedErr != nil { + uassert.Equal(t, tt.expectedErr.Error(), err.Error()) + } else { + uassert.Equal(t, tt.expectedErr, err) + } + uassert.Equal(t, tt.expectedSize, l.Size(), + ufmt.Sprintf("Expected size %d after deleting %d elements from size %d, got %d", + tt.expectedSize, len(tt.deleteIndices), initialSize, l.Size())) + }) + } +} + +func TestListSizeAndTotalSize(t *testing.T) { + t.Run("empty list", func(t *testing.T) { + list := New() + uassert.Equal(t, 0, list.Size()) + uassert.Equal(t, 0, list.TotalSize()) + }) + + t.Run("list with elements", func(t *testing.T) { + list := New() + list.Append(1) + list.Append(2) + list.Append(3) + uassert.Equal(t, 3, list.Size()) + uassert.Equal(t, 3, list.TotalSize()) + }) + + t.Run("list with deleted elements", func(t *testing.T) { + list := New() + list.Append(1) + list.Append(2) + list.Append(3) + list.Delete(1) + uassert.Equal(t, 2, list.Size()) + uassert.Equal(t, 3, list.TotalSize()) + }) +} + +func TestIterator(t *testing.T) { + tests := 
[]struct { + name string + values []interface{} + start int + end int + expected []Entry + wantStop bool + stopAfter int // stop after N elements, -1 for no stop + }{ + { + name: "empty list", + values: []interface{}{}, + start: 0, + end: 10, + expected: []Entry{}, + stopAfter: -1, + }, + { + name: "nil list", + values: nil, + start: 0, + end: 0, + expected: []Entry{}, + stopAfter: -1, + }, + { + name: "single element forward", + values: []interface{}{42}, + start: 0, + end: 0, + expected: []Entry{ + {Index: 0, Value: 42}, + }, + stopAfter: -1, + }, + { + name: "multiple elements forward", + values: []interface{}{1, 2, 3, 4, 5}, + start: 0, + end: 4, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + {Index: 3, Value: 4}, + {Index: 4, Value: 5}, + }, + stopAfter: -1, + }, + { + name: "multiple elements reverse", + values: []interface{}{1, 2, 3, 4, 5}, + start: 4, + end: 0, + expected: []Entry{ + {Index: 4, Value: 5}, + {Index: 3, Value: 4}, + {Index: 2, Value: 3}, + {Index: 1, Value: 2}, + {Index: 0, Value: 1}, + }, + stopAfter: -1, + }, + { + name: "partial range forward", + values: []interface{}{1, 2, 3, 4, 5}, + start: 1, + end: 3, + expected: []Entry{ + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + {Index: 3, Value: 4}, + }, + stopAfter: -1, + }, + { + name: "partial range reverse", + values: []interface{}{1, 2, 3, 4, 5}, + start: 3, + end: 1, + expected: []Entry{ + {Index: 3, Value: 4}, + {Index: 2, Value: 3}, + {Index: 1, Value: 2}, + }, + stopAfter: -1, + }, + { + name: "stop iteration early", + values: []interface{}{1, 2, 3, 4, 5}, + start: 0, + end: 4, + wantStop: true, + stopAfter: 2, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 1, Value: 2}, + }, + }, + { + name: "negative start", + values: []interface{}{1, 2, 3}, + start: -1, + end: 2, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + }, + stopAfter: -1, + }, + { + name: "negative end", + values: 
[]interface{}{1, 2, 3}, + start: 0, + end: -2, + expected: []Entry{ + {Index: 0, Value: 1}, + }, + stopAfter: -1, + }, + { + name: "start beyond size", + values: []interface{}{1, 2, 3}, + start: 5, + end: 6, + expected: []Entry{}, + stopAfter: -1, + }, + { + name: "end beyond size", + values: []interface{}{1, 2, 3}, + start: 0, + end: 5, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + }, + stopAfter: -1, + }, + { + name: "with deleted elements", + values: []interface{}{1, 2, nil, 4, 5}, + start: 0, + end: 4, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 1, Value: 2}, + {Index: 3, Value: 4}, + {Index: 4, Value: 5}, + }, + stopAfter: -1, + }, + { + name: "with deleted elements reverse", + values: []interface{}{1, nil, 3, nil, 5}, + start: 4, + end: 0, + expected: []Entry{ + {Index: 4, Value: 5}, + {Index: 2, Value: 3}, + {Index: 0, Value: 1}, + }, + stopAfter: -1, + }, + { + name: "start equals end", + values: []interface{}{1, 2, 3}, + start: 1, + end: 1, + expected: []Entry{{Index: 1, Value: 2}}, + stopAfter: -1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + list := New() + list.Append(tt.values...) 
+ + var result []Entry + stopped := list.Iterator(tt.start, tt.end, func(index int, value interface{}) bool { + result = append(result, Entry{Index: index, Value: value}) + return tt.stopAfter >= 0 && len(result) >= tt.stopAfter + }) + + uassert.Equal(t, len(result), len(tt.expected), "comparing length") + + for i := range result { + uassert.Equal(t, result[i].Index, tt.expected[i].Index, "comparing index") + uassert.Equal(t, typeutil.ToString(result[i].Value), typeutil.ToString(tt.expected[i].Value), "comparing value") + } + + uassert.Equal(t, stopped, tt.wantStop, "comparing stopped") + }) + } +} + +func TestLargeListAppendGetAndDelete(t *testing.T) { + l := New() + size := 100 + + // Append values from 0 to 99 + for i := 0; i < size; i++ { + l.Append(i) + val := l.Get(i) + uassert.Equal(t, i, val) + } + + // Verify size + uassert.Equal(t, size, l.Size()) + uassert.Equal(t, size, l.TotalSize()) + + // Get and verify each value + for i := 0; i < size; i++ { + val := l.Get(i) + uassert.Equal(t, i, val) + } + + // Get and verify each value + for i := 0; i < size; i++ { + err := l.Delete(i) + uassert.Equal(t, nil, err) + } + + // Verify size + uassert.Equal(t, 0, l.Size()) + uassert.Equal(t, size, l.TotalSize()) + + // Get and verify each value + for i := 0; i < size; i++ { + val := l.Get(i) + uassert.Equal(t, nil, val) + } +} + +func TestEdgeCases(t *testing.T) { + tests := []struct { + name string + test func(t *testing.T) + }{ + { + name: "nil list operations", + test: func(t *testing.T) { + var l *List + uassert.Equal(t, 0, l.Size()) + uassert.Equal(t, 0, l.TotalSize()) + uassert.Equal(t, nil, l.Get(0)) + err := l.Delete(0) + uassert.Equal(t, ErrOutOfBounds.Error(), err.Error()) + }, + }, + { + name: "delete empty indices slice", + test: func(t *testing.T) { + l := New() + l.Append(1) + err := l.Delete() + uassert.Equal(t, nil, err) + uassert.Equal(t, 1, l.Size()) + }, + }, + { + name: "append nil values", + test: func(t *testing.T) { + l := New() + l.Append(nil, 
nil) + uassert.Equal(t, 2, l.Size()) + uassert.Equal(t, nil, l.Get(0)) + uassert.Equal(t, nil, l.Get(1)) + }, + }, + { + name: "delete same index multiple times", + test: func(t *testing.T) { + l := New() + l.Append(1, 2, 3) + err := l.Delete(1) + uassert.Equal(t, nil, err) + err = l.Delete(1) + uassert.Equal(t, ErrDeleted.Error(), err.Error()) + }, + }, + { + name: "iterator with all deleted elements", + test: func(t *testing.T) { + l := New() + l.Append(1, 2, 3) + l.Delete(0, 1, 2) + var count int + l.Iterator(0, 2, func(index int, value interface{}) bool { + count++ + return false + }) + uassert.Equal(t, 0, count) + }, + }, + { + name: "append after delete", + test: func(t *testing.T) { + l := New() + l.Append(1, 2) + l.Delete(1) + l.Append(3) + uassert.Equal(t, 2, l.Size()) + uassert.Equal(t, 3, l.TotalSize()) + uassert.Equal(t, 1, l.Get(0)) + uassert.Equal(t, nil, l.Get(1)) + uassert.Equal(t, 3, l.Get(2)) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.test(t) + }) + } +} + +func TestIteratorByOffset(t *testing.T) { + tests := []struct { + name string + values []interface{} + offset int + count int + expected []Entry + wantStop bool + }{ + { + name: "empty list", + values: []interface{}{}, + offset: 0, + count: 5, + expected: []Entry{}, + wantStop: false, + }, + { + name: "positive count forward iteration", + values: []interface{}{1, 2, 3, 4, 5}, + offset: 1, + count: 2, + expected: []Entry{ + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + }, + wantStop: false, + }, + { + name: "negative count backward iteration", + values: []interface{}{1, 2, 3, 4, 5}, + offset: 3, + count: -2, + expected: []Entry{ + {Index: 3, Value: 4}, + {Index: 2, Value: 3}, + }, + wantStop: false, + }, + { + name: "count exceeds available elements forward", + values: []interface{}{1, 2, 3}, + offset: 1, + count: 5, + expected: []Entry{ + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + }, + wantStop: false, + }, + { + name: "count exceeds available 
elements backward", + values: []interface{}{1, 2, 3}, + offset: 1, + count: -5, + expected: []Entry{ + {Index: 1, Value: 2}, + {Index: 0, Value: 1}, + }, + wantStop: false, + }, + { + name: "zero count", + values: []interface{}{1, 2, 3}, + offset: 0, + count: 0, + expected: []Entry{}, + wantStop: false, + }, + { + name: "negative offset", + values: []interface{}{1, 2, 3}, + offset: -1, + count: 2, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 1, Value: 2}, + }, + wantStop: false, + }, + { + name: "offset beyond size", + values: []interface{}{1, 2, 3}, + offset: 5, + count: -2, + expected: []Entry{ + {Index: 2, Value: 3}, + {Index: 1, Value: 2}, + }, + wantStop: false, + }, + { + name: "with deleted elements", + values: []interface{}{1, nil, 3, nil, 5}, + offset: 0, + count: 3, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 2, Value: 3}, + {Index: 4, Value: 5}, + }, + wantStop: false, + }, + { + name: "early stop in forward iteration", + values: []interface{}{1, 2, 3, 4, 5}, + offset: 0, + count: 5, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 1, Value: 2}, + }, + wantStop: true, // The callback will return true after 2 elements + }, + { + name: "early stop in backward iteration", + values: []interface{}{1, 2, 3, 4, 5}, + offset: 4, + count: -5, + expected: []Entry{ + {Index: 4, Value: 5}, + {Index: 3, Value: 4}, + }, + wantStop: true, // The callback will return true after 2 elements + }, + { + name: "nil list", + values: nil, + offset: 0, + count: 5, + expected: []Entry{}, + wantStop: false, + }, + { + name: "single element forward", + values: []interface{}{1}, + offset: 0, + count: 5, + expected: []Entry{ + {Index: 0, Value: 1}, + }, + wantStop: false, + }, + { + name: "single element backward", + values: []interface{}{1}, + offset: 0, + count: -5, + expected: []Entry{ + {Index: 0, Value: 1}, + }, + wantStop: false, + }, + { + name: "all deleted elements", + values: []interface{}{nil, nil, nil}, + offset: 0, + count: 3, + expected: 
[]Entry{}, + wantStop: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + list := New() + list.Append(tt.values...) + + var result []Entry + var cb IterCbFn + if tt.wantStop { + cb = func(index int, value interface{}) bool { + result = append(result, Entry{Index: index, Value: value}) + return len(result) >= 2 // Stop after 2 elements for early stop tests + } + } else { + cb = func(index int, value interface{}) bool { + result = append(result, Entry{Index: index, Value: value}) + return false + } + } + + stopped := list.IteratorByOffset(tt.offset, tt.count, cb) + + uassert.Equal(t, len(tt.expected), len(result), "comparing length") + for i := range result { + uassert.Equal(t, tt.expected[i].Index, result[i].Index, "comparing index") + uassert.Equal(t, typeutil.ToString(tt.expected[i].Value), typeutil.ToString(result[i].Value), "comparing value") + } + uassert.Equal(t, tt.wantStop, stopped, "comparing stopped") + }) + } +} + +func TestMustDelete(t *testing.T) { + tests := []struct { + name string + setup func() *List + indices []int + shouldPanic bool + panicMsg string + }{ + { + name: "successful delete", + setup: func() *List { + l := New() + l.Append(1, 2, 3) + return l + }, + indices: []int{1}, + shouldPanic: false, + }, + { + name: "out of bounds", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + indices: []int{1}, + shouldPanic: true, + panicMsg: ErrOutOfBounds.Error(), + }, + { + name: "already deleted", + setup: func() *List { + l := New() + l.Append(1) + l.Delete(0) + return l + }, + indices: []int{0}, + shouldPanic: true, + panicMsg: ErrDeleted.Error(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := tt.setup() + if tt.shouldPanic { + defer func() { + r := recover() + if r == nil { + t.Error("Expected panic but got none") + } + err, ok := r.(error) + if !ok { + t.Errorf("Expected error but got %v", r) + } + uassert.Equal(t, tt.panicMsg, err.Error()) + }() + } + 
l.MustDelete(tt.indices...) + if tt.shouldPanic { + t.Error("Expected panic") + } + }) + } +} + +func TestMustGet(t *testing.T) { + tests := []struct { + name string + setup func() *List + index int + expected interface{} + shouldPanic bool + panicMsg string + }{ + { + name: "successful get", + setup: func() *List { + l := New() + l.Append(42) + return l + }, + index: 0, + expected: 42, + shouldPanic: false, + }, + { + name: "out of bounds negative", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: -1, + shouldPanic: true, + panicMsg: ErrOutOfBounds.Error(), + }, + { + name: "out of bounds positive", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: 1, + shouldPanic: true, + panicMsg: ErrOutOfBounds.Error(), + }, + { + name: "deleted element", + setup: func() *List { + l := New() + l.Append(1) + l.Delete(0) + return l + }, + index: 0, + shouldPanic: true, + panicMsg: ErrDeleted.Error(), + }, + { + name: "nil list", + setup: func() *List { + return nil + }, + index: 0, + shouldPanic: true, + panicMsg: ErrOutOfBounds.Error(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := tt.setup() + if tt.shouldPanic { + defer func() { + r := recover() + if r == nil { + t.Error("Expected panic but got none") + } + err, ok := r.(error) + if !ok { + t.Errorf("Expected error but got %v", r) + } + uassert.Equal(t, tt.panicMsg, err.Error()) + }() + } + result := l.MustGet(tt.index) + if tt.shouldPanic { + t.Error("Expected panic") + } + uassert.Equal(t, typeutil.ToString(tt.expected), typeutil.ToString(result)) + }) + } +} + +func TestGetRange(t *testing.T) { + tests := []struct { + name string + values []interface{} + start int + end int + expected []Entry + }{ + { + name: "empty list", + values: []interface{}{}, + start: 0, + end: 10, + expected: []Entry{}, + }, + { + name: "single element", + values: []interface{}{42}, + start: 0, + end: 0, + expected: []Entry{ + {Index: 0, Value: 42}, + }, + 
}, + { + name: "multiple elements forward", + values: []interface{}{1, 2, 3, 4, 5}, + start: 1, + end: 3, + expected: []Entry{ + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + {Index: 3, Value: 4}, + }, + }, + { + name: "multiple elements reverse", + values: []interface{}{1, 2, 3, 4, 5}, + start: 3, + end: 1, + expected: []Entry{ + {Index: 3, Value: 4}, + {Index: 2, Value: 3}, + {Index: 1, Value: 2}, + }, + }, + { + name: "with deleted elements", + values: []interface{}{1, nil, 3, nil, 5}, + start: 0, + end: 4, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 2, Value: 3}, + {Index: 4, Value: 5}, + }, + }, + { + name: "nil list", + values: nil, + start: 0, + end: 5, + expected: []Entry{}, + }, + { + name: "negative indices", + values: []interface{}{1, 2, 3}, + start: -1, + end: -2, + expected: []Entry{}, + }, + { + name: "indices beyond size", + values: []interface{}{1, 2, 3}, + start: 1, + end: 5, + expected: []Entry{ + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + list := New() + list.Append(tt.values...) 
+ + result := list.GetRange(tt.start, tt.end) + + uassert.Equal(t, len(tt.expected), len(result), "comparing length") + for i := range result { + uassert.Equal(t, tt.expected[i].Index, result[i].Index, "comparing index") + uassert.Equal(t, typeutil.ToString(tt.expected[i].Value), typeutil.ToString(result[i].Value), "comparing value") + } + }) + } +} + +func TestGetByOffset(t *testing.T) { + tests := []struct { + name string + values []interface{} + offset int + count int + expected []Entry + }{ + { + name: "empty list", + values: []interface{}{}, + offset: 0, + count: 5, + expected: []Entry{}, + }, + { + name: "positive count forward", + values: []interface{}{1, 2, 3, 4, 5}, + offset: 1, + count: 2, + expected: []Entry{ + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + }, + }, + { + name: "negative count backward", + values: []interface{}{1, 2, 3, 4, 5}, + offset: 3, + count: -2, + expected: []Entry{ + {Index: 3, Value: 4}, + {Index: 2, Value: 3}, + }, + }, + { + name: "count exceeds available elements", + values: []interface{}{1, 2, 3}, + offset: 1, + count: 5, + expected: []Entry{ + {Index: 1, Value: 2}, + {Index: 2, Value: 3}, + }, + }, + { + name: "zero count", + values: []interface{}{1, 2, 3}, + offset: 0, + count: 0, + expected: []Entry{}, + }, + { + name: "with deleted elements", + values: []interface{}{1, nil, 3, nil, 5}, + offset: 0, + count: 3, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 2, Value: 3}, + {Index: 4, Value: 5}, + }, + }, + { + name: "negative offset", + values: []interface{}{1, 2, 3}, + offset: -1, + count: 2, + expected: []Entry{ + {Index: 0, Value: 1}, + {Index: 1, Value: 2}, + }, + }, + { + name: "offset beyond size", + values: []interface{}{1, 2, 3}, + offset: 5, + count: -2, + expected: []Entry{ + {Index: 2, Value: 3}, + {Index: 1, Value: 2}, + }, + }, + { + name: "nil list", + values: nil, + offset: 0, + count: 5, + expected: []Entry{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + list := 
New() + list.Append(tt.values...) + + result := list.GetByOffset(tt.offset, tt.count) + + uassert.Equal(t, len(tt.expected), len(result), "comparing length") + for i := range result { + uassert.Equal(t, tt.expected[i].Index, result[i].Index, "comparing index") + uassert.Equal(t, typeutil.ToString(tt.expected[i].Value), typeutil.ToString(result[i].Value), "comparing value") + } + }) + } +} + +func TestMustSet(t *testing.T) { + tests := []struct { + name string + setup func() *List + index int + value interface{} + shouldPanic bool + panicMsg string + }{ + { + name: "successful set", + setup: func() *List { + l := New() + l.Append(42) + return l + }, + index: 0, + value: 99, + shouldPanic: false, + }, + { + name: "restore deleted element", + setup: func() *List { + l := New() + l.Append(42) + l.Delete(0) + return l + }, + index: 0, + value: 99, + shouldPanic: false, + }, + { + name: "out of bounds negative", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: -1, + value: 99, + shouldPanic: true, + panicMsg: ErrOutOfBounds.Error(), + }, + { + name: "out of bounds positive", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: 1, + value: 99, + shouldPanic: true, + panicMsg: ErrOutOfBounds.Error(), + }, + { + name: "nil list", + setup: func() *List { + return nil + }, + index: 0, + value: 99, + shouldPanic: true, + panicMsg: ErrOutOfBounds.Error(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := tt.setup() + if tt.shouldPanic { + defer func() { + r := recover() + if r == nil { + t.Error("Expected panic but got none") + } + err, ok := r.(error) + if !ok { + t.Errorf("Expected error but got %v", r) + } + uassert.Equal(t, tt.panicMsg, err.Error()) + }() + } + l.MustSet(tt.index, tt.value) + if tt.shouldPanic { + t.Error("Expected panic") + } + // Verify the value was set correctly for non-panic cases + if !tt.shouldPanic { + result := l.Get(tt.index) + uassert.Equal(t, 
typeutil.ToString(tt.value), typeutil.ToString(result)) + } + }) + } +} + +func TestSet(t *testing.T) { + tests := []struct { + name string + setup func() *List + index int + value interface{} + expectedErr error + verify func(t *testing.T, l *List) + }{ + { + name: "set value in empty list", + setup: func() *List { + return New() + }, + index: 0, + value: 42, + expectedErr: ErrOutOfBounds, + verify: func(t *testing.T, l *List) { + uassert.Equal(t, 0, l.Size()) + }, + }, + { + name: "set value at valid index", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: 0, + value: 42, + verify: func(t *testing.T, l *List) { + uassert.Equal(t, 42, l.Get(0)) + uassert.Equal(t, 1, l.Size()) + uassert.Equal(t, 1, l.TotalSize()) + }, + }, + { + name: "set value at negative index", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: -1, + value: 42, + expectedErr: ErrOutOfBounds, + verify: func(t *testing.T, l *List) { + uassert.Equal(t, 1, l.Get(0)) + }, + }, + { + name: "set value beyond size", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: 1, + value: 42, + expectedErr: ErrOutOfBounds, + verify: func(t *testing.T, l *List) { + uassert.Equal(t, 1, l.Get(0)) + uassert.Equal(t, 1, l.Size()) + }, + }, + { + name: "set nil value", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: 0, + value: nil, + verify: func(t *testing.T, l *List) { + uassert.Equal(t, nil, l.Get(0)) + uassert.Equal(t, 0, l.Size()) + }, + }, + { + name: "set value at deleted index", + setup: func() *List { + l := New() + l.Append(1, 2, 3) + l.Delete(1) + return l + }, + index: 1, + value: 42, + verify: func(t *testing.T, l *List) { + uassert.Equal(t, 42, l.Get(1)) + uassert.Equal(t, 3, l.Size()) + uassert.Equal(t, 3, l.TotalSize()) + }, + }, + { + name: "set value in nil list", + setup: func() *List { + return nil + }, + index: 0, + value: 42, + expectedErr: ErrOutOfBounds, + verify: func(t *testing.T, l *List) 
{ + uassert.Equal(t, 0, l.Size()) + }, + }, + { + name: "set multiple values at same index", + setup: func() *List { + l := New() + l.Append(1) + return l + }, + index: 0, + value: 42, + verify: func(t *testing.T, l *List) { + uassert.Equal(t, 42, l.Get(0)) + err := l.Set(0, 99) + uassert.Equal(t, nil, err) + uassert.Equal(t, 99, l.Get(0)) + uassert.Equal(t, 1, l.Size()) + }, + }, + { + name: "set value at last index", + setup: func() *List { + l := New() + l.Append(1, 2, 3) + return l + }, + index: 2, + value: 42, + verify: func(t *testing.T, l *List) { + uassert.Equal(t, 42, l.Get(2)) + uassert.Equal(t, 3, l.Size()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := tt.setup() + err := l.Set(tt.index, tt.value) + + if tt.expectedErr != nil { + uassert.Equal(t, tt.expectedErr.Error(), err.Error()) + } else { + uassert.Equal(t, nil, err) + } + + tt.verify(t, l) + }) + } +} diff --git a/examples/gno.land/p/n2p5/mgroup/mgroup_test.gno b/examples/gno.land/p/n2p5/mgroup/mgroup_test.gno index 7ef0619188f..cd02db98683 100644 --- a/examples/gno.land/p/n2p5/mgroup/mgroup_test.gno +++ b/examples/gno.land/p/n2p5/mgroup/mgroup_test.gno @@ -297,13 +297,13 @@ func TestManagedGroup(t *testing.T) { if len(owners) != 3 { t.Errorf("expected 2, got %v", len(owners)) } - if owners[0] != u2.String() { + if owners[0] != u1.String() { t.Errorf("expected %v, got %v", u2, owners[0]) } if owners[1] != u3.String() { t.Errorf("expected %v, got %v", u3, owners[1]) } - if owners[2] != u1.String() { + if owners[2] != u2.String() { t.Errorf("expected %v, got %v", u3, owners[1]) } }) @@ -317,13 +317,13 @@ func TestManagedGroup(t *testing.T) { if len(members) != 3 { t.Errorf("expected 2, got %v", len(members)) } - if members[0] != u2.String() { + if members[0] != u1.String() { t.Errorf("expected %v, got %v", u2, members[0]) } if members[1] != u3.String() { t.Errorf("expected %v, got %v", u3, members[1]) } - if members[2] != u1.String() { + if members[2] != 
u2.String() { t.Errorf("expected %v, got %v", u3, members[1]) } }) diff --git a/examples/gno.land/r/demo/bar20/bar20.gno b/examples/gno.land/r/demo/bar20/bar20.gno index 25636fcda78..52f1baa7408 100644 --- a/examples/gno.land/r/demo/bar20/bar20.gno +++ b/examples/gno.land/r/demo/bar20/bar20.gno @@ -18,8 +18,7 @@ var ( ) func init() { - getter := func() *grc20.Token { return Token } - grc20reg.Register(getter, "") + grc20reg.Register(Token.Getter(), "") } func Faucet() string { diff --git a/examples/gno.land/r/demo/foo20/foo20.gno b/examples/gno.land/r/demo/foo20/foo20.gno index 5c7d7f12b99..6522fbdc90e 100644 --- a/examples/gno.land/r/demo/foo20/foo20.gno +++ b/examples/gno.land/r/demo/foo20/foo20.gno @@ -22,8 +22,7 @@ var ( func init() { privateLedger.Mint(Ownable.Owner(), 1_000_000*10_000) // @privateLedgeristrator (1M) - getter := func() *grc20.Token { return Token } - grc20reg.Register(getter, "") + grc20reg.Register(Token.Getter(), "") } func TotalSupply() uint64 { diff --git a/examples/gno.land/r/demo/grc20factory/grc20factory.gno b/examples/gno.land/r/demo/grc20factory/grc20factory.gno index 58874409d7f..aa91084ab32 100644 --- a/examples/gno.land/r/demo/grc20factory/grc20factory.gno +++ b/examples/gno.land/r/demo/grc20factory/grc20factory.gno @@ -43,8 +43,7 @@ func NewWithAdmin(name, symbol string, decimals uint, initialMint, faucet uint64 faucet: faucet, } instances.Set(symbol, &inst) - getter := func() *grc20.Token { return token } - grc20reg.Register(getter, symbol) + grc20reg.Register(token.Getter(), symbol) } func (inst instance) Token() *grc20.Token { diff --git a/examples/gno.land/r/demo/tests/test20/gno.mod b/examples/gno.land/r/demo/tests/test20/gno.mod new file mode 100644 index 00000000000..7a71668d2df --- /dev/null +++ b/examples/gno.land/r/demo/tests/test20/gno.mod @@ -0,0 +1 @@ +module gno.land/r/demo/tests/test20 diff --git a/examples/gno.land/r/demo/tests/test20/test20.gno b/examples/gno.land/r/demo/tests/test20/test20.gno new file mode 
100644 index 00000000000..9c4df58d1c4 --- /dev/null +++ b/examples/gno.land/r/demo/tests/test20/test20.gno @@ -0,0 +1,20 @@ +// Package test20 implements a deliberately insecure ERC20 token for testing purposes. +// The Test20 token allows anyone to mint any amount of tokens to any address, making +// it unsuitable for production use. The primary goal of this package is to facilitate +// testing and experimentation without any security measures or restrictions. +// +// WARNING: This token is highly insecure and should not be used in any +// production environment. It is intended solely for testing and +// educational purposes. +package test20 + +import ( + "gno.land/p/demo/grc/grc20" + "gno.land/r/demo/grc20reg" +) + +var Token, PrivateLedger = grc20.NewToken("Test20", "TST", 4) + +func init() { + grc20reg.Register(Token.Getter(), "") +} diff --git a/examples/gno.land/r/demo/wugnot/wugnot.gno b/examples/gno.land/r/demo/wugnot/wugnot.gno index 09538b860ca..b72f5161e7d 100644 --- a/examples/gno.land/r/demo/wugnot/wugnot.gno +++ b/examples/gno.land/r/demo/wugnot/wugnot.gno @@ -19,8 +19,7 @@ const ( ) func init() { - getter := func() *grc20.Token { return Token } - grc20reg.Register(getter, "") + grc20reg.Register(Token.Getter(), "") } func Deposit() { diff --git a/examples/gno.land/r/nemanya/config/config.gno b/examples/gno.land/r/nemanya/config/config.gno new file mode 100644 index 00000000000..795e48c94c1 --- /dev/null +++ b/examples/gno.land/r/nemanya/config/config.gno @@ -0,0 +1,63 @@ +package config + +import ( + "errors" + "std" +) + +var ( + main std.Address + backup std.Address + + ErrInvalidAddr = errors.New("Invalid address") + ErrUnauthorized = errors.New("Unauthorized") +) + +func init() { + main = "g1x9qyf6f34v2g52k4q5smn5tctmj3hl2kj7l2ql" +} + +func Address() std.Address { + return main +} + +func Backup() std.Address { + return backup +} + +func SetAddress(a std.Address) error { + if !a.IsValid() { + return ErrInvalidAddr + } + + if err := 
checkAuthorized(); err != nil { + return err + } + + main = a + return nil +} + +func SetBackup(a std.Address) error { + if !a.IsValid() { + return ErrInvalidAddr + } + + if err := checkAuthorized(); err != nil { + return err + } + + backup = a + return nil +} + +func checkAuthorized() error { + caller := std.PrevRealm().Addr() + isAuthorized := caller == main || caller == backup + + if !isAuthorized { + return ErrUnauthorized + } + + return nil +} diff --git a/examples/gno.land/r/nemanya/config/gno.mod b/examples/gno.land/r/nemanya/config/gno.mod new file mode 100644 index 00000000000..4388b5bd525 --- /dev/null +++ b/examples/gno.land/r/nemanya/config/gno.mod @@ -0,0 +1 @@ +module gno.land/r/nemanya/config diff --git a/examples/gno.land/r/nemanya/home/gno.mod b/examples/gno.land/r/nemanya/home/gno.mod new file mode 100644 index 00000000000..d0220197489 --- /dev/null +++ b/examples/gno.land/r/nemanya/home/gno.mod @@ -0,0 +1 @@ +module gno.land/r/nemanya/home diff --git a/examples/gno.land/r/nemanya/home/home.gno b/examples/gno.land/r/nemanya/home/home.gno new file mode 100644 index 00000000000..08e24baecfd --- /dev/null +++ b/examples/gno.land/r/nemanya/home/home.gno @@ -0,0 +1,280 @@ +package home + +import ( + "std" + "strings" + + "gno.land/p/demo/ufmt" + "gno.land/r/nemanya/config" +) + +type SocialLink struct { + URL string + Text string +} + +type Sponsor struct { + Address std.Address + Amount std.Coins +} + +type Project struct { + Name string + Description string + URL string + ImageURL string + Sponsors map[std.Address]Sponsor +} + +var ( + textArt string + aboutMe string + sponsorInfo string + socialLinks map[string]SocialLink + gnoProjects map[string]Project + otherProjects map[string]Project + totalDonations std.Coins +) + +func init() { + textArt = renderTextArt() + aboutMe = "I am a student of IT at Faculty of Sciences in Novi Sad, Serbia. 
My background is mainly in web and low-level programming, but since Web3 Bootcamp at Petnica this year I've been actively learning about blockchain and adjacent technologies. I am excited about contributing to the gno.land ecosystem and learning from the community.\n\n" + sponsorInfo = "You can sponsor a project by sending GNOT to this address. Your sponsorship will be displayed on the project page. Thank you for supporting the development of gno.land!\n\n" + + socialLinks = map[string]SocialLink{ + "GitHub": {URL: "https://github.com/Nemanya8", Text: "Explore my repositories and open-source contributions."}, + "LinkedIn": {URL: "https://www.linkedin.com/in/nemanjamatic/", Text: "Connect with me professionally."}, + "Email Me": {URL: "mailto:matic.nemanya@gmail.com", Text: "Reach out for collaboration or inquiries."}, + } + + gnoProjects = make(map[string]Project) + otherProjects = make(map[string]Project) + + gnoProjects["Liberty Bridge"] = Project{ + Name: "Liberty Bridge", + Description: "Liberty Bridge was my first Web3 project, developed as part of the Web3 Bootcamp at Petnica. This project served as a centralized bridge between Ethereum and gno.land, enabling seamless asset transfers and fostering interoperability between the two ecosystems.\n\n The primary objective of Liberty Bridge was to address the challenges of connecting decentralized networks by implementing a user-friendly solution that simplified the process for users. 
The project incorporated mechanisms to securely transfer assets between the Ethereum and gno.land blockchains, ensuring efficiency and reliability while maintaining a centralized framework for governance and operations.\n\n Through this project, I gained hands-on knowledge of blockchain interoperability, Web3 protocols, and the intricacies of building solutions that bridge different blockchain ecosystems.\n\n", + URL: "https://gno.land", + ImageURL: "https://github.com/Milosevic02/LibertyBridge/raw/main/lb_banner.png", + Sponsors: make(map[std.Address]Sponsor), + } + + otherProjects["Incognito"] = Project{ + Name: "Incognito", + Description: "Incognito is a Web3 platform built for Ethereum-based chains, designed to connect advertisers with users in a privacy-first and mutually beneficial way. Its modular architecture makes it easily expandable to other blockchains. Developed during the ETH Sofia Hackathon, it was recognized as a winning project for its innovation and impact.\n\n The platform allows advertisers to send personalized ads while sharing a portion of the marketing budget with users. It uses machine learning to match users based on wallet activity, ensuring precise targeting. User emails are stored securely on-chain and never shared, prioritizing privacy and transparency.\n\n With all campaign data stored on-chain, Incognito ensures decentralization and accountability. 
By rewarding users and empowering advertisers, it sets a new standard for fair and transparent blockchain-based advertising.", + URL: "https://github.com/Milosevic02/Incognito-ETHSofia", + ImageURL: "", + Sponsors: make(map[std.Address]Sponsor), + } +} + +func Render(path string) string { + var sb strings.Builder + sb.WriteString("# Hi, I'm\n") + sb.WriteString(textArt) + sb.WriteString("---\n") + sb.WriteString("## About me\n") + sb.WriteString(aboutMe) + sb.WriteString(sponsorInfo) + sb.WriteString(ufmt.Sprintf("# Total Sponsor Donations: %s\n", totalDonations.String())) + sb.WriteString("---\n") + sb.WriteString(renderProjects(gnoProjects, "Gno Projects")) + sb.WriteString("---\n") + sb.WriteString(renderProjects(otherProjects, "Other Projects")) + sb.WriteString("---\n") + sb.WriteString(renderSocialLinks()) + + return sb.String() +} + +func renderTextArt() string { + var sb strings.Builder + sb.WriteString("```\n") + sb.WriteString(" ___ ___ ___ ___ ___ ___ ___ \n") + sb.WriteString(" /\\__\\ /\\ \\ /\\__\\ /\\ \\ /\\__\\ |\\__\\ /\\ \\ \n") + sb.WriteString(" /::| | /::\\ \\ /::| | /::\\ \\ /::| | |:| | /::\\ \\ \n") + sb.WriteString(" /:|:| | /:/\\:\\ \\ /:|:| | /:/\\:\\ \\ /:|:| | |:| | /:/\\:\\ \\ \n") + sb.WriteString(" /:/|:| |__ /::\\~\\:\\ \\ /:/|:|__|__ /::\\~\\:\\ \\ /:/|:| |__ |:|__|__ /::\\~\\:\\ \\ \n") + sb.WriteString(" /:/ |:| /\\__\\ /:/\\:\\ \\:\\__\\ /:/ |::::\\__\\ /:/\\:\\ \\:\\__\\ /:/ |:| /\\__\\ /::::\\__\\ /:/\\:\\ \\:\\__\\\n") + sb.WriteString(" \\/__|:|/:/ / \\:\\~\\:\\ \\/__/ \\/__/~~/:/ / \\/__\\:\\/:/ / \\/__|:|/:/ / /:/~~/~ \\/__\\:\\/:/ / \n") + sb.WriteString(" |:/:/ / \\:\\ \\:\\__\\ /:/ / \\::/ / |:/:/ / /:/ / \\::/ / \n") + sb.WriteString(" |::/ / \\:\\ \\/__/ /:/ / /:/ / |::/ / \\/__/ /:/ / \n") + sb.WriteString(" /:/ / \\:\\__\\ /:/ / /:/ / /:/ / /:/ / \n") + sb.WriteString(" \\/__/ \\/__/ \\/__/ \\/__/ \\/__/ \\/__/ \n") + sb.WriteString("\n```\n") + return sb.String() +} + +func renderSocialLinks() string { + var sb 
strings.Builder + sb.WriteString("## Links\n\n") + sb.WriteString("You can find me here:\n\n") + sb.WriteString(ufmt.Sprintf("- [GitHub](%s) - %s\n", socialLinks["GitHub"].URL, socialLinks["GitHub"].Text)) + sb.WriteString(ufmt.Sprintf("- [LinkedIn](%s) - %s\n", socialLinks["LinkedIn"].URL, socialLinks["LinkedIn"].Text)) + sb.WriteString(ufmt.Sprintf("- [Email Me](%s) - %s\n", socialLinks["Email Me"].URL, socialLinks["Email Me"].Text)) + sb.WriteString("\n") + return sb.String() +} + +func renderProjects(projectsMap map[string]Project, title string) string { + var sb strings.Builder + sb.WriteString(ufmt.Sprintf("## %s\n\n", title)) + for _, project := range projectsMap { + if project.ImageURL != "" { + sb.WriteString(ufmt.Sprintf("![%s](%s)\n\n", project.Name, project.ImageURL)) + } + sb.WriteString(ufmt.Sprintf("### [%s](%s)\n\n", project.Name, project.URL)) + sb.WriteString(project.Description + "\n\n") + + if len(project.Sponsors) > 0 { + sb.WriteString(ufmt.Sprintf("#### %s Sponsors\n", project.Name)) + for _, sponsor := range project.Sponsors { + sb.WriteString(ufmt.Sprintf("- %s: %s\n", sponsor.Address.String(), sponsor.Amount.String())) + } + sb.WriteString("\n") + } + } + return sb.String() +} + +func UpdateLink(name, newURL string) { + if !isAuthorized(std.PrevRealm().Addr()) { + panic(config.ErrUnauthorized) + } + + if _, exists := socialLinks[name]; !exists { + panic("Link with the given name does not exist") + } + + socialLinks[name] = SocialLink{ + URL: newURL, + Text: socialLinks[name].Text, + } +} + +func UpdateAboutMe(text string) { + if !isAuthorized(std.PrevRealm().Addr()) { + panic(config.ErrUnauthorized) + } + + aboutMe = text +} + +func AddGnoProject(name, description, url, imageURL string) { + if !isAuthorized(std.PrevRealm().Addr()) { + panic(config.ErrUnauthorized) + } + project := Project{ + Name: name, + Description: description, + URL: url, + ImageURL: imageURL, + Sponsors: make(map[std.Address]Sponsor), + } + gnoProjects[name] = project 
+} + +func DeleteGnoProject(projectName string) { + if !isAuthorized(std.PrevRealm().Addr()) { + panic(config.ErrUnauthorized) + } + + if _, exists := gnoProjects[projectName]; !exists { + panic("Project not found") + } + + delete(gnoProjects, projectName) +} + +func AddOtherProject(name, description, url, imageURL string) { + if !isAuthorized(std.PrevRealm().Addr()) { + panic(config.ErrUnauthorized) + } + project := Project{ + Name: name, + Description: description, + URL: url, + ImageURL: imageURL, + Sponsors: make(map[std.Address]Sponsor), + } + otherProjects[name] = project +} + +func RemoveOtherProject(projectName string) { + if !isAuthorized(std.PrevRealm().Addr()) { + panic(config.ErrUnauthorized) + } + + if _, exists := otherProjects[projectName]; !exists { + panic("Project not found") + } + + delete(otherProjects, projectName) +} + +func isAuthorized(addr std.Address) bool { + return addr == config.Address() || addr == config.Backup() +} + +func SponsorGnoProject(projectName string) { + address := std.GetOrigCaller() + amount := std.GetOrigSend() + + if amount.AmountOf("ugnot") == 0 { + panic("Donation must include GNOT") + } + + project, exists := gnoProjects[projectName] + if !exists { + panic("Gno project not found") + } + + project.Sponsors[address] = Sponsor{ + Address: address, + Amount: project.Sponsors[address].Amount.Add(amount), + } + + totalDonations = totalDonations.Add(amount) + + gnoProjects[projectName] = project +} + +func SponsorOtherProject(projectName string) { + address := std.GetOrigCaller() + amount := std.GetOrigSend() + + if amount.AmountOf("ugnot") == 0 { + panic("Donation must include GNOT") + } + + project, exists := otherProjects[projectName] + if !exists { + panic("Other project not found") + } + + project.Sponsors[address] = Sponsor{ + Address: address, + Amount: project.Sponsors[address].Amount.Add(amount), + } + + totalDonations = totalDonations.Add(amount) + + otherProjects[projectName] = project +} + +func Withdraw() 
string { + if !isAuthorized(std.PrevRealm().Addr()) { + panic(config.ErrUnauthorized) + } + + banker := std.GetBanker(std.BankerTypeRealmSend) + realmAddress := std.GetOrigPkgAddr() + coins := banker.GetCoins(realmAddress) + + if len(coins) == 0 { + return "No coins available to withdraw" + } + + banker.SendCoins(realmAddress, config.Address(), coins) + + return "Successfully withdrew all coins to config address" +} diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/example.gno b/examples/gno.land/r/x/jeronimo_render_proxy/example.gno new file mode 100644 index 00000000000..7b3da098232 --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/example.gno @@ -0,0 +1,20 @@ +package example + +func Render(string) string { + return `# Render Proxy + +This example shows how proxying render calls can be used to allow updating realms to new +versions while keeping the same realm path. The idea is to have a simple "parent" realm +that only keeps track of the latest realm version and forwards all render calls to it. + +By only focusing on the 'Render()' function the proxy realm keeps its public functions +stable allowing each version to update their public functions and exposed types without +needing to also update the proxy realm. + +Any interaction or transaction must be sent to the latest, or target, realm version while +render calls are sent to the proxy realm. + +Each realm version registers itself on deployment as the latest available version, and +its allowed to do so because each versioned realm path shares the proxy realm path. 
+` +} diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/gno.mod b/examples/gno.land/r/x/jeronimo_render_proxy/gno.mod new file mode 100644 index 00000000000..9236b28f5ad --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/gno.mod @@ -0,0 +1 @@ +module gno.land/r/x/jeronimo_render_proxy diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/home/home.gno b/examples/gno.land/r/x/jeronimo_render_proxy/home/home.gno new file mode 100644 index 00000000000..c73e99cc583 --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/home/home.gno @@ -0,0 +1,52 @@ +package home + +import ( + "std" + "strings" +) + +// RenderFn defines the type for the render function of realms. +type RenderFn func(string) string + +var current = struct { + realmPath string + renderFn RenderFn +}{} + +// CurrentRealmPath returns the path of the realm that is currently registered. +func CurrentRealmPath() string { + return current.realmPath +} + +// Register registers a render function of a realm. +func Register(fn RenderFn) { + if fn == nil { + panic("render function must not be nil") + } + + proxyPath := std.CurrentRealm().PkgPath() + callerPath := std.PrevRealm().PkgPath() + if !strings.HasPrefix(callerPath, proxyPath+"/") { + panic("caller realm path must start with " + proxyPath) + } + + current.renderFn = fn + current.realmPath = callerPath +} + +// URL returns a URL that links to the proxy realm. +func URL(renderPath string) string { + url := "http://" + std.CurrentRealm().PkgPath() + if renderPath != "" { + url += ":" + renderPath + } + return url +} + +// Render renders the rendered Markdown of the realm that is currently registered. 
+func Render(path string) string { + if current.renderFn == nil { + panic("no realm has been registered") + } + return current.renderFn(path) +} diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/home/v1/v1.gno b/examples/gno.land/r/x/jeronimo_render_proxy/home/v1/v1.gno new file mode 100644 index 00000000000..8698998577c --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/home/v1/v1.gno @@ -0,0 +1,16 @@ +package v1 + +import "gno.land/r/x/jeronimo_render_proxy/home" + +func init() { + // Register the private render function with the render proxy + home.Register(render) +} + +func render(string) string { + return "Rendered by v1" +} + +func Render(string) string { + return "[Home](" + home.URL("") + ")" +} diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/home/v1/z_filetest.gno b/examples/gno.land/r/x/jeronimo_render_proxy/home/v1/z_filetest.gno new file mode 100644 index 00000000000..cebe2aeb5ba --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/home/v1/z_filetest.gno @@ -0,0 +1,10 @@ +package main + +import "gno.land/r/x/jeronimo_render_proxy/home/v1" + +func main() { + println(v1.Render("")) +} + +// Output: +// [Home](http://gno.land/r/x/jeronimo_render_proxy/home) diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/home/v2/v2.gno b/examples/gno.land/r/x/jeronimo_render_proxy/home/v2/v2.gno new file mode 100644 index 00000000000..031f8568441 --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/home/v2/v2.gno @@ -0,0 +1,16 @@ +package v2 + +import "gno.land/r/x/jeronimo_render_proxy/home" + +func init() { + // Register the private render function with the render proxy + home.Register(render) +} + +func render(string) string { + return "Rendered by v2" +} + +func Render(string) string { + return "[Home](" + home.URL("") + ")" +} diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/home/v2/z_filetest.gno b/examples/gno.land/r/x/jeronimo_render_proxy/home/v2/z_filetest.gno new file mode 100644 index 
00000000000..feff15533ee --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/home/v2/z_filetest.gno @@ -0,0 +1,10 @@ +package main + +import "gno.land/r/x/jeronimo_render_proxy/home/v2" + +func main() { + println(v2.Render("")) +} + +// Output: +// [Home](http://gno.land/r/x/jeronimo_render_proxy/home) diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/home/z_a_filetest.gno b/examples/gno.land/r/x/jeronimo_render_proxy/home/z_a_filetest.gno new file mode 100644 index 00000000000..c7d4d7febd2 --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/home/z_a_filetest.gno @@ -0,0 +1,10 @@ +package main + +import "gno.land/r/x/jeronimo_render_proxy/home" + +func main() { + home.Render("") +} + +// Error: +// no realm has been registered diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/home/z_b_filetest.gno b/examples/gno.land/r/x/jeronimo_render_proxy/home/z_b_filetest.gno new file mode 100644 index 00000000000..6ebdace67b4 --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/home/z_b_filetest.gno @@ -0,0 +1,15 @@ +package main + +import ( + "gno.land/r/x/jeronimo_render_proxy/home" + _ "gno.land/r/x/jeronimo_render_proxy/home/v1" +) + +func main() { + println(home.CurrentRealmPath()) + println(home.Render("")) +} + +// Output: +// gno.land/r/x/jeronimo_render_proxy/home/v1 +// Rendered by v1 diff --git a/examples/gno.land/r/x/jeronimo_render_proxy/home/z_c_filetest.gno b/examples/gno.land/r/x/jeronimo_render_proxy/home/z_c_filetest.gno new file mode 100644 index 00000000000..f85b13bc5dd --- /dev/null +++ b/examples/gno.land/r/x/jeronimo_render_proxy/home/z_c_filetest.gno @@ -0,0 +1,16 @@ +package main + +import ( + "gno.land/r/x/jeronimo_render_proxy/home" + _ "gno.land/r/x/jeronimo_render_proxy/home/v1" + _ "gno.land/r/x/jeronimo_render_proxy/home/v2" +) + +func main() { + println(home.CurrentRealmPath()) + println(home.Render("")) +} + +// Output: +// gno.land/r/x/jeronimo_render_proxy/home/v2 +// Rendered by v2 diff --git 
a/gno.land/pkg/gnoclient/integration_test.go b/gno.land/pkg/gnoclient/integration_test.go index 4b70fb60c49..d3d4e0d2c52 100644 --- a/gno.land/pkg/gnoclient/integration_test.go +++ b/gno.land/pkg/gnoclient/integration_test.go @@ -1,10 +1,12 @@ package gnoclient import ( + "path/filepath" "testing" "github.com/gnolang/gno/gnovm/pkg/gnolang" + "github.com/gnolang/gno/gno.land/pkg/gnoland" "github.com/gnolang/gno/gno.land/pkg/gnoland/ugnot" "github.com/gnolang/gno/gno.land/pkg/integration" "github.com/gnolang/gno/gno.land/pkg/sdk/vm" @@ -21,8 +23,14 @@ import ( ) func TestCallSingle_Integration(t *testing.T) { - // Set up in-memory node - config, _ := integration.TestingNodeConfig(t, gnoenv.RootDir()) + // Setup packages + rootdir := gnoenv.RootDir() + config := integration.TestingMinimalNodeConfig(gnoenv.RootDir()) + meta := loadpkgs(t, rootdir, "gno.land/r/demo/deep/very/deep") + state := config.Genesis.AppState.(gnoland.GnoGenesisState) + state.Txs = append(state.Txs, meta...) + config.Genesis.AppState = state + node, remoteAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), config) defer node.Stop() @@ -74,8 +82,14 @@ func TestCallSingle_Integration(t *testing.T) { } func TestCallMultiple_Integration(t *testing.T) { - // Set up in-memory node - config, _ := integration.TestingNodeConfig(t, gnoenv.RootDir()) + // Setup packages + rootdir := gnoenv.RootDir() + config := integration.TestingMinimalNodeConfig(gnoenv.RootDir()) + meta := loadpkgs(t, rootdir, "gno.land/r/demo/deep/very/deep") + state := config.Genesis.AppState.(gnoland.GnoGenesisState) + state.Txs = append(state.Txs, meta...) 
+ config.Genesis.AppState = state + node, remoteAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), config) defer node.Stop() @@ -137,7 +151,7 @@ func TestCallMultiple_Integration(t *testing.T) { func TestSendSingle_Integration(t *testing.T) { // Set up in-memory node - config, _ := integration.TestingNodeConfig(t, gnoenv.RootDir()) + config := integration.TestingMinimalNodeConfig(gnoenv.RootDir()) node, remoteAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), config) defer node.Stop() @@ -201,7 +215,7 @@ func TestSendSingle_Integration(t *testing.T) { func TestSendMultiple_Integration(t *testing.T) { // Set up in-memory node - config, _ := integration.TestingNodeConfig(t, gnoenv.RootDir()) + config := integration.TestingMinimalNodeConfig(gnoenv.RootDir()) node, remoteAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), config) defer node.Stop() @@ -273,8 +287,15 @@ func TestSendMultiple_Integration(t *testing.T) { // Run tests func TestRunSingle_Integration(t *testing.T) { + // Setup packages + rootdir := gnoenv.RootDir() + config := integration.TestingMinimalNodeConfig(gnoenv.RootDir()) + meta := loadpkgs(t, rootdir, "gno.land/p/demo/ufmt", "gno.land/r/demo/tests") + state := config.Genesis.AppState.(gnoland.GnoGenesisState) + state.Txs = append(state.Txs, meta...) 
+ config.Genesis.AppState = state + // Set up in-memory node - config, _ := integration.TestingNodeConfig(t, gnoenv.RootDir()) node, remoteAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), config) defer node.Stop() @@ -342,7 +363,17 @@ func main() { // Run tests func TestRunMultiple_Integration(t *testing.T) { // Set up in-memory node - config, _ := integration.TestingNodeConfig(t, gnoenv.RootDir()) + rootdir := gnoenv.RootDir() + config := integration.TestingMinimalNodeConfig(rootdir) + meta := loadpkgs(t, rootdir, + "gno.land/p/demo/ufmt", + "gno.land/r/demo/tests", + "gno.land/r/demo/deep/very/deep", + ) + state := config.Genesis.AppState.(gnoland.GnoGenesisState) + state.Txs = append(state.Txs, meta...) + config.Genesis.AppState = state + node, remoteAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), config) defer node.Stop() @@ -434,7 +465,7 @@ func main() { func TestAddPackageSingle_Integration(t *testing.T) { // Set up in-memory node - config, _ := integration.TestingNodeConfig(t, gnoenv.RootDir()) + config := integration.TestingMinimalNodeConfig(gnoenv.RootDir()) node, remoteAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), config) defer node.Stop() @@ -519,7 +550,7 @@ func Echo(str string) string { func TestAddPackageMultiple_Integration(t *testing.T) { // Set up in-memory node - config, _ := integration.TestingNodeConfig(t, gnoenv.RootDir()) + config := integration.TestingMinimalNodeConfig(gnoenv.RootDir()) node, remoteAddr := integration.TestingInMemoryNode(t, log.NewNoopLogger(), config) defer node.Stop() @@ -670,3 +701,24 @@ func newInMemorySigner(t *testing.T, chainid string) *SignerFromKeybase { ChainID: chainid, // Chain ID for transaction signing } } + +func loadpkgs(t *testing.T, rootdir string, paths ...string) []gnoland.TxWithMetadata { + t.Helper() + + loader := integration.NewPkgsLoader() + examplesDir := filepath.Join(rootdir, "examples") + for _, path := range paths { + path = filepath.Clean(path) + 
path = filepath.Join(examplesDir, path) + err := loader.LoadPackage(examplesDir, path, "") + require.NoErrorf(t, err, "`loadpkg` unable to load package(s) from %q: %s", path, err) + } + + creator := crypto.MustAddressFromString(integration.DefaultAccount_Address) + defaultFee := std.NewFee(50000, std.MustParseCoin(ugnot.ValueString(1000000))) + + meta, err := loader.LoadPackages(creator, defaultFee, nil) + require.NoError(t, err) + + return meta +} diff --git a/gno.land/pkg/gnoland/node_inmemory.go b/gno.land/pkg/gnoland/node_inmemory.go index f42166411c8..a89f39b0b4a 100644 --- a/gno.land/pkg/gnoland/node_inmemory.go +++ b/gno.land/pkg/gnoland/node_inmemory.go @@ -23,8 +23,8 @@ type InMemoryNodeConfig struct { PrivValidator bft.PrivValidator // identity of the validator Genesis *bft.GenesisDoc TMConfig *tmcfg.Config - DB *memdb.MemDB // will be initialized if nil - VMOutput io.Writer // optional + DB db.DB // will be initialized if nil + VMOutput io.Writer // optional // If StdlibDir not set, then it's filepath.Join(TMConfig.RootDir, "gnovm", "stdlibs") InitChainerConfig diff --git a/gno.land/pkg/integration/doc.go b/gno.land/pkg/integration/doc.go index ef3ed9923da..3e09d627c9a 100644 --- a/gno.land/pkg/integration/doc.go +++ b/gno.land/pkg/integration/doc.go @@ -76,13 +76,6 @@ // // Input: // -// - LOG_LEVEL: -// The logging level to be used, which can be one of "error", "debug", "info", or an empty string. -// If empty, the log level defaults to "debug". -// -// - LOG_DIR: -// If set, logs will be directed to the specified directory. -// // - TESTWORK: // A boolean that, when enabled, retains working directories after tests for // inspection. 
If enabled, gnoland logs will be persisted inside this diff --git a/gno.land/pkg/integration/testing_node.go b/gno.land/pkg/integration/node_testing.go similarity index 86% rename from gno.land/pkg/integration/testing_node.go rename to gno.land/pkg/integration/node_testing.go index 7eaf3457b03..fdf94c8c545 100644 --- a/gno.land/pkg/integration/testing_node.go +++ b/gno.land/pkg/integration/node_testing.go @@ -55,7 +55,7 @@ func TestingInMemoryNode(t TestingTS, logger *slog.Logger, config *gnoland.InMem // with default packages and genesis transactions already loaded. // It will return the default creator address of the loaded packages. func TestingNodeConfig(t TestingTS, gnoroot string, additionalTxs ...gnoland.TxWithMetadata) (*gnoland.InMemoryNodeConfig, bft.Address) { - cfg := TestingMinimalNodeConfig(t, gnoroot) + cfg := TestingMinimalNodeConfig(gnoroot) creator := crypto.MustAddressFromString(DefaultAccount_Address) // test1 @@ -65,24 +65,24 @@ func TestingNodeConfig(t TestingTS, gnoroot string, additionalTxs ...gnoland.TxW txs = append(txs, LoadDefaultPackages(t, creator, gnoroot)...) txs = append(txs, additionalTxs...) - ggs := cfg.Genesis.AppState.(gnoland.GnoGenesisState) - ggs.Balances = balances - ggs.Txs = txs - ggs.Params = params - cfg.Genesis.AppState = ggs + cfg.Genesis.AppState = gnoland.GnoGenesisState{ + Balances: balances, + Txs: txs, + Params: params, + } return cfg, creator } // TestingMinimalNodeConfig constructs the default minimal in-memory node configuration for testing. 
-func TestingMinimalNodeConfig(t TestingTS, gnoroot string) *gnoland.InMemoryNodeConfig { +func TestingMinimalNodeConfig(gnoroot string) *gnoland.InMemoryNodeConfig { tmconfig := DefaultTestingTMConfig(gnoroot) // Create Mocked Identity pv := gnoland.NewMockedPrivValidator() // Generate genesis config - genesis := DefaultTestingGenesisConfig(t, gnoroot, pv.GetPubKey(), tmconfig) + genesis := DefaultTestingGenesisConfig(gnoroot, pv.GetPubKey(), tmconfig) return &gnoland.InMemoryNodeConfig{ PrivValidator: pv, @@ -96,16 +96,7 @@ func TestingMinimalNodeConfig(t TestingTS, gnoroot string) *gnoland.InMemoryNode } } -func DefaultTestingGenesisConfig(t TestingTS, gnoroot string, self crypto.PubKey, tmconfig *tmcfg.Config) *bft.GenesisDoc { - genState := gnoland.DefaultGenState() - genState.Balances = []gnoland.Balance{ - { - Address: crypto.MustAddressFromString(DefaultAccount_Address), - Amount: std.MustParseCoins(ugnot.ValueString(10000000000000)), - }, - } - genState.Txs = []gnoland.TxWithMetadata{} - genState.Params = []gnoland.Param{} +func DefaultTestingGenesisConfig(gnoroot string, self crypto.PubKey, tmconfig *tmcfg.Config) *bft.GenesisDoc { return &bft.GenesisDoc{ GenesisTime: time.Now(), ChainID: tmconfig.ChainID(), @@ -125,7 +116,16 @@ func DefaultTestingGenesisConfig(t TestingTS, gnoroot string, self crypto.PubKey Name: "self", }, }, - AppState: genState, + AppState: gnoland.GnoGenesisState{ + Balances: []gnoland.Balance{ + { + Address: crypto.MustAddressFromString(DefaultAccount_Address), + Amount: std.MustParseCoins(ugnot.ValueString(10_000_000_000_000)), + }, + }, + Txs: []gnoland.TxWithMetadata{}, + Params: []gnoland.Param{}, + }, } } @@ -178,8 +178,9 @@ func DefaultTestingTMConfig(gnoroot string) *tmcfg.Config { tmconfig := tmcfg.TestConfig().SetRootDir(gnoroot) tmconfig.Consensus.WALDisabled = true + tmconfig.Consensus.SkipTimeoutCommit = true tmconfig.Consensus.CreateEmptyBlocks = true - tmconfig.Consensus.CreateEmptyBlocksInterval = time.Duration(0) + 
tmconfig.Consensus.CreateEmptyBlocksInterval = time.Millisecond * 100 tmconfig.RPC.ListenAddress = defaultListner tmconfig.P2P.ListenAddress = defaultListner return tmconfig diff --git a/gno.land/pkg/integration/pkgloader.go b/gno.land/pkg/integration/pkgloader.go new file mode 100644 index 00000000000..541e24b96eb --- /dev/null +++ b/gno.land/pkg/integration/pkgloader.go @@ -0,0 +1,168 @@ +package integration + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/gnolang/gno/gno.land/pkg/gnoland" + "github.com/gnolang/gno/gno.land/pkg/sdk/vm" + "github.com/gnolang/gno/gnovm/pkg/gnolang" + "github.com/gnolang/gno/gnovm/pkg/gnomod" + "github.com/gnolang/gno/gnovm/pkg/packages" + bft "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/std" +) + +type PkgsLoader struct { + pkgs []gnomod.Pkg + visited map[string]struct{} + + // list of occurrences to patchs with the given value + // XXX: find a better way + patchs map[string]string +} + +func NewPkgsLoader() *PkgsLoader { + return &PkgsLoader{ + pkgs: make([]gnomod.Pkg, 0), + visited: make(map[string]struct{}), + patchs: make(map[string]string), + } +} + +func (pl *PkgsLoader) List() gnomod.PkgList { + return pl.pkgs +} + +func (pl *PkgsLoader) SetPatch(replace, with string) { + pl.patchs[replace] = with +} + +func (pl *PkgsLoader) LoadPackages(creator bft.Address, fee std.Fee, deposit std.Coins) ([]gnoland.TxWithMetadata, error) { + pkgslist, err := pl.List().Sort() // sorts packages by their dependencies. 
+ if err != nil { + return nil, fmt.Errorf("unable to sort packages: %w", err) + } + + txs := make([]gnoland.TxWithMetadata, len(pkgslist)) + for i, pkg := range pkgslist { + tx, err := gnoland.LoadPackage(pkg, creator, fee, deposit) + if err != nil { + return nil, fmt.Errorf("unable to load pkg %q: %w", pkg.Name, err) + } + + // If any replace value is specified, apply them + if len(pl.patchs) > 0 { + for _, msg := range tx.Msgs { + addpkg, ok := msg.(vm.MsgAddPackage) + if !ok { + continue + } + + if addpkg.Package == nil { + continue + } + + for _, file := range addpkg.Package.Files { + for replace, with := range pl.patchs { + file.Body = strings.ReplaceAll(file.Body, replace, with) + } + } + } + } + + txs[i] = gnoland.TxWithMetadata{ + Tx: tx, + } + } + + return txs, nil +} + +func (pl *PkgsLoader) LoadAllPackagesFromDir(path string) error { + // list all packages from target path + pkgslist, err := gnomod.ListPkgs(path) + if err != nil { + return fmt.Errorf("listing gno packages: %w", err) + } + + for _, pkg := range pkgslist { + if !pl.exist(pkg) { + pl.add(pkg) + } + } + + return nil +} + +func (pl *PkgsLoader) LoadPackage(modroot string, path, name string) error { + // Initialize a queue with the root package + queue := []gnomod.Pkg{{Dir: path, Name: name}} + + for len(queue) > 0 { + // Dequeue the first package + currentPkg := queue[0] + queue = queue[1:] + + if currentPkg.Dir == "" { + return fmt.Errorf("no path specified for package") + } + + if currentPkg.Name == "" { + // Load `gno.mod` information + gnoModPath := filepath.Join(currentPkg.Dir, "gno.mod") + gm, err := gnomod.ParseGnoMod(gnoModPath) + if err != nil { + return fmt.Errorf("unable to load %q: %w", gnoModPath, err) + } + gm.Sanitize() + + // Override package info with mod infos + currentPkg.Name = gm.Module.Mod.Path + currentPkg.Draft = gm.Draft + + pkg, err := gnolang.ReadMemPackage(currentPkg.Dir, currentPkg.Name) + if err != nil { + return fmt.Errorf("unable to read package at %q: %w", 
currentPkg.Dir, err) + } + imports, err := packages.Imports(pkg, nil) + if err != nil { + return fmt.Errorf("unable to load package imports in %q: %w", currentPkg.Dir, err) + } + for _, imp := range imports { + if imp.PkgPath == currentPkg.Name || gnolang.IsStdlib(imp.PkgPath) { + continue + } + currentPkg.Imports = append(currentPkg.Imports, imp.PkgPath) + } + } + + if currentPkg.Draft { + continue // Skip draft package + } + + if pl.exist(currentPkg) { + continue + } + pl.add(currentPkg) + + // Add requirements to the queue + for _, pkgPath := range currentPkg.Imports { + fullPath := filepath.Join(modroot, pkgPath) + queue = append(queue, gnomod.Pkg{Dir: fullPath}) + } + } + + return nil +} + +func (pl *PkgsLoader) add(pkg gnomod.Pkg) { + pl.visited[pkg.Name] = struct{}{} + pl.pkgs = append(pl.pkgs, pkg) +} + +func (pl *PkgsLoader) exist(pkg gnomod.Pkg) (ok bool) { + _, ok = pl.visited[pkg.Name] + return +} diff --git a/gno.land/pkg/integration/process.go b/gno.land/pkg/integration/process.go new file mode 100644 index 00000000000..839004ca1f3 --- /dev/null +++ b/gno.land/pkg/integration/process.go @@ -0,0 +1,451 @@ +package integration + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/exec" + "os/signal" + "slices" + "sync" + "testing" + "time" + + "github.com/gnolang/gno/gno.land/pkg/gnoland" + "github.com/gnolang/gno/tm2/pkg/amino" + tmcfg "github.com/gnolang/gno/tm2/pkg/bft/config" + bft "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/crypto/ed25519" + "github.com/gnolang/gno/tm2/pkg/db" + "github.com/gnolang/gno/tm2/pkg/db/goleveldb" + "github.com/gnolang/gno/tm2/pkg/db/memdb" + "github.com/stretchr/testify/require" +) + +const gracefulShutdown = time.Second * 5 + +type ProcessNodeConfig struct { + ValidatorKey ed25519.PrivKeyEd25519 `json:"priv"` + Verbose bool `json:"verbose"` + DBDir string `json:"dbdir"` + RootDir string `json:"rootdir"` + Genesis 
*MarshalableGenesisDoc `json:"genesis"` + TMConfig *tmcfg.Config `json:"tm"` +} + +type ProcessConfig struct { + Node *ProcessNodeConfig + + // These parameters are not meant to be passed to the process + CoverDir string + Stderr, Stdout io.Writer +} + +func (i ProcessConfig) validate() error { + if i.Node.TMConfig == nil { + return errors.New("no tm config set") + } + + if i.Node.Genesis == nil { + return errors.New("no genesis is set") + } + + return nil +} + +// RunNode initializes and runs a gnoaland node with the provided configuration. +func RunNode(ctx context.Context, pcfg *ProcessNodeConfig, stdout, stderr io.Writer) error { + // Setup logger based on verbosity + var handler slog.Handler + if pcfg.Verbose { + handler = slog.NewTextHandler(stdout, &slog.HandlerOptions{Level: slog.LevelDebug}) + } else { + handler = slog.NewTextHandler(stdout, &slog.HandlerOptions{Level: slog.LevelError}) + } + logger := slog.New(handler) + + // Initialize database + db, err := initDatabase(pcfg.DBDir) + if err != nil { + return err + } + defer db.Close() // ensure db is close + + nodecfg := TestingMinimalNodeConfig(pcfg.RootDir) + + // Configure validator if provided + if len(pcfg.ValidatorKey) > 0 && !isAllZero(pcfg.ValidatorKey) { + nodecfg.PrivValidator = bft.NewMockPVWithParams(pcfg.ValidatorKey, false, false) + } + pv := nodecfg.PrivValidator.GetPubKey() + + // Setup node configuration + nodecfg.DB = db + nodecfg.TMConfig.DBPath = pcfg.DBDir + nodecfg.TMConfig = pcfg.TMConfig + nodecfg.Genesis = pcfg.Genesis.ToGenesisDoc() + nodecfg.Genesis.Validators = []bft.GenesisValidator{ + { + Address: pv.Address(), + PubKey: pv, + Power: 10, + Name: "self", + }, + } + + // Create and start the node + node, err := gnoland.NewInMemoryNode(logger, nodecfg) + if err != nil { + return fmt.Errorf("failed to create new in-memory node: %w", err) + } + + if err := node.Start(); err != nil { + return fmt.Errorf("failed to start node: %w", err) + } + defer node.Stop() + + // Determine if 
the node is a validator + ourAddress := nodecfg.PrivValidator.GetPubKey().Address() + isValidator := slices.ContainsFunc(nodecfg.Genesis.Validators, func(val bft.GenesisValidator) bool { + return val.Address == ourAddress + }) + + lisnAddress := node.Config().RPC.ListenAddress + if isValidator { + select { + case <-ctx.Done(): + return fmt.Errorf("waiting for the node to start: %w", ctx.Err()) + case <-node.Ready(): + } + } + + // Write READY signal to stdout + signalWriteReady(stdout, lisnAddress) + + <-ctx.Done() + return node.Stop() +} + +type NodeProcess interface { + Stop() error + Address() string +} + +type nodeProcess struct { + cmd *exec.Cmd + address string + + stopOnce sync.Once + stopErr error +} + +func (n *nodeProcess) Address() string { + return n.address +} + +func (n *nodeProcess) Stop() error { + n.stopOnce.Do(func() { + // Send SIGTERM to the process + if err := n.cmd.Process.Signal(os.Interrupt); err != nil { + n.stopErr = fmt.Errorf("error sending `SIGINT` to the node: %w", err) + return + } + + // Optionally wait for the process to exit + if _, err := n.cmd.Process.Wait(); err != nil { + n.stopErr = fmt.Errorf("process exited with error: %w", err) + return + } + }) + + return n.stopErr +} + +// RunNodeProcess runs the binary at the given path with the provided configuration. +func RunNodeProcess(ctx context.Context, cfg ProcessConfig, name string, args ...string) (NodeProcess, error) { + if cfg.Stdout == nil { + cfg.Stdout = os.Stdout + } + + if cfg.Stderr == nil { + cfg.Stderr = os.Stderr + } + + if err := cfg.validate(); err != nil { + return nil, err + } + + // Marshal the configuration to JSON + nodeConfigData, err := json.Marshal(cfg.Node) + if err != nil { + return nil, fmt.Errorf("failed to marshal config to JSON: %w", err) + } + + // Create and configure the command to execute the binary + cmd := exec.Command(name, args...) 
+ cmd.Env = os.Environ() + cmd.Stdin = bytes.NewReader(nodeConfigData) + + if cfg.CoverDir != "" { + cmd.Env = append(cmd.Env, "GOCOVERDIR="+cfg.CoverDir) + } + + // Redirect all errors into a buffer + cmd.Stderr = os.Stderr + if cfg.Stderr != nil { + cmd.Stderr = cfg.Stderr + } + + // Create pipes for stdout + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stdout pipe: %w", err) + } + + // Start the command + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start command: %w", err) + } + + address, err := waitForProcessReady(ctx, stdoutPipe, cfg.Stdout) + if err != nil { + return nil, fmt.Errorf("waiting for readiness: %w", err) + } + + return &nodeProcess{ + cmd: cmd, + address: address, + }, nil +} + +type nodeInMemoryProcess struct { + address string + + stopOnce sync.Once + stopErr error + stop context.CancelFunc + ccNodeError chan error +} + +func (n *nodeInMemoryProcess) Address() string { + return n.address +} + +func (n *nodeInMemoryProcess) Stop() error { + n.stopOnce.Do(func() { + n.stop() + var err error + select { + case err = <-n.ccNodeError: + case <-time.After(time.Second * 5): + err = fmt.Errorf("timeout while waiting for node to stop") + } + + if err != nil { + n.stopErr = fmt.Errorf("unable to node gracefully: %w", err) + } + }) + + return n.stopErr +} + +func RunInMemoryProcess(ctx context.Context, cfg ProcessConfig) (NodeProcess, error) { + ctx, cancel := context.WithCancel(ctx) + + out, in := io.Pipe() + ccStopErr := make(chan error, 1) + go func() { + defer close(ccStopErr) + defer cancel() + + err := RunNode(ctx, cfg.Node, in, cfg.Stderr) + if err != nil { + fmt.Fprintf(cfg.Stderr, "run node failed: %v", err) + } + + ccStopErr <- err + }() + + address, err := waitForProcessReady(ctx, out, cfg.Stdout) + if err == nil { // ok + return &nodeInMemoryProcess{ + address: address, + stop: cancel, + ccNodeError: ccStopErr, + }, nil + } + + cancel() + + select { + case err = 
<-ccStopErr: // return node error in priority + default: + } + + return nil, err +} + +func RunMain(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer) error { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) + defer stop() + + // Read the configuration from standard input + configData, err := io.ReadAll(stdin) + if err != nil { + // log.Fatalf("error reading stdin: %v", err) + return fmt.Errorf("error reading stdin: %w", err) + } + + // Unmarshal the JSON configuration + var cfg ProcessNodeConfig + if err := json.Unmarshal(configData, &cfg); err != nil { + return fmt.Errorf("error unmarshaling JSON: %w", err) + // log.Fatalf("error unmarshaling JSON: %v", err) + } + + // Run the node + ccErr := make(chan error, 1) + go func() { + ccErr <- RunNode(ctx, &cfg, stdout, stderr) + close(ccErr) + }() + + // Wait for the node to gracefully terminate + <-ctx.Done() + + // Attempt graceful shutdown + select { + case <-time.After(gracefulShutdown): + return fmt.Errorf("unable to gracefully stop the node, exiting now") + case err = <-ccErr: // done + } + + return err +} + +func runTestingNodeProcess(t TestingTS, ctx context.Context, pcfg ProcessConfig) NodeProcess { + bin, err := os.Executable() + require.NoError(t, err) + args := []string{ + "-test.run=^$", + "-run-node-process", + } + + if pcfg.CoverDir != "" && testing.CoverMode() != "" { + args = append(args, "-test.gocoverdir="+pcfg.CoverDir) + } + + node, err := RunNodeProcess(ctx, pcfg, bin, args...) + require.NoError(t, err) + + return node +} + +// initDatabase initializes the database based on the provided directory configuration. 
+func initDatabase(dbDir string) (db.DB, error) { + if dbDir == "" { + return memdb.NewMemDB(), nil + } + + data, err := goleveldb.NewGoLevelDB("testdb", dbDir) + if err != nil { + return nil, fmt.Errorf("unable to init database in %q: %w", dbDir, err) + } + + return data, nil +} + +func signalWriteReady(w io.Writer, address string) error { + _, err := fmt.Fprintf(w, "READY:%s\n", address) + return err +} + +func signalReadReady(line string) (string, bool) { + var address string + if _, err := fmt.Sscanf(line, "READY:%s", &address); err == nil { + return address, true + } + return "", false +} + +// waitForProcessReady waits for the process to signal readiness and returns the address. +func waitForProcessReady(ctx context.Context, stdoutPipe io.Reader, out io.Writer) (string, error) { + var address string + + cReady := make(chan error, 2) + go func() { + defer close(cReady) + + scanner := bufio.NewScanner(stdoutPipe) + ready := false + for scanner.Scan() { + line := scanner.Text() + + if !ready { + if addr, ok := signalReadReady(line); ok { + address = addr + ready = true + cReady <- nil + } + } + + fmt.Fprintln(out, line) + } + + if err := scanner.Err(); err != nil { + cReady <- fmt.Errorf("error reading stdout: %w", err) + } else { + cReady <- fmt.Errorf("process exited without 'READY'") + } + }() + + select { + case err := <-cReady: + return address, err + case <-ctx.Done(): + return "", ctx.Err() + } +} + +// isAllZero checks if a 64-byte key consists entirely of zeros. 
+func isAllZero(key [64]byte) bool { + for _, v := range key { + if v != 0 { + return false + } + } + return true +} + +type MarshalableGenesisDoc bft.GenesisDoc + +func NewMarshalableGenesisDoc(doc *bft.GenesisDoc) *MarshalableGenesisDoc { + m := MarshalableGenesisDoc(*doc) + return &m +} + +func (m *MarshalableGenesisDoc) MarshalJSON() ([]byte, error) { + doc := (*bft.GenesisDoc)(m) + return amino.MarshalJSON(doc) +} + +func (m *MarshalableGenesisDoc) UnmarshalJSON(data []byte) (err error) { + doc, err := bft.GenesisDocFromJSON(data) + if err != nil { + return err + } + + *m = MarshalableGenesisDoc(*doc) + return +} + +// Cast back to the original bft.GenesisDoc. +func (m *MarshalableGenesisDoc) ToGenesisDoc() *bft.GenesisDoc { + return (*bft.GenesisDoc)(m) +} diff --git a/gno.land/pkg/integration/process/main.go b/gno.land/pkg/integration/process/main.go new file mode 100644 index 00000000000..bcd52e6fd44 --- /dev/null +++ b/gno.land/pkg/integration/process/main.go @@ -0,0 +1,20 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/gnolang/gno/gno.land/pkg/integration" +) + +func main() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + if err := integration.RunMain(ctx, os.Stdin, os.Stdout, os.Stderr); err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } +} diff --git a/gno.land/pkg/integration/process_test.go b/gno.land/pkg/integration/process_test.go new file mode 100644 index 00000000000..b8768ad0e63 --- /dev/null +++ b/gno.land/pkg/integration/process_test.go @@ -0,0 +1,144 @@ +package integration + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/gnolang/gno/gnovm/pkg/gnoenv" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" + "github.com/gnolang/gno/tm2/pkg/crypto/ed25519" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Define a flag to indicate whether to run the 
embedded command +var runCommand = flag.Bool("run-node-process", false, "execute the embedded command") + +func TestMain(m *testing.M) { + flag.Parse() + + // Check if the embedded command should be executed + if !*runCommand { + fmt.Println("Running tests...") + os.Exit(m.Run()) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + if err := RunMain(ctx, os.Stdin, os.Stdout, os.Stderr); err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } +} + +// TestGnolandIntegration tests the forking of a Gnoland node. +func TestNodeProcess(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*20) + defer cancel() + + gnoRootDir := gnoenv.RootDir() + + // Define paths for the build directory and the gnoland binary + gnolandDBDir := filepath.Join(t.TempDir(), "db") + + // Prepare a minimal node configuration for testing + cfg := TestingMinimalNodeConfig(gnoRootDir) + + var stdio bytes.Buffer + defer func() { + t.Log("node output:") + t.Log(stdio.String()) + }() + + start := time.Now() + node := runTestingNodeProcess(t, ctx, ProcessConfig{ + Stderr: &stdio, Stdout: &stdio, + Node: &ProcessNodeConfig{ + Verbose: true, + ValidatorKey: ed25519.GenPrivKey(), + DBDir: gnolandDBDir, + RootDir: gnoRootDir, + TMConfig: cfg.TMConfig, + Genesis: NewMarshalableGenesisDoc(cfg.Genesis), + }, + }) + t.Logf("time to start the node: %v", time.Since(start).String()) + + // Create a new HTTP client to interact with the integration node + cli, err := client.NewHTTPClient(node.Address()) + require.NoError(t, err) + + // Retrieve node info + info, err := cli.ABCIInfo() + require.NoError(t, err) + assert.NotEmpty(t, info.Response.Data) + + // Attempt to stop the node + err = node.Stop() + require.NoError(t, err) + + // Attempt to stop the node a second time, should not fail + err = node.Stop() + require.NoError(t, err) +} + +// TestGnolandIntegration tests the forking of a Gnoland node. 
+func TestInMemoryNodeProcess(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*20) + defer cancel() + + gnoRootDir := gnoenv.RootDir() + + // Define paths for the build directory and the gnoland binary + gnolandDBDir := filepath.Join(t.TempDir(), "db") + + // Prepare a minimal node configuration for testing + cfg := TestingMinimalNodeConfig(gnoRootDir) + + var stdio bytes.Buffer + defer func() { + t.Log("node output:") + t.Log(stdio.String()) + }() + + start := time.Now() + node, err := RunInMemoryProcess(ctx, ProcessConfig{ + Stderr: &stdio, Stdout: &stdio, + Node: &ProcessNodeConfig{ + Verbose: true, + ValidatorKey: ed25519.GenPrivKey(), + DBDir: gnolandDBDir, + RootDir: gnoRootDir, + TMConfig: cfg.TMConfig, + Genesis: NewMarshalableGenesisDoc(cfg.Genesis), + }, + }) + require.NoError(t, err) + t.Logf("time to start the node: %v", time.Since(start).String()) + + // Create a new HTTP client to interact with the integration node + cli, err := client.NewHTTPClient(node.Address()) + require.NoError(t, err) + + // Retrieve node info + info, err := cli.ABCIInfo() + require.NoError(t, err) + assert.NotEmpty(t, info.Response.Data) + + // Attempt to stop the node + err = node.Stop() + require.NoError(t, err) + + // Attempt to stop the node a second time, should not fail + err = node.Stop() + require.NoError(t, err) +} diff --git a/gno.land/pkg/integration/testdata/gnoland.txtar b/gno.land/pkg/integration/testdata/gnoland.txtar index 78bdc9cae4e..83c8fe9c9a5 100644 --- a/gno.land/pkg/integration/testdata/gnoland.txtar +++ b/gno.land/pkg/integration/testdata/gnoland.txtar @@ -28,7 +28,7 @@ cmp stderr gnoland-already-stop.stderr.golden -- gnoland-no-arguments.stdout.golden -- -- gnoland-no-arguments.stderr.golden -- -"gnoland" error: syntax: gnoland [start|stop|restart] +"gnoland" error: no command provided -- gnoland-start.stdout.golden -- node started successfully -- gnoland-start.stderr.golden -- diff --git 
a/gno.land/pkg/integration/testdata/loadpkg_example.txtar b/gno.land/pkg/integration/testdata/loadpkg_example.txtar index c05bedfef65..f7be500f3b6 100644 --- a/gno.land/pkg/integration/testdata/loadpkg_example.txtar +++ b/gno.land/pkg/integration/testdata/loadpkg_example.txtar @@ -4,11 +4,11 @@ loadpkg gno.land/p/demo/ufmt ## start a new node gnoland start -gnokey maketx addpkg -pkgdir $WORK -pkgpath gno.land/r/importtest -gas-fee 1000000ugnot -gas-wanted 9000000 -broadcast -chainid=tendermint_test test1 +gnokey maketx addpkg -pkgdir $WORK -pkgpath gno.land/r/importtest -gas-fee 1000000ugnot -gas-wanted 10000000 -broadcast -chainid=tendermint_test test1 stdout OK! ## execute Render -gnokey maketx call -pkgpath gno.land/r/importtest -func Render -gas-fee 1000000ugnot -gas-wanted 9000000 -args '' -broadcast -chainid=tendermint_test test1 +gnokey maketx call -pkgpath gno.land/r/importtest -func Render -gas-fee 1000000ugnot -gas-wanted 10000000 -args '' -broadcast -chainid=tendermint_test test1 stdout '("92054" string)' stdout OK! 
diff --git a/gno.land/pkg/integration/testdata/restart.txtar b/gno.land/pkg/integration/testdata/restart.txtar index 8a63713a214..5571aa9fa66 100644 --- a/gno.land/pkg/integration/testdata/restart.txtar +++ b/gno.land/pkg/integration/testdata/restart.txtar @@ -4,12 +4,12 @@ loadpkg gno.land/r/demo/counter $WORK gnoland start -gnokey maketx call -pkgpath gno.land/r/demo/counter -func Incr -gas-fee 1000000ugnot -gas-wanted 150000 -broadcast -chainid tendermint_test test1 +gnokey maketx call -pkgpath gno.land/r/demo/counter -func Incr -gas-fee 1000000ugnot -gas-wanted 200000 -broadcast -chainid tendermint_test test1 stdout '\(1 int\)' gnoland restart -gnokey maketx call -pkgpath gno.land/r/demo/counter -func Incr -gas-fee 1000000ugnot -gas-wanted 150000 -broadcast -chainid tendermint_test test1 +gnokey maketx call -pkgpath gno.land/r/demo/counter -func Incr -gas-fee 1000000ugnot -gas-wanted 200000 -broadcast -chainid tendermint_test test1 stdout '\(2 int\)' -- counter.gno -- diff --git a/gno.land/pkg/integration/testdata_test.go b/gno.land/pkg/integration/testdata_test.go new file mode 100644 index 00000000000..ba4d5176df1 --- /dev/null +++ b/gno.land/pkg/integration/testdata_test.go @@ -0,0 +1,67 @@ +package integration + +import ( + "os" + "strconv" + "testing" + + gno_integration "github.com/gnolang/gno/gnovm/pkg/integration" + "github.com/rogpeppe/go-internal/testscript" + "github.com/stretchr/testify/require" +) + +func TestTestdata(t *testing.T) { + t.Parallel() + + flagInMemoryTS, _ := strconv.ParseBool(os.Getenv("INMEMORY_TS")) + flagNoSeqTS, _ := strconv.ParseBool(os.Getenv("NO_SEQ_TS")) + + p := gno_integration.NewTestingParams(t, "testdata") + + if coverdir, ok := gno_integration.ResolveCoverageDir(); ok { + err := gno_integration.SetupTestscriptsCoverage(&p, coverdir) + require.NoError(t, err) + } + + // Set up gnoland for testscript + err := SetupGnolandTestscript(t, &p) + require.NoError(t, err) + + mode := commandKindTesting + if flagInMemoryTS { + mode 
= commandKindInMemory + } + + origSetup := p.Setup + p.Setup = func(env *testscript.Env) error { + env.Values[envKeyExecCommand] = mode + if origSetup != nil { + if err := origSetup(env); err != nil { + return err + } + } + + return nil + } + + if flagInMemoryTS && !flagNoSeqTS { + testscript.RunT(tSeqShim{t}, p) + } else { + testscript.Run(t, p) + } +} + +type tSeqShim struct{ *testing.T } + +// noop Parallel method allow us to run test sequentially +func (tSeqShim) Parallel() {} + +func (t tSeqShim) Run(name string, f func(testscript.T)) { + t.T.Run(name, func(t *testing.T) { + f(tSeqShim{t}) + }) +} + +func (t tSeqShim) Verbose() bool { + return testing.Verbose() +} diff --git a/gno.land/pkg/integration/testing_integration.go b/gno.land/pkg/integration/testing_integration.go deleted file mode 100644 index 0a181950bb3..00000000000 --- a/gno.land/pkg/integration/testing_integration.go +++ /dev/null @@ -1,795 +0,0 @@ -package integration - -import ( - "context" - "errors" - "flag" - "fmt" - "hash/crc32" - "log/slog" - "os" - "path/filepath" - "strconv" - "strings" - "testing" - - "github.com/gnolang/gno/gno.land/pkg/gnoland" - "github.com/gnolang/gno/gno.land/pkg/gnoland/ugnot" - "github.com/gnolang/gno/gno.land/pkg/keyscli" - "github.com/gnolang/gno/gno.land/pkg/log" - "github.com/gnolang/gno/gno.land/pkg/sdk/vm" - "github.com/gnolang/gno/gnovm/pkg/gnoenv" - "github.com/gnolang/gno/gnovm/pkg/gnolang" - "github.com/gnolang/gno/gnovm/pkg/gnomod" - "github.com/gnolang/gno/gnovm/pkg/packages" - "github.com/gnolang/gno/tm2/pkg/bft/node" - bft "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/commands" - "github.com/gnolang/gno/tm2/pkg/crypto" - "github.com/gnolang/gno/tm2/pkg/crypto/bip39" - "github.com/gnolang/gno/tm2/pkg/crypto/keys" - "github.com/gnolang/gno/tm2/pkg/crypto/keys/client" - "github.com/gnolang/gno/tm2/pkg/db/memdb" - tm2Log "github.com/gnolang/gno/tm2/pkg/log" - "github.com/gnolang/gno/tm2/pkg/std" - 
"github.com/rogpeppe/go-internal/testscript" - "go.uber.org/zap/zapcore" -) - -const ( - envKeyGenesis int = iota - envKeyLogger - envKeyPkgsLoader -) - -type tSeqShim struct{ *testing.T } - -// noop Parallel method allow us to run test sequentially -func (tSeqShim) Parallel() {} - -func (t tSeqShim) Run(name string, f func(testscript.T)) { - t.T.Run(name, func(t *testing.T) { - f(tSeqShim{t}) - }) -} - -func (t tSeqShim) Verbose() bool { - return testing.Verbose() -} - -// RunGnolandTestscripts sets up and runs txtar integration tests for gnoland nodes. -// It prepares an in-memory gnoland node and initializes the necessary environment and custom commands. -// The function adapts the test setup for use with the testscript package, enabling -// the execution of gnoland and gnokey commands within txtar scripts. -// -// Refer to package documentation in doc.go for more information on commands and example txtar scripts. -func RunGnolandTestscripts(t *testing.T, txtarDir string) { - t.Helper() - - p := setupGnolandTestScript(t, txtarDir) - if deadline, ok := t.Deadline(); ok && p.Deadline.IsZero() { - p.Deadline = deadline - } - - testscript.RunT(tSeqShim{t}, p) -} - -type testNode struct { - *node.Node - cfg *gnoland.InMemoryNodeConfig - nGnoKeyExec uint // Counter for execution of gnokey. -} - -func setupGnolandTestScript(t *testing.T, txtarDir string) testscript.Params { - t.Helper() - - tmpdir := t.TempDir() - - // `gnoRootDir` should point to the local location of the gno repository. - // It serves as the gno equivalent of GOROOT. - gnoRootDir := gnoenv.RootDir() - - // `gnoHomeDir` should be the local directory where gnokey stores keys. - gnoHomeDir := filepath.Join(tmpdir, "gno") - - // Testscripts run concurrently by default, so we need to be prepared for that. 
- nodes := map[string]*testNode{} - - updateScripts, _ := strconv.ParseBool(os.Getenv("UPDATE_SCRIPTS")) - persistWorkDir, _ := strconv.ParseBool(os.Getenv("TESTWORK")) - return testscript.Params{ - UpdateScripts: updateScripts, - TestWork: persistWorkDir, - Dir: txtarDir, - Setup: func(env *testscript.Env) error { - kb, err := keys.NewKeyBaseFromDir(gnoHomeDir) - if err != nil { - return err - } - - // create sessions ID - var sid string - { - works := env.Getenv("WORK") - sum := crc32.ChecksumIEEE([]byte(works)) - sid = strconv.FormatUint(uint64(sum), 16) - env.Setenv("SID", sid) - } - - // setup logger - var logger *slog.Logger - { - logger = tm2Log.NewNoopLogger() - if persistWorkDir || os.Getenv("LOG_PATH_DIR") != "" { - logname := fmt.Sprintf("txtar-gnoland-%s.log", sid) - logger, err = getTestingLogger(env, logname) - if err != nil { - return fmt.Errorf("unable to setup logger: %w", err) - } - } - - env.Values[envKeyLogger] = logger - } - - // Track new user balances added via the `adduser` - // command and packages added with the `loadpkg` command. - // This genesis will be use when node is started. - - genesis := gnoland.DefaultGenState() - genesis.Balances = LoadDefaultGenesisBalanceFile(t, gnoRootDir) - genesis.Params = LoadDefaultGenesisParamFile(t, gnoRootDir) - genesis.Auth.Params.InitialGasPrice = std.GasPrice{Gas: 0, Price: std.Coin{Amount: 0, Denom: "ugnot"}} - genesis.Txs = []gnoland.TxWithMetadata{} - - // test1 must be created outside of the loop below because it is already included in genesis so - // attempting to recreate results in it getting overwritten and breaking existing tests that - // rely on its address being static. 
- kb.CreateAccount(DefaultAccount_Name, DefaultAccount_Seed, "", "", 0, 0) - env.Setenv("USER_SEED_"+DefaultAccount_Name, DefaultAccount_Seed) - env.Setenv("USER_ADDR_"+DefaultAccount_Name, DefaultAccount_Address) - - env.Values[envKeyGenesis] = &genesis - env.Values[envKeyPkgsLoader] = newPkgsLoader() - - env.Setenv("GNOROOT", gnoRootDir) - env.Setenv("GNOHOME", gnoHomeDir) - - return nil - }, - Cmds: map[string]func(ts *testscript.TestScript, neg bool, args []string){ - "gnoland": func(ts *testscript.TestScript, neg bool, args []string) { - if len(args) == 0 { - tsValidateError(ts, "gnoland", neg, fmt.Errorf("syntax: gnoland [start|stop|restart]")) - return - } - - logger := ts.Value(envKeyLogger).(*slog.Logger) // grab logger - sid := getNodeSID(ts) // grab session id - - var cmd string - cmd, args = args[0], args[1:] - - var err error - switch cmd { - case "start": - if nodeIsRunning(nodes, sid) { - err = fmt.Errorf("node already started") - break - } - - // parse flags - fs := flag.NewFlagSet("start", flag.ContinueOnError) - nonVal := fs.Bool("non-validator", false, "set up node as a non-validator") - if err := fs.Parse(args); err != nil { - ts.Fatalf("unable to parse `gnoland start` flags: %s", err) - } - - // get packages - pkgs := ts.Value(envKeyPkgsLoader).(*pkgsLoader) // grab logger - creator := crypto.MustAddressFromString(DefaultAccount_Address) // test1 - defaultFee := std.NewFee(50000, std.MustParseCoin(ugnot.ValueString(1000000))) - // we need to define a new err1 otherwise the out err would be shadowed in the case "start": - pkgsTxs, loadErr := pkgs.LoadPackages(creator, defaultFee, nil) - - if loadErr != nil { - ts.Fatalf("unable to load packages txs: %s", err) - } - - // Warp up `ts` so we can pass it to other testing method - t := TSTestingT(ts) - - // Generate config and node - cfg := TestingMinimalNodeConfig(t, gnoRootDir) - genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState) - genesis.Txs = append(pkgsTxs, genesis.Txs...) 
- - // setup genesis state - cfg.Genesis.AppState = *genesis - if *nonVal { - // re-create cfg.Genesis.Validators with a throwaway pv, so we start as a - // non-validator. - pv := gnoland.NewMockedPrivValidator() - cfg.Genesis.Validators = []bft.GenesisValidator{ - { - Address: pv.GetPubKey().Address(), - PubKey: pv.GetPubKey(), - Power: 10, - Name: "none", - }, - } - } - cfg.DB = memdb.NewMemDB() // so it can be reused when restarting. - - n, remoteAddr := TestingInMemoryNode(t, logger, cfg) - - // Register cleanup - nodes[sid] = &testNode{Node: n, cfg: cfg} - - // Add default environments - ts.Setenv("RPC_ADDR", remoteAddr) - - fmt.Fprintln(ts.Stdout(), "node started successfully") - case "restart": - n, ok := nodes[sid] - if !ok { - err = fmt.Errorf("node must be started before being restarted") - break - } - - if stopErr := n.Stop(); stopErr != nil { - err = fmt.Errorf("error stopping node: %w", stopErr) - break - } - - // Create new node with same config. - newNode, newRemoteAddr := TestingInMemoryNode(t, logger, n.cfg) - - // Update testNode and environment variables. - n.Node = newNode - ts.Setenv("RPC_ADDR", newRemoteAddr) - - fmt.Fprintln(ts.Stdout(), "node restarted successfully") - case "stop": - n, ok := nodes[sid] - if !ok { - err = fmt.Errorf("node not started cannot be stopped") - break - } - if err = n.Stop(); err == nil { - delete(nodes, sid) - - // Unset gnoland environments - ts.Setenv("RPC_ADDR", "") - fmt.Fprintln(ts.Stdout(), "node stopped successfully") - } - default: - err = fmt.Errorf("invalid gnoland subcommand: %q", cmd) - } - - tsValidateError(ts, "gnoland "+cmd, neg, err) - }, - "gnokey": func(ts *testscript.TestScript, neg bool, args []string) { - logger := ts.Value(envKeyLogger).(*slog.Logger) // grab logger - sid := ts.Getenv("SID") // grab session id - - // Unquote args enclosed in `"` to correctly handle `\n` or similar escapes. 
- args, err := unquote(args) - if err != nil { - tsValidateError(ts, "gnokey", neg, err) - } - - // Setup IO command - io := commands.NewTestIO() - io.SetOut(commands.WriteNopCloser(ts.Stdout())) - io.SetErr(commands.WriteNopCloser(ts.Stderr())) - cmd := keyscli.NewRootCmd(io, client.DefaultBaseOptions) - - io.SetIn(strings.NewReader("\n")) // Inject empty password to stdin. - defaultArgs := []string{ - "-home", gnoHomeDir, - "-insecure-password-stdin=true", // There no use to not have this param by default. - } - - if n, ok := nodes[sid]; ok { - if raddr := n.Config().RPC.ListenAddress; raddr != "" { - defaultArgs = append(defaultArgs, "-remote", raddr) - } - - n.nGnoKeyExec++ - headerlog := fmt.Sprintf("%.02d!EXEC_GNOKEY", n.nGnoKeyExec) - - // Log the command inside gnoland logger, so we can better scope errors. - logger.Info(headerlog, "args", strings.Join(args, " ")) - defer logger.Info(headerlog, "delimiter", "END") - } - - // Inject default argument, if duplicate - // arguments, it should be override by the ones - // user provided. - args = append(defaultArgs, args...) - - err = cmd.ParseAndRun(context.Background(), args) - tsValidateError(ts, "gnokey", neg, err) - }, - // adduser command must be executed before starting the node; it errors out otherwise. 
- "adduser": func(ts *testscript.TestScript, neg bool, args []string) { - if nodeIsRunning(nodes, getNodeSID(ts)) { - tsValidateError(ts, "adduser", neg, errors.New("adduser must be used before starting node")) - return - } - - if len(args) == 0 { - ts.Fatalf("new user name required") - } - - kb, err := keys.NewKeyBaseFromDir(gnoHomeDir) - if err != nil { - ts.Fatalf("unable to get keybase") - } - - balance, err := createAccount(ts, kb, args[0]) - if err != nil { - ts.Fatalf("error creating account %s: %s", args[0], err) - } - - // Add balance to genesis - genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState) - genesis.Balances = append(genesis.Balances, balance) - }, - // adduserfrom commands must be executed before starting the node; it errors out otherwise. - "adduserfrom": func(ts *testscript.TestScript, neg bool, args []string) { - if nodeIsRunning(nodes, getNodeSID(ts)) { - tsValidateError(ts, "adduserfrom", neg, errors.New("adduserfrom must be used before starting node")) - return - } - - var account, index uint64 - var err error - - switch len(args) { - case 2: - // expected user input - // adduserfrom 'username 'menmonic' - // no need to do anything - - case 4: - // expected user input - // adduserfrom 'username 'menmonic' 'account' 'index' - - // parse 'index' first, then fallghrough to `case 3` to parse 'account' - index, err = strconv.ParseUint(args[3], 10, 32) - if err != nil { - ts.Fatalf("invalid index number %s", args[3]) - } - - fallthrough // parse 'account' - case 3: - // expected user input - // adduserfrom 'username 'menmonic' 'account' - - account, err = strconv.ParseUint(args[2], 10, 32) - if err != nil { - ts.Fatalf("invalid account number %s", args[2]) - } - default: - ts.Fatalf("to create account from metadatas, user name and mnemonic are required ( account and index are optional )") - } - - kb, err := keys.NewKeyBaseFromDir(gnoHomeDir) - if err != nil { - ts.Fatalf("unable to get keybase") - } - - balance, err := 
createAccountFrom(ts, kb, args[0], args[1], uint32(account), uint32(index)) - if err != nil { - ts.Fatalf("error creating wallet %s", err) - } - - // Add balance to genesis - genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState) - genesis.Balances = append(genesis.Balances, balance) - - fmt.Fprintf(ts.Stdout(), "Added %s(%s) to genesis", args[0], balance.Address) - }, - // `patchpkg` Patch any loaded files by packages by replacing all occurrences of the - // first argument with the second. - // This is mostly use to replace hardcoded address inside txtar file. - "patchpkg": func(ts *testscript.TestScript, neg bool, args []string) { - args, err := unquote(args) - if err != nil { - tsValidateError(ts, "patchpkg", neg, err) - } - - if len(args) != 2 { - ts.Fatalf("`patchpkg`: should have exactly 2 arguments") - } - - pkgs := ts.Value(envKeyPkgsLoader).(*pkgsLoader) - replace, with := args[0], args[1] - pkgs.SetPatch(replace, with) - }, - // `loadpkg` load a specific package from the 'examples' or working directory. - "loadpkg": func(ts *testscript.TestScript, neg bool, args []string) { - // special dirs - workDir := ts.Getenv("WORK") - examplesDir := filepath.Join(gnoRootDir, "examples") - - pkgs := ts.Value(envKeyPkgsLoader).(*pkgsLoader) - - var path, name string - switch len(args) { - case 2: - name = args[0] - path = filepath.Clean(args[1]) - case 1: - path = filepath.Clean(args[0]) - case 0: - ts.Fatalf("`loadpkg`: no arguments specified") - default: - ts.Fatalf("`loadpkg`: too many arguments specified") - } - - // If `all` is specified, fully load 'examples' directory. - // NOTE: In 99% of cases, this is not needed, and - // packages should be loaded individually. 
- if path == "all" { - ts.Logf("warning: loading all packages") - if err := pkgs.LoadAllPackagesFromDir(examplesDir); err != nil { - ts.Fatalf("unable to load packages from %q: %s", examplesDir, err) - } - - return - } - - if !strings.HasPrefix(path, workDir) { - path = filepath.Join(examplesDir, path) - } - - if err := pkgs.LoadPackage(examplesDir, path, name); err != nil { - ts.Fatalf("`loadpkg` unable to load package(s) from %q: %s", args[0], err) - } - - ts.Logf("%q package was added to genesis", args[0]) - }, - }, - } -} - -// `unquote` takes a slice of strings, resulting from splitting a string block by spaces, and -// processes them. The function handles quoted phrases and escape characters within these strings. -func unquote(args []string) ([]string, error) { - const quote = '"' - - parts := []string{} - var inQuote bool - - var part strings.Builder - for _, arg := range args { - var escaped bool - for _, c := range arg { - if escaped { - // If the character is meant to be escaped, it is processed with Unquote. - // We use `Unquote` here for two main reasons: - // 1. It will validate that the escape sequence is correct - // 2. It converts the escaped string to its corresponding raw character. - // For example, "\\t" becomes '\t'. - uc, err := strconv.Unquote(`"\` + string(c) + `"`) - if err != nil { - return nil, fmt.Errorf("unhandled escape sequence `\\%c`: %w", c, err) - } - - part.WriteString(uc) - escaped = false - continue - } - - // If we are inside a quoted string and encounter an escape character, - // flag the next character as `escaped` - if inQuote && c == '\\' { - escaped = true - continue - } - - // Detect quote and toggle inQuote state - if c == quote { - inQuote = !inQuote - continue - } - - // Handle regular character - part.WriteRune(c) - } - - // If we're inside a quote, add a single space. - // It reflects one or multiple spaces between args in the original string. 
- if inQuote { - part.WriteRune(' ') - continue - } - - // Finalize part, add to parts, and reset for next part - parts = append(parts, part.String()) - part.Reset() - } - - // Check if a quote is left open - if inQuote { - return nil, errors.New("unfinished quote") - } - - return parts, nil -} - -func getNodeSID(ts *testscript.TestScript) string { - return ts.Getenv("SID") -} - -func nodeIsRunning(nodes map[string]*testNode, sid string) bool { - _, ok := nodes[sid] - return ok -} - -func getTestingLogger(env *testscript.Env, logname string) (*slog.Logger, error) { - var path string - - if logdir := os.Getenv("LOG_PATH_DIR"); logdir != "" { - if err := os.MkdirAll(logdir, 0o755); err != nil { - return nil, fmt.Errorf("unable to make log directory %q", logdir) - } - - var err error - if path, err = filepath.Abs(filepath.Join(logdir, logname)); err != nil { - return nil, fmt.Errorf("unable to get absolute path of logdir %q", logdir) - } - } else if workdir := env.Getenv("WORK"); workdir != "" { - path = filepath.Join(workdir, logname) - } else { - return tm2Log.NewNoopLogger(), nil - } - - f, err := os.Create(path) - if err != nil { - return nil, fmt.Errorf("unable to create log file %q: %w", path, err) - } - - env.Defer(func() { - if err := f.Close(); err != nil { - panic(fmt.Errorf("unable to close log file %q: %w", path, err)) - } - }) - - // Initialize the logger - logLevel, err := zapcore.ParseLevel(strings.ToLower(os.Getenv("LOG_LEVEL"))) - if err != nil { - return nil, fmt.Errorf("unable to parse log level, %w", err) - } - - // Build zap logger for testing - zapLogger := log.NewZapTestingLogger(f, logLevel) - env.Defer(func() { zapLogger.Sync() }) - - env.T().Log("starting logger", path) - return log.ZapLoggerToSlog(zapLogger), nil -} - -func tsValidateError(ts *testscript.TestScript, cmd string, neg bool, err error) { - if err != nil { - fmt.Fprintf(ts.Stderr(), "%q error: %+v\n", cmd, err) - if !neg { - ts.Fatalf("unexpected %q command failure: %s", cmd, 
err) - } - } else { - if neg { - ts.Fatalf("unexpected %q command success", cmd) - } - } -} - -type envSetter interface { - Setenv(key, value string) -} - -// createAccount creates a new account with the given name and adds it to the keybase. -func createAccount(env envSetter, kb keys.Keybase, accountName string) (gnoland.Balance, error) { - var balance gnoland.Balance - entropy, err := bip39.NewEntropy(256) - if err != nil { - return balance, fmt.Errorf("error creating entropy: %w", err) - } - - mnemonic, err := bip39.NewMnemonic(entropy) - if err != nil { - return balance, fmt.Errorf("error generating mnemonic: %w", err) - } - - var keyInfo keys.Info - if keyInfo, err = kb.CreateAccount(accountName, mnemonic, "", "", 0, 0); err != nil { - return balance, fmt.Errorf("unable to create account: %w", err) - } - - address := keyInfo.GetAddress() - env.Setenv("USER_SEED_"+accountName, mnemonic) - env.Setenv("USER_ADDR_"+accountName, address.String()) - - return gnoland.Balance{ - Address: address, - Amount: std.Coins{std.NewCoin(ugnot.Denom, 10e6)}, - }, nil -} - -// createAccountFrom creates a new account with the given metadata and adds it to the keybase. 
-func createAccountFrom(env envSetter, kb keys.Keybase, accountName, mnemonic string, account, index uint32) (gnoland.Balance, error) { - var balance gnoland.Balance - - // check if mnemonic is valid - if !bip39.IsMnemonicValid(mnemonic) { - return balance, fmt.Errorf("invalid mnemonic") - } - - keyInfo, err := kb.CreateAccount(accountName, mnemonic, "", "", account, index) - if err != nil { - return balance, fmt.Errorf("unable to create account: %w", err) - } - - address := keyInfo.GetAddress() - env.Setenv("USER_SEED_"+accountName, mnemonic) - env.Setenv("USER_ADDR_"+accountName, address.String()) - - return gnoland.Balance{ - Address: address, - Amount: std.Coins{std.NewCoin(ugnot.Denom, 10e6)}, - }, nil -} - -type pkgsLoader struct { - pkgs []gnomod.Pkg - visited map[string]struct{} - - // list of occurrences to patchs with the given value - // XXX: find a better way - patchs map[string]string -} - -func newPkgsLoader() *pkgsLoader { - return &pkgsLoader{ - pkgs: make([]gnomod.Pkg, 0), - visited: make(map[string]struct{}), - patchs: make(map[string]string), - } -} - -func (pl *pkgsLoader) List() gnomod.PkgList { - return pl.pkgs -} - -func (pl *pkgsLoader) SetPatch(replace, with string) { - pl.patchs[replace] = with -} - -func (pl *pkgsLoader) LoadPackages(creator bft.Address, fee std.Fee, deposit std.Coins) ([]gnoland.TxWithMetadata, error) { - pkgslist, err := pl.List().Sort() // sorts packages by their dependencies. 
- if err != nil { - return nil, fmt.Errorf("unable to sort packages: %w", err) - } - - txs := make([]gnoland.TxWithMetadata, len(pkgslist)) - for i, pkg := range pkgslist { - tx, err := gnoland.LoadPackage(pkg, creator, fee, deposit) - if err != nil { - return nil, fmt.Errorf("unable to load pkg %q: %w", pkg.Name, err) - } - - // If any replace value is specified, apply them - if len(pl.patchs) > 0 { - for _, msg := range tx.Msgs { - addpkg, ok := msg.(vm.MsgAddPackage) - if !ok { - continue - } - - if addpkg.Package == nil { - continue - } - - for _, file := range addpkg.Package.Files { - for replace, with := range pl.patchs { - file.Body = strings.ReplaceAll(file.Body, replace, with) - } - } - } - } - - txs[i] = gnoland.TxWithMetadata{ - Tx: tx, - } - } - - return txs, nil -} - -func (pl *pkgsLoader) LoadAllPackagesFromDir(path string) error { - // list all packages from target path - pkgslist, err := gnomod.ListPkgs(path) - if err != nil { - return fmt.Errorf("listing gno packages: %w", err) - } - - for _, pkg := range pkgslist { - if !pl.exist(pkg) { - pl.add(pkg) - } - } - - return nil -} - -func (pl *pkgsLoader) LoadPackage(modroot string, path, name string) error { - // Initialize a queue with the root package - queue := []gnomod.Pkg{{Dir: path, Name: name}} - - for len(queue) > 0 { - // Dequeue the first package - currentPkg := queue[0] - queue = queue[1:] - - if currentPkg.Dir == "" { - return fmt.Errorf("no path specified for package") - } - - if currentPkg.Name == "" { - // Load `gno.mod` information - gnoModPath := filepath.Join(currentPkg.Dir, "gno.mod") - gm, err := gnomod.ParseGnoMod(gnoModPath) - if err != nil { - return fmt.Errorf("unable to load %q: %w", gnoModPath, err) - } - gm.Sanitize() - - // Override package info with mod infos - currentPkg.Name = gm.Module.Mod.Path - currentPkg.Draft = gm.Draft - - pkg, err := gnolang.ReadMemPackage(currentPkg.Dir, currentPkg.Name) - if err != nil { - return fmt.Errorf("unable to read package at %q: %w", 
currentPkg.Dir, err) - } - imports, err := packages.Imports(pkg, nil) - if err != nil { - return fmt.Errorf("unable to load package imports in %q: %w", currentPkg.Dir, err) - } - for _, imp := range imports { - if imp.PkgPath == currentPkg.Name || gnolang.IsStdlib(imp.PkgPath) { - continue - } - currentPkg.Imports = append(currentPkg.Imports, imp.PkgPath) - } - } - - if currentPkg.Draft { - continue // Skip draft package - } - - if pl.exist(currentPkg) { - continue - } - pl.add(currentPkg) - - // Add requirements to the queue - for _, pkgPath := range currentPkg.Imports { - fullPath := filepath.Join(modroot, pkgPath) - queue = append(queue, gnomod.Pkg{Dir: fullPath}) - } - } - - return nil -} - -func (pl *pkgsLoader) add(pkg gnomod.Pkg) { - pl.visited[pkg.Name] = struct{}{} - pl.pkgs = append(pl.pkgs, pkg) -} - -func (pl *pkgsLoader) exist(pkg gnomod.Pkg) (ok bool) { - _, ok = pl.visited[pkg.Name] - return -} diff --git a/gno.land/pkg/integration/testscript_gnoland.go b/gno.land/pkg/integration/testscript_gnoland.go new file mode 100644 index 00000000000..ae484a07669 --- /dev/null +++ b/gno.land/pkg/integration/testscript_gnoland.go @@ -0,0 +1,787 @@ +package integration + +import ( + "bytes" + "context" + "errors" + "flag" + "fmt" + "hash/crc32" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/gnolang/gno/gno.land/pkg/gnoland" + "github.com/gnolang/gno/gno.land/pkg/gnoland/ugnot" + "github.com/gnolang/gno/gno.land/pkg/keyscli" + "github.com/gnolang/gno/gnovm/pkg/gnoenv" + bft "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/commands" + "github.com/gnolang/gno/tm2/pkg/crypto" + "github.com/gnolang/gno/tm2/pkg/crypto/bip39" + "github.com/gnolang/gno/tm2/pkg/crypto/ed25519" + "github.com/gnolang/gno/tm2/pkg/crypto/hd" + "github.com/gnolang/gno/tm2/pkg/crypto/keys" + "github.com/gnolang/gno/tm2/pkg/crypto/keys/client" + "github.com/gnolang/gno/tm2/pkg/crypto/secp256k1" + 
"github.com/gnolang/gno/tm2/pkg/std" + "github.com/rogpeppe/go-internal/testscript" + "github.com/stretchr/testify/require" +) + +const nodeMaxLifespan = time.Second * 30 + +type envKey int + +const ( + envKeyGenesis envKey = iota + envKeyLogger + envKeyPkgsLoader + envKeyPrivValKey + envKeyExecCommand + envKeyExecBin +) + +type commandkind int + +const ( + // commandKindBin builds and uses an integration binary to run the testscript + // in a separate process. This should be used for any external package that + // wants to use test scripts. + commandKindBin commandkind = iota + // commandKindTesting uses the current testing binary to run the testscript + // in a separate process. This command cannot be used outside this package. + commandKindTesting + // commandKindInMemory runs testscripts in memory. + commandKindInMemory +) + +type tNodeProcess struct { + NodeProcess + cfg *gnoland.InMemoryNodeConfig + nGnoKeyExec uint // Counter for execution of gnokey. +} + +// NodesManager manages access to the nodes map with synchronization. +type NodesManager struct { + nodes map[string]*tNodeProcess + mu sync.RWMutex +} + +// NewNodesManager creates a new instance of NodesManager. +func NewNodesManager() *NodesManager { + return &NodesManager{ + nodes: make(map[string]*tNodeProcess), + } +} + +func (nm *NodesManager) IsNodeRunning(sid string) bool { + nm.mu.RLock() + defer nm.mu.RUnlock() + + _, ok := nm.nodes[sid] + return ok +} + +// Get retrieves a node by its SID. +func (nm *NodesManager) Get(sid string) (*tNodeProcess, bool) { + nm.mu.RLock() + defer nm.mu.RUnlock() + node, exists := nm.nodes[sid] + return node, exists +} + +// Set adds or updates a node in the map. +func (nm *NodesManager) Set(sid string, node *tNodeProcess) { + nm.mu.Lock() + defer nm.mu.Unlock() + nm.nodes[sid] = node +} + +// Delete removes a node from the map. 
+func (nm *NodesManager) Delete(sid string) { + nm.mu.Lock() + defer nm.mu.Unlock() + delete(nm.nodes, sid) +} + +func SetupGnolandTestscript(t *testing.T, p *testscript.Params) error { + t.Helper() + + gnoRootDir := gnoenv.RootDir() + + nodesManager := NewNodesManager() + + defaultPK, err := generatePrivKeyFromMnemonic(DefaultAccount_Seed, "", 0, 0) + require.NoError(t, err) + + var buildOnce sync.Once + var gnolandBin string + + // Store the original setup scripts for potential wrapping + origSetup := p.Setup + p.Setup = func(env *testscript.Env) error { + // If there's an original setup, execute it + if origSetup != nil { + if err := origSetup(env); err != nil { + return err + } + } + + cmd, isSet := env.Values[envKeyExecCommand].(commandkind) + switch { + case !isSet: + cmd = commandKindBin // fallback on commandKindBin + fallthrough + case cmd == commandKindBin: + buildOnce.Do(func() { + t.Logf("building the gnoland integration node") + start := time.Now() + gnolandBin = buildGnoland(t, gnoRootDir) + t.Logf("time to build the node: %v", time.Since(start).String()) + }) + + env.Values[envKeyExecBin] = gnolandBin + } + + tmpdir, dbdir := t.TempDir(), t.TempDir() + gnoHomeDir := filepath.Join(tmpdir, "gno") + + kb, err := keys.NewKeyBaseFromDir(gnoHomeDir) + if err != nil { + return err + } + + kb.ImportPrivKey(DefaultAccount_Name, defaultPK, "") + env.Setenv("USER_SEED_"+DefaultAccount_Name, DefaultAccount_Seed) + env.Setenv("USER_ADDR_"+DefaultAccount_Name, DefaultAccount_Address) + + // New private key + env.Values[envKeyPrivValKey] = ed25519.GenPrivKey() + env.Setenv("GNO_DBDIR", dbdir) + + // Generate node short id + var sid string + { + works := env.Getenv("WORK") + sum := crc32.ChecksumIEEE([]byte(works)) + sid = strconv.FormatUint(uint64(sum), 16) + env.Setenv("SID", sid) + } + + balanceFile := LoadDefaultGenesisBalanceFile(t, gnoRootDir) + genesisParamFile := LoadDefaultGenesisParamFile(t, gnoRootDir) + + // Track new user balances added via the 
`adduser` + // command and packages added with the `loadpkg` command. + // This genesis will be use when node is started. + genesis := &gnoland.GnoGenesisState{ + Balances: balanceFile, + Params: genesisParamFile, + Txs: []gnoland.TxWithMetadata{}, + } + + env.Values[envKeyGenesis] = genesis + env.Values[envKeyPkgsLoader] = NewPkgsLoader() + + env.Setenv("GNOROOT", gnoRootDir) + env.Setenv("GNOHOME", gnoHomeDir) + + env.Defer(func() { + // Gracefully stop the node, if any + n, exist := nodesManager.Get(sid) + if !exist { + return + } + + if err := n.Stop(); err != nil { + err = fmt.Errorf("unable to stop the node gracefully: %w", err) + env.T().Fatal(err.Error()) + } + }) + + return nil + } + + cmds := map[string]func(ts *testscript.TestScript, neg bool, args []string){ + "gnoland": gnolandCmd(t, nodesManager, gnoRootDir), + "gnokey": gnokeyCmd(nodesManager), + "adduser": adduserCmd(nodesManager), + "adduserfrom": adduserfromCmd(nodesManager), + "patchpkg": patchpkgCmd(), + "loadpkg": loadpkgCmd(gnoRootDir), + } + + // Initialize cmds map if needed + if p.Cmds == nil { + p.Cmds = make(map[string]func(ts *testscript.TestScript, neg bool, args []string)) + } + + // Register gnoland command + for cmd, call := range cmds { + if _, exist := p.Cmds[cmd]; exist { + panic(fmt.Errorf("unable register %q: command already exist", cmd)) + } + + p.Cmds[cmd] = call + } + + return nil +} + +func gnolandCmd(t *testing.T, nodesManager *NodesManager, gnoRootDir string) func(ts *testscript.TestScript, neg bool, args []string) { + t.Helper() + + return func(ts *testscript.TestScript, neg bool, args []string) { + sid := getNodeSID(ts) + + cmd, cmdargs := "", []string{} + if len(args) > 0 { + cmd, cmdargs = args[0], args[1:] + } + + var err error + switch cmd { + case "": + err = errors.New("no command provided") + case "start": + if nodesManager.IsNodeRunning(sid) { + err = fmt.Errorf("node already started") + break + } + + // XXX: this is a bit hacky, we should consider moving + // 
gnoland into his own package to be able to use it + // directly or use the config command for this. + fs := flag.NewFlagSet("start", flag.ContinueOnError) + nonVal := fs.Bool("non-validator", false, "set up node as a non-validator") + if err := fs.Parse(cmdargs); err != nil { + ts.Fatalf("unable to parse `gnoland start` flags: %s", err) + } + + pkgs := ts.Value(envKeyPkgsLoader).(*PkgsLoader) + creator := crypto.MustAddressFromString(DefaultAccount_Address) + defaultFee := std.NewFee(50000, std.MustParseCoin(ugnot.ValueString(1000000))) + pkgsTxs, err := pkgs.LoadPackages(creator, defaultFee, nil) + if err != nil { + ts.Fatalf("unable to load packages txs: %s", err) + } + + cfg := TestingMinimalNodeConfig(gnoRootDir) + genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState) + genesis.Txs = append(pkgsTxs, genesis.Txs...) + + cfg.Genesis.AppState = *genesis + if *nonVal { + pv := gnoland.NewMockedPrivValidator() + cfg.Genesis.Validators = []bft.GenesisValidator{ + { + Address: pv.GetPubKey().Address(), + PubKey: pv.GetPubKey(), + Power: 10, + Name: "none", + }, + } + } + + ctx, cancel := context.WithTimeout(context.Background(), nodeMaxLifespan) + ts.Defer(cancel) + + dbdir := ts.Getenv("GNO_DBDIR") + priv := ts.Value(envKeyPrivValKey).(ed25519.PrivKeyEd25519) + nodep := setupNode(ts, ctx, &ProcessNodeConfig{ + ValidatorKey: priv, + DBDir: dbdir, + RootDir: gnoRootDir, + TMConfig: cfg.TMConfig, + Genesis: NewMarshalableGenesisDoc(cfg.Genesis), + }) + + nodesManager.Set(sid, &tNodeProcess{NodeProcess: nodep, cfg: cfg}) + + ts.Setenv("RPC_ADDR", nodep.Address()) + fmt.Fprintln(ts.Stdout(), "node started successfully") + + case "restart": + node, exists := nodesManager.Get(sid) + if !exists { + err = fmt.Errorf("node must be started before being restarted") + break + } + + if err := node.Stop(); err != nil { + err = fmt.Errorf("unable to stop the node gracefully: %w", err) + break + } + + ctx, cancel := context.WithTimeout(context.Background(), nodeMaxLifespan) + 
ts.Defer(cancel) + + priv := ts.Value(envKeyPrivValKey).(ed25519.PrivKeyEd25519) + dbdir := ts.Getenv("GNO_DBDIR") + nodep := setupNode(ts, ctx, &ProcessNodeConfig{ + ValidatorKey: priv, + DBDir: dbdir, + RootDir: gnoRootDir, + TMConfig: node.cfg.TMConfig, + Genesis: NewMarshalableGenesisDoc(node.cfg.Genesis), + }) + + ts.Setenv("RPC_ADDR", nodep.Address()) + nodesManager.Set(sid, &tNodeProcess{NodeProcess: nodep, cfg: node.cfg}) + + fmt.Fprintln(ts.Stdout(), "node restarted successfully") + + case "stop": + node, exists := nodesManager.Get(sid) + if !exists { + err = fmt.Errorf("node not started cannot be stopped") + break + } + + if err := node.Stop(); err != nil { + err = fmt.Errorf("unable to stop the node gracefully: %w", err) + break + } + + fmt.Fprintln(ts.Stdout(), "node stopped successfully") + nodesManager.Delete(sid) + + default: + err = fmt.Errorf("not supported command: %q", cmd) + // XXX: support gnoland other commands + } + + tsValidateError(ts, strings.TrimSpace("gnoland "+cmd), neg, err) + } +} + +func gnokeyCmd(nodes *NodesManager) func(ts *testscript.TestScript, neg bool, args []string) { + return func(ts *testscript.TestScript, neg bool, args []string) { + gnoHomeDir := ts.Getenv("GNOHOME") + + sid := getNodeSID(ts) + + args, err := unquote(args) + if err != nil { + tsValidateError(ts, "gnokey", neg, err) + } + + io := commands.NewTestIO() + io.SetOut(commands.WriteNopCloser(ts.Stdout())) + io.SetErr(commands.WriteNopCloser(ts.Stderr())) + cmd := keyscli.NewRootCmd(io, client.DefaultBaseOptions) + + io.SetIn(strings.NewReader("\n")) + defaultArgs := []string{ + "-home", gnoHomeDir, + "-insecure-password-stdin=true", + } + + if n, ok := nodes.Get(sid); ok { + if raddr := n.Address(); raddr != "" { + defaultArgs = append(defaultArgs, "-remote", raddr) + } + + n.nGnoKeyExec++ + } + + args = append(defaultArgs, args...) 
+ + err = cmd.ParseAndRun(context.Background(), args) + tsValidateError(ts, "gnokey", neg, err) + } +} + +func adduserCmd(nodesManager *NodesManager) func(ts *testscript.TestScript, neg bool, args []string) { + return func(ts *testscript.TestScript, neg bool, args []string) { + gnoHomeDir := ts.Getenv("GNOHOME") + + sid := getNodeSID(ts) + if nodesManager.IsNodeRunning(sid) { + tsValidateError(ts, "adduser", neg, errors.New("adduser must be used before starting node")) + return + } + + if len(args) == 0 { + ts.Fatalf("new user name required") + } + + kb, err := keys.NewKeyBaseFromDir(gnoHomeDir) + if err != nil { + ts.Fatalf("unable to get keybase") + } + + balance, err := createAccount(ts, kb, args[0]) + if err != nil { + ts.Fatalf("error creating account %s: %s", args[0], err) + } + + genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState) + genesis.Balances = append(genesis.Balances, balance) + } +} + +func adduserfromCmd(nodesManager *NodesManager) func(ts *testscript.TestScript, neg bool, args []string) { + return func(ts *testscript.TestScript, neg bool, args []string) { + gnoHomeDir := ts.Getenv("GNOHOME") + + sid := getNodeSID(ts) + if nodesManager.IsNodeRunning(sid) { + tsValidateError(ts, "adduserfrom", neg, errors.New("adduserfrom must be used before starting node")) + return + } + + var account, index uint64 + var err error + + switch len(args) { + case 2: + case 4: + index, err = strconv.ParseUint(args[3], 10, 32) + if err != nil { + ts.Fatalf("invalid index number %s", args[3]) + } + fallthrough + case 3: + account, err = strconv.ParseUint(args[2], 10, 32) + if err != nil { + ts.Fatalf("invalid account number %s", args[2]) + } + default: + ts.Fatalf("to create account from metadatas, user name and mnemonic are required ( account and index are optional )") + } + + kb, err := keys.NewKeyBaseFromDir(gnoHomeDir) + if err != nil { + ts.Fatalf("unable to get keybase") + } + + balance, err := createAccountFrom(ts, kb, args[0], args[1], uint32(account), 
uint32(index)) + if err != nil { + ts.Fatalf("error creating wallet %s", err) + } + + genesis := ts.Value(envKeyGenesis).(*gnoland.GnoGenesisState) + genesis.Balances = append(genesis.Balances, balance) + + fmt.Fprintf(ts.Stdout(), "Added %s(%s) to genesis", args[0], balance.Address) + } +} + +func patchpkgCmd() func(ts *testscript.TestScript, neg bool, args []string) { + return func(ts *testscript.TestScript, neg bool, args []string) { + args, err := unquote(args) + if err != nil { + tsValidateError(ts, "patchpkg", neg, err) + } + + if len(args) != 2 { + ts.Fatalf("`patchpkg`: should have exactly 2 arguments") + } + + pkgs := ts.Value(envKeyPkgsLoader).(*PkgsLoader) + replace, with := args[0], args[1] + pkgs.SetPatch(replace, with) + } +} + +func loadpkgCmd(gnoRootDir string) func(ts *testscript.TestScript, neg bool, args []string) { + return func(ts *testscript.TestScript, neg bool, args []string) { + workDir := ts.Getenv("WORK") + examplesDir := filepath.Join(gnoRootDir, "examples") + + pkgs := ts.Value(envKeyPkgsLoader).(*PkgsLoader) + + var path, name string + switch len(args) { + case 2: + name = args[0] + path = filepath.Clean(args[1]) + case 1: + path = filepath.Clean(args[0]) + case 0: + ts.Fatalf("`loadpkg`: no arguments specified") + default: + ts.Fatalf("`loadpkg`: too many arguments specified") + } + + if path == "all" { + ts.Logf("warning: loading all packages") + if err := pkgs.LoadAllPackagesFromDir(examplesDir); err != nil { + ts.Fatalf("unable to load packages from %q: %s", examplesDir, err) + } + + return + } + + if !strings.HasPrefix(path, workDir) { + path = filepath.Join(examplesDir, path) + } + + if err := pkgs.LoadPackage(examplesDir, path, name); err != nil { + ts.Fatalf("`loadpkg` unable to load package(s) from %q: %s", args[0], err) + } + + ts.Logf("%q package was added to genesis", args[0]) + } +} + +type tsLogWriter struct { + ts *testscript.TestScript +} + +func (l *tsLogWriter) Write(p []byte) (n int, err error) { + 
l.ts.Logf(string(p)) + return len(p), nil +} + +func setupNode(ts *testscript.TestScript, ctx context.Context, cfg *ProcessNodeConfig) NodeProcess { + pcfg := ProcessConfig{ + Node: cfg, + Stdout: &tsLogWriter{ts}, + Stderr: ts.Stderr(), + } + + // Setup coverdir provided + if coverdir := ts.Getenv("GOCOVERDIR"); coverdir != "" { + pcfg.CoverDir = coverdir + } + + val := ts.Value(envKeyExecCommand) + + switch cmd := val.(commandkind); cmd { + case commandKindInMemory: + nodep, err := RunInMemoryProcess(ctx, pcfg) + if err != nil { + ts.Fatalf("unable to start in memory node: %s", err) + } + + return nodep + + case commandKindTesting: + if !testing.Testing() { + ts.Fatalf("unable to invoke testing process while not testing") + } + + return runTestingNodeProcess(&testingTS{ts}, ctx, pcfg) + + case commandKindBin: + bin := ts.Value(envKeyExecBin).(string) + nodep, err := RunNodeProcess(ctx, pcfg, bin) + if err != nil { + ts.Fatalf("unable to start process node: %s", err) + } + + return nodep + + default: + ts.Fatalf("unknown command kind: %+v", cmd) + } + + return nil +} + +// `unquote` takes a slice of strings, resulting from splitting a string block by spaces, and +// processes them. The function handles quoted phrases and escape characters within these strings. +func unquote(args []string) ([]string, error) { + const quote = '"' + + parts := []string{} + var inQuote bool + + var part strings.Builder + for _, arg := range args { + var escaped bool + for _, c := range arg { + if escaped { + // If the character is meant to be escaped, it is processed with Unquote. + // We use `Unquote` here for two main reasons: + // 1. It will validate that the escape sequence is correct + // 2. It converts the escaped string to its corresponding raw character. + // For example, "\\t" becomes '\t'. 
+ uc, err := strconv.Unquote(`"\` + string(c) + `"`) + if err != nil { + return nil, fmt.Errorf("unhandled escape sequence `\\%c`: %w", c, err) + } + + part.WriteString(uc) + escaped = false + continue + } + + // If we are inside a quoted string and encounter an escape character, + // flag the next character as `escaped` + if inQuote && c == '\\' { + escaped = true + continue + } + + // Detect quote and toggle inQuote state + if c == quote { + inQuote = !inQuote + continue + } + + // Handle regular character + part.WriteRune(c) + } + + // If we're inside a quote, add a single space. + // It reflects one or multiple spaces between args in the original string. + if inQuote { + part.WriteRune(' ') + continue + } + + // Finalize part, add to parts, and reset for next part + parts = append(parts, part.String()) + part.Reset() + } + + // Check if a quote is left open + if inQuote { + return nil, errors.New("unfinished quote") + } + + return parts, nil +} + +func getNodeSID(ts *testscript.TestScript) string { + return ts.Getenv("SID") +} + +func tsValidateError(ts *testscript.TestScript, cmd string, neg bool, err error) { + if err != nil { + fmt.Fprintf(ts.Stderr(), "%q error: %+v\n", cmd, err) + if !neg { + ts.Fatalf("unexpected %q command failure: %s", cmd, err) + } + } else { + if neg { + ts.Fatalf("unexpected %q command success", cmd) + } + } +} + +type envSetter interface { + Setenv(key, value string) +} + +// createAccount creates a new account with the given name and adds it to the keybase. 
+func createAccount(env envSetter, kb keys.Keybase, accountName string) (gnoland.Balance, error) { + var balance gnoland.Balance + entropy, err := bip39.NewEntropy(256) + if err != nil { + return balance, fmt.Errorf("error creating entropy: %w", err) + } + + mnemonic, err := bip39.NewMnemonic(entropy) + if err != nil { + return balance, fmt.Errorf("error generating mnemonic: %w", err) + } + + var keyInfo keys.Info + if keyInfo, err = kb.CreateAccount(accountName, mnemonic, "", "", 0, 0); err != nil { + return balance, fmt.Errorf("unable to create account: %w", err) + } + + address := keyInfo.GetAddress() + env.Setenv("USER_SEED_"+accountName, mnemonic) + env.Setenv("USER_ADDR_"+accountName, address.String()) + + return gnoland.Balance{ + Address: address, + Amount: std.Coins{std.NewCoin(ugnot.Denom, 10e6)}, + }, nil +} + +// createAccountFrom creates a new account with the given metadata and adds it to the keybase. +func createAccountFrom(env envSetter, kb keys.Keybase, accountName, mnemonic string, account, index uint32) (gnoland.Balance, error) { + var balance gnoland.Balance + + // check if mnemonic is valid + if !bip39.IsMnemonicValid(mnemonic) { + return balance, fmt.Errorf("invalid mnemonic") + } + + keyInfo, err := kb.CreateAccount(accountName, mnemonic, "", "", account, index) + if err != nil { + return balance, fmt.Errorf("unable to create account: %w", err) + } + + address := keyInfo.GetAddress() + env.Setenv("USER_SEED_"+accountName, mnemonic) + env.Setenv("USER_ADDR_"+accountName, address.String()) + + return gnoland.Balance{ + Address: address, + Amount: std.Coins{std.NewCoin(ugnot.Denom, 10e6)}, + }, nil +} + +func buildGnoland(t *testing.T, rootdir string) string { + t.Helper() + + bin := filepath.Join(t.TempDir(), "gnoland-test") + + t.Log("building gnoland integration binary...") + + // Build a fresh gno binary in a temp directory + gnoArgsBuilder := []string{"build", "-o", bin} + + os.Executable() + + // Forward `-covermode` settings if set + if 
coverMode := testing.CoverMode(); coverMode != "" { + gnoArgsBuilder = append(gnoArgsBuilder, + "-covermode", coverMode, + ) + } + + // Append the path to the gno command source + gnoArgsBuilder = append(gnoArgsBuilder, filepath.Join(rootdir, + "gno.land", "pkg", "integration", "process")) + + t.Logf("build command: %s", strings.Join(gnoArgsBuilder, " ")) + + cmd := exec.Command("go", gnoArgsBuilder...) + + var buff bytes.Buffer + cmd.Stderr, cmd.Stdout = &buff, &buff + defer buff.Reset() + + if err := cmd.Run(); err != nil { + require.FailNowf(t, "unable to build binary", "%q\n%s", + err.Error(), buff.String()) + } + + return bin +} + +// GeneratePrivKeyFromMnemonic generates a crypto.PrivKey from a mnemonic. +func generatePrivKeyFromMnemonic(mnemonic, bip39Passphrase string, account, index uint32) (crypto.PrivKey, error) { + // Generate Seed from Mnemonic + seed, err := bip39.NewSeedWithErrorChecking(mnemonic, bip39Passphrase) + if err != nil { + return nil, fmt.Errorf("failed to generate seed: %w", err) + } + + // Derive Private Key + coinType := crypto.CoinType // ensure this is set correctly in your context + hdPath := hd.NewFundraiserParams(account, coinType, index) + masterPriv, ch := hd.ComputeMastersFromSeed(seed) + derivedPriv, err := hd.DerivePrivateKeyForPath(masterPriv, ch, hdPath.String()) + if err != nil { + return nil, fmt.Errorf("failed to derive private key: %w", err) + } + + // Convert to secp256k1 private key + privKey := secp256k1.PrivKeySecp256k1(derivedPriv) + return privKey, nil +} diff --git a/gno.land/pkg/integration/integration_test.go b/gno.land/pkg/integration/testscript_gnoland_test.go similarity index 93% rename from gno.land/pkg/integration/integration_test.go rename to gno.land/pkg/integration/testscript_gnoland_test.go index 99a3e6c7eca..2c301064969 100644 --- a/gno.land/pkg/integration/integration_test.go +++ b/gno.land/pkg/integration/testscript_gnoland_test.go @@ -8,12 +8,6 @@ import ( "github.com/stretchr/testify/require" ) 
-func TestTestdata(t *testing.T) { - t.Parallel() - - RunGnolandTestscripts(t, "testdata") -} - func TestUnquote(t *testing.T) { t.Parallel() diff --git a/gno.land/pkg/integration/testing.go b/gno.land/pkg/integration/testscript_testing.go similarity index 95% rename from gno.land/pkg/integration/testing.go rename to gno.land/pkg/integration/testscript_testing.go index 0cd3152d888..9eed180dd8b 100644 --- a/gno.land/pkg/integration/testing.go +++ b/gno.land/pkg/integration/testscript_testing.go @@ -2,6 +2,7 @@ package integration import ( "errors" + "testing" "github.com/rogpeppe/go-internal/testscript" "github.com/stretchr/testify/assert" @@ -16,6 +17,7 @@ var errFailNow = errors.New("fail now!") //nolint:stylecheck var ( _ require.TestingT = (*testingTS)(nil) _ assert.TestingT = (*testingTS)(nil) + _ TestingTS = &testing.T{} ) type TestingTS = require.TestingT diff --git a/gnovm/cmd/gno/testdata_test.go b/gnovm/cmd/gno/testdata_test.go index 6b1bbd1d459..c5cb0def04e 100644 --- a/gnovm/cmd/gno/testdata_test.go +++ b/gnovm/cmd/gno/testdata_test.go @@ -3,7 +3,6 @@ package main import ( "os" "path/filepath" - "strconv" "testing" "github.com/gnolang/gno/gnovm/pkg/integration" @@ -18,25 +17,23 @@ func Test_Scripts(t *testing.T) { testdirs, err := os.ReadDir(testdata) require.NoError(t, err) + homeDir, buildDir := t.TempDir(), t.TempDir() for _, dir := range testdirs { if !dir.IsDir() { continue } name := dir.Name() + t.Logf("testing: %s", name) t.Run(name, func(t *testing.T) { - updateScripts, _ := strconv.ParseBool(os.Getenv("UPDATE_SCRIPTS")) - p := testscript.Params{ - UpdateScripts: updateScripts, - Dir: filepath.Join(testdata, name), - } - + testdir := filepath.Join(testdata, name) + p := integration.NewTestingParams(t, testdir) if coverdir, ok := integration.ResolveCoverageDir(); ok { err := integration.SetupTestscriptsCoverage(&p, coverdir) require.NoError(t, err) } - err := integration.SetupGno(&p, t.TempDir()) + err := integration.SetupGno(&p, homeDir, 
buildDir) require.NoError(t, err) testscript.Run(t, p) diff --git a/gnovm/pkg/integration/testscript.go b/gnovm/pkg/integration/testscript.go new file mode 100644 index 00000000000..5b7c5c81a44 --- /dev/null +++ b/gnovm/pkg/integration/testscript.go @@ -0,0 +1,36 @@ +package integration + +import ( + "os" + "strconv" + "testing" + + "github.com/rogpeppe/go-internal/testscript" +) + +// NewTestingParams setup and initialize base params for testing. +func NewTestingParams(t *testing.T, testdir string) testscript.Params { + t.Helper() + + var params testscript.Params + params.Dir = testdir + + params.UpdateScripts, _ = strconv.ParseBool(os.Getenv("UPDATE_SCRIPTS")) + params.TestWork, _ = strconv.ParseBool(os.Getenv("TESTWORK")) + if deadline, ok := t.Deadline(); ok && params.Deadline.IsZero() { + params.Deadline = deadline + } + + // Store the original setup scripts for potential wrapping + params.Setup = func(env *testscript.Env) error { + // Set the UPDATE_SCRIPTS environment variable + env.Setenv("UPDATE_SCRIPTS", strconv.FormatBool(params.UpdateScripts)) + + // Set the environment variable + env.Setenv("TESTWORK", strconv.FormatBool(params.TestWork)) + + return nil + } + + return params +} diff --git a/gnovm/pkg/integration/coverage.go b/gnovm/pkg/integration/testscript_coverage.go similarity index 100% rename from gnovm/pkg/integration/coverage.go rename to gnovm/pkg/integration/testscript_coverage.go diff --git a/gnovm/pkg/integration/gno.go b/gnovm/pkg/integration/testscript_gno.go similarity index 87% rename from gnovm/pkg/integration/gno.go rename to gnovm/pkg/integration/testscript_gno.go index a389b6a9b24..03a3fcd6056 100644 --- a/gnovm/pkg/integration/gno.go +++ b/gnovm/pkg/integration/testscript_gno.go @@ -17,7 +17,7 @@ import ( // If the `gno` binary doesn't exist, it's built using the `go build` command into the specified buildDir. 
// The function also include the `gno` command into `p.Cmds` to and wrap environment into p.Setup // to correctly set up the environment variables needed for the `gno` command. -func SetupGno(p *testscript.Params, buildDir string) error { +func SetupGno(p *testscript.Params, homeDir, buildDir string) error { // Try to fetch `GNOROOT` from the environment variables gnoroot := gnoenv.RootDir() @@ -62,18 +62,10 @@ func SetupGno(p *testscript.Params, buildDir string) error { // Set the GNOROOT environment variable env.Setenv("GNOROOT", gnoroot) - // Create a temporary home directory because certain commands require access to $HOME/.cache/go-build - home, err := os.MkdirTemp("", "gno") - if err != nil { - return fmt.Errorf("unable to create temporary home directory: %w", err) - } - env.Setenv("HOME", home) + env.Setenv("HOME", homeDir) // Avoids go command printing errors relating to lack of go.mod. env.Setenv("GO111MODULE", "off") - // Cleanup home folder - env.Defer(func() { os.RemoveAll(home) }) - return nil } diff --git a/gnovm/pkg/test/test.go b/gnovm/pkg/test/test.go index 80f56e66d2e..d06540761d7 100644 --- a/gnovm/pkg/test/test.go +++ b/gnovm/pkg/test/test.go @@ -315,7 +315,7 @@ func (opts *TestOptions) runTestFiles( // RunFiles doesn't do that currently) // - Wrap here. m = Machine(gs, opts.Output, memPkg.Path) - m.Alloc = alloc + m.Alloc = alloc.Reset() m.SetActivePackage(pv) testingpv := m.Store.GetPackage("testing", false) diff --git a/gnovm/stdlibs/encoding/base32/base32.gno b/gnovm/stdlibs/encoding/base32/base32.gno new file mode 100644 index 00000000000..eba0a1d539e --- /dev/null +++ b/gnovm/stdlibs/encoding/base32/base32.gno @@ -0,0 +1,590 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package base32 implements base32 encoding as specified by RFC 4648. 
+package base32 + +import ( + "io" + "strconv" +) + +/* + * Encodings + */ + +// An Encoding is a radix 32 encoding/decoding scheme, defined by a +// 32-character alphabet. The most common is the "base32" encoding +// introduced for SASL GSSAPI and standardized in RFC 4648. +// The alternate "base32hex" encoding is used in DNSSEC. +type Encoding struct { + encode [32]byte // mapping of symbol index to symbol byte value + decodeMap [256]uint8 // mapping of symbol byte value to symbol index + padChar rune +} + +const ( + StdPadding rune = '=' // Standard padding character + NoPadding rune = -1 // No padding +) + +const ( + decodeMapInitialize = "" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + invalidIndex = '\xff' +) + +// NewEncoding returns a new padded Encoding defined by the given alphabet, +// which must be a 32-byte string that contains unique byte values and +// does not contain the 
padding character or CR / LF ('\r', '\n'). +// The alphabet is treated as a sequence of byte values +// without any special treatment for multi-byte UTF-8. +// The resulting Encoding uses the default padding character ('='), +// which may be changed or disabled via [Encoding.WithPadding]. +func NewEncoding(encoder string) *Encoding { + if len(encoder) != 32 { + panic("encoding alphabet is not 32-bytes long") + } + + e := new(Encoding) + e.padChar = StdPadding + copy(e.encode[:], encoder) + copy(e.decodeMap[:], decodeMapInitialize) + + for i := 0; i < len(encoder); i++ { + // Note: While we document that the alphabet cannot contain + // the padding character, we do not enforce it since we do not know + // if the caller intends to switch the padding from StdPadding later. + switch { + case encoder[i] == '\n' || encoder[i] == '\r': + panic("encoding alphabet contains newline character") + case e.decodeMap[encoder[i]] != invalidIndex: + panic("encoding alphabet includes duplicate symbols") + } + e.decodeMap[encoder[i]] = uint8(i) + } + return e +} + +// StdEncoding is the standard base32 encoding, as defined in RFC 4648. +var StdEncoding = NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567") + +// HexEncoding is the “Extended Hex Alphabet” defined in RFC 4648. +// It is typically used in DNS. +var HexEncoding = NewEncoding("0123456789ABCDEFGHIJKLMNOPQRSTUV") + +// WithPadding creates a new encoding identical to enc except +// with a specified padding character, or NoPadding to disable padding. +// The padding character must not be '\r' or '\n', +// must not be contained in the encoding's alphabet, +// must not be negative, and must be a rune equal or below '\xff'. +// Padding characters above '\x7f' are encoded as their exact byte value +// rather than using the UTF-8 representation of the codepoint. 
+func (enc Encoding) WithPadding(padding rune) *Encoding { + switch { + case padding < NoPadding || padding == '\r' || padding == '\n' || padding > 0xff: + panic("invalid padding") + case padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex: + panic("padding contained in alphabet") + } + enc.padChar = padding + return &enc +} + +/* + * Encoder + */ + +// Encode encodes src using the encoding enc, +// writing [Encoding.EncodedLen](len(src)) bytes to dst. +// +// The encoding pads the output to a multiple of 8 bytes, +// so Encode is not appropriate for use on individual blocks +// of a large data stream. Use [NewEncoder] instead. +func (enc *Encoding) Encode(dst, src []byte) { + if len(src) == 0 { + return + } + // enc is a pointer receiver, so the use of enc.encode within the hot + // loop below means a nil check at every operation. Lift that nil check + // outside of the loop to speed up the encoder. + _ = enc.encode + + di, si := 0, 0 + n := (len(src) / 5) * 5 + for si < n { + // Combining two 32 bit loads allows the same code to be used + // for 32 and 64 bit platforms. + hi := uint32(src[si+0])<<24 | uint32(src[si+1])<<16 | uint32(src[si+2])<<8 | uint32(src[si+3]) + lo := hi<<8 | uint32(src[si+4]) + + dst[di+0] = enc.encode[(hi>>27)&0x1F] + dst[di+1] = enc.encode[(hi>>22)&0x1F] + dst[di+2] = enc.encode[(hi>>17)&0x1F] + dst[di+3] = enc.encode[(hi>>12)&0x1F] + dst[di+4] = enc.encode[(hi>>7)&0x1F] + dst[di+5] = enc.encode[(hi>>2)&0x1F] + dst[di+6] = enc.encode[(lo>>5)&0x1F] + dst[di+7] = enc.encode[(lo)&0x1F] + + si += 5 + di += 8 + } + + // Add the remaining small block + remain := len(src) - si + if remain == 0 { + return + } + + // Encode the remaining bytes in reverse order. 
+ val := uint32(0) + switch remain { + case 4: + val |= uint32(src[si+3]) + dst[di+6] = enc.encode[val<<3&0x1F] + dst[di+5] = enc.encode[val>>2&0x1F] + fallthrough + case 3: + val |= uint32(src[si+2]) << 8 + dst[di+4] = enc.encode[val>>7&0x1F] + fallthrough + case 2: + val |= uint32(src[si+1]) << 16 + dst[di+3] = enc.encode[val>>12&0x1F] + dst[di+2] = enc.encode[val>>17&0x1F] + fallthrough + case 1: + val |= uint32(src[si+0]) << 24 + dst[di+1] = enc.encode[val>>22&0x1F] + dst[di+0] = enc.encode[val>>27&0x1F] + } + + // Pad the final quantum + if enc.padChar != NoPadding { + nPad := (remain * 8 / 5) + 1 + for i := nPad; i < 8; i++ { + dst[di+i] = byte(enc.padChar) + } + } +} + +// AppendEncode appends the base32 encoded src to dst +// and returns the extended buffer. +func (enc *Encoding) AppendEncode(dst, src []byte) []byte { + n := enc.EncodedLen(len(src)) + dst = slicesGrow(dst, n) + enc.Encode(dst[len(dst):][:n], src) + return dst[:len(dst)+n] +} + +// XXX: non-generic slices.Grow +func slicesGrow(s []byte, n int) []byte { + if n -= cap(s) - len(s); n > 0 { + s = append(s[:cap(s)], make([]byte, n)...)[:len(s)] + } + return s +} + +// EncodeToString returns the base32 encoding of src. +func (enc *Encoding) EncodeToString(src []byte) string { + buf := make([]byte, enc.EncodedLen(len(src))) + enc.Encode(buf, src) + return string(buf) +} + +type encoder struct { + err error + enc *Encoding + w io.Writer + buf [5]byte // buffered data waiting to be encoded + nbuf int // number of bytes in buf + out [1024]byte // output buffer +} + +func (e *encoder) Write(p []byte) (n int, err error) { + if e.err != nil { + return 0, e.err + } + + // Leading fringe. 
+ if e.nbuf > 0 { + var i int + for i = 0; i < len(p) && e.nbuf < 5; i++ { + e.buf[e.nbuf] = p[i] + e.nbuf++ + } + n += i + p = p[i:] + if e.nbuf < 5 { + return + } + e.enc.Encode(e.out[0:], e.buf[0:]) + if _, e.err = e.w.Write(e.out[0:8]); e.err != nil { + return n, e.err + } + e.nbuf = 0 + } + + // Large interior chunks. + for len(p) >= 5 { + nn := len(e.out) / 8 * 5 + if nn > len(p) { + nn = len(p) + nn -= nn % 5 + } + e.enc.Encode(e.out[0:], p[0:nn]) + if _, e.err = e.w.Write(e.out[0 : nn/5*8]); e.err != nil { + return n, e.err + } + n += nn + p = p[nn:] + } + + // Trailing fringe. + copy(e.buf[:], p) + e.nbuf = len(p) + n += len(p) + return +} + +// Close flushes any pending output from the encoder. +// It is an error to call Write after calling Close. +func (e *encoder) Close() error { + // If there's anything left in the buffer, flush it out + if e.err == nil && e.nbuf > 0 { + e.enc.Encode(e.out[0:], e.buf[0:e.nbuf]) + encodedLen := e.enc.EncodedLen(e.nbuf) + e.nbuf = 0 + _, e.err = e.w.Write(e.out[0:encodedLen]) + } + return e.err +} + +// NewEncoder returns a new base32 stream encoder. Data written to +// the returned writer will be encoded using enc and then written to w. +// Base32 encodings operate in 5-byte blocks; when finished +// writing, the caller must Close the returned encoder to flush any +// partially written blocks. +func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser { + return &encoder{enc: enc, w: w} +} + +// EncodedLen returns the length in bytes of the base32 encoding +// of an input buffer of length n. 
+func (enc *Encoding) EncodedLen(n int) int { + if enc.padChar == NoPadding { + return n/5*8 + (n%5*8+4)/5 + } + return (n + 4) / 5 * 8 +} + +/* + * Decoder + */ + +type CorruptInputError int64 + +func (e CorruptInputError) Error() string { + return "illegal base32 data at input byte " + strconv.FormatInt(int64(e), 10) +} + +// decode is like Decode but returns an additional 'end' value, which +// indicates if end-of-message padding was encountered and thus any +// additional data is an error. This method assumes that src has been +// stripped of all supported whitespace ('\r' and '\n'). +func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) { + // Lift the nil check outside of the loop. + _ = enc.decodeMap + + dsti := 0 + olen := len(src) + + for len(src) > 0 && !end { + // Decode quantum using the base32 alphabet + var dbuf [8]byte + dlen := 8 + + for j := 0; j < 8; { + + if len(src) == 0 { + if enc.padChar != NoPadding { + // We have reached the end and are missing padding + return n, false, CorruptInputError(olen - len(src) - j) + } + // We have reached the end and are not expecting any padding + dlen, end = j, true + break + } + in := src[0] + src = src[1:] + if in == byte(enc.padChar) && j >= 2 && len(src) < 8 { + // We've reached the end and there's padding + if len(src)+j < 8-1 { + // not enough padding + return n, false, CorruptInputError(olen) + } + for k := 0; k < 8-1-j; k++ { + if len(src) > k && src[k] != byte(enc.padChar) { + // incorrect padding + return n, false, CorruptInputError(olen - len(src) + k - 1) + } + } + dlen, end = j, true + // 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not + // valid dlen values. See RFC 4648 Section 6 "Base 32 Encoding" listing + // the five valid padding lengths, and Section 9 "Illustrations and + // Examples" for an illustration for how the 1st, 3rd and 6th base32 + // src bytes do not yield enough information to decode a dst byte. 
+ if dlen == 1 || dlen == 3 || dlen == 6 { + return n, false, CorruptInputError(olen - len(src) - 1) + } + break + } + dbuf[j] = enc.decodeMap[in] + if dbuf[j] == 0xFF { + return n, false, CorruptInputError(olen - len(src) - 1) + } + j++ + } + + // Pack 8x 5-bit source blocks into 5 byte destination + // quantum + switch dlen { + case 8: + dst[dsti+4] = dbuf[6]<<5 | dbuf[7] + n++ + fallthrough + case 7: + dst[dsti+3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3 + n++ + fallthrough + case 5: + dst[dsti+2] = dbuf[3]<<4 | dbuf[4]>>1 + n++ + fallthrough + case 4: + dst[dsti+1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4 + n++ + fallthrough + case 2: + dst[dsti+0] = dbuf[0]<<3 | dbuf[1]>>2 + n++ + } + dsti += 5 + } + return n, end, nil +} + +// Decode decodes src using the encoding enc. It writes at most +// [Encoding.DecodedLen](len(src)) bytes to dst and returns the number of bytes +// written. If src contains invalid base32 data, it will return the +// number of bytes successfully written and [CorruptInputError]. +// Newline characters (\r and \n) are ignored. +func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { + buf := make([]byte, len(src)) + l := stripNewlines(buf, src) + n, _, err = enc.decode(dst, buf[:l]) + return +} + +// AppendDecode appends the base32 decoded src to dst +// and returns the extended buffer. +// If the input is malformed, it returns the partially decoded src and an error. +func (enc *Encoding) AppendDecode(dst, src []byte) ([]byte, error) { + // Compute the output size without padding to avoid over allocating. + n := len(src) + for n > 0 && rune(src[n-1]) == enc.padChar { + n-- + } + n = decodedLen(n, NoPadding) + + dst = slicesGrow(dst, n) + n, err := enc.Decode(dst[len(dst):][:n], src) + return dst[:len(dst)+n], err +} + +// DecodeString returns the bytes represented by the base32 string s. 
+func (enc *Encoding) DecodeString(s string) ([]byte, error) { + buf := []byte(s) + l := stripNewlines(buf, buf) + n, _, err := enc.decode(buf, buf[:l]) + return buf[:n], err +} + +type decoder struct { + err error + enc *Encoding + r io.Reader + end bool // saw end of message + buf [1024]byte // leftover input + nbuf int + out []byte // leftover decoded output + outbuf [1024 / 8 * 5]byte +} + +func readEncodedData(r io.Reader, buf []byte, min int, expectsPadding bool) (n int, err error) { + for n < min && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + // data was read, less than min bytes could be read + if n < min && n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + // no data was read, the buffer already contains some data + // when padding is disabled this is not an error, as the message can be of + // any length + if expectsPadding && min < 8 && n == 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +func (d *decoder) Read(p []byte) (n int, err error) { + // Use leftover decoded output from last read. + if len(d.out) > 0 { + n = copy(p, d.out) + d.out = d.out[n:] + if len(d.out) == 0 { + return n, d.err + } + return n, nil + } + + if d.err != nil { + return 0, d.err + } + + // Read a chunk. + nn := (len(p) + 4) / 5 * 8 + if nn < 8 { + nn = 8 + } + if nn > len(d.buf) { + nn = len(d.buf) + } + + // Minimum amount of bytes that needs to be read each cycle + var min int + var expectsPadding bool + if d.enc.padChar == NoPadding { + min = 1 + expectsPadding = false + } else { + min = 8 - d.nbuf + expectsPadding = true + } + + nn, d.err = readEncodedData(d.r, d.buf[d.nbuf:nn], min, expectsPadding) + d.nbuf += nn + if d.nbuf < min { + return 0, d.err + } + if nn > 0 && d.end { + return 0, CorruptInputError(0) + } + + // Decode chunk into p, or d.out and then p if p is too small. 
+ var nr int + if d.enc.padChar == NoPadding { + nr = d.nbuf + } else { + nr = d.nbuf / 8 * 8 + } + nw := d.enc.DecodedLen(d.nbuf) + + if nw > len(p) { + nw, d.end, err = d.enc.decode(d.outbuf[0:], d.buf[0:nr]) + d.out = d.outbuf[0:nw] + n = copy(p, d.out) + d.out = d.out[n:] + } else { + n, d.end, err = d.enc.decode(p, d.buf[0:nr]) + } + d.nbuf -= nr + for i := 0; i < d.nbuf; i++ { + d.buf[i] = d.buf[i+nr] + } + + if err != nil && (d.err == nil || d.err == io.EOF) { + d.err = err + } + + if len(d.out) > 0 { + // We cannot return all the decoded bytes to the caller in this + // invocation of Read, so we return a nil error to ensure that Read + // will be called again. The error stored in d.err, if any, will be + // returned with the last set of decoded bytes. + return n, nil + } + + return n, d.err +} + +type newlineFilteringReader struct { + wrapped io.Reader +} + +// stripNewlines removes newline characters and returns the number +// of non-newline characters copied to dst. +func stripNewlines(dst, src []byte) int { + offset := 0 + for _, b := range src { + if b == '\r' || b == '\n' { + continue + } + dst[offset] = b + offset++ + } + return offset +} + +func (r *newlineFilteringReader) Read(p []byte) (int, error) { + n, err := r.wrapped.Read(p) + for n > 0 { + s := p[0:n] + offset := stripNewlines(s, s) + if err != nil || offset > 0 { + return offset, err + } + // Previous buffer entirely whitespace, read again + n, err = r.wrapped.Read(p) + } + return n, err +} + +// NewDecoder constructs a new base32 stream decoder. +func NewDecoder(enc *Encoding, r io.Reader) io.Reader { + return &decoder{enc: enc, r: &newlineFilteringReader{r}} +} + +// DecodedLen returns the maximum length in bytes of the decoded data +// corresponding to n bytes of base32-encoded data. 
+func (enc *Encoding) DecodedLen(n int) int { + return decodedLen(n, enc.padChar) +} + +func decodedLen(n int, padChar rune) int { + if padChar == NoPadding { + return n/8*5 + n%8*5/8 + } + return n / 8 * 5 +} diff --git a/gnovm/stdlibs/encoding/base32/base32_test.gno b/gnovm/stdlibs/encoding/base32/base32_test.gno new file mode 100644 index 00000000000..fa09c2cd2f3 --- /dev/null +++ b/gnovm/stdlibs/encoding/base32/base32_test.gno @@ -0,0 +1,913 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base32 + +import ( + "bytes" + "errors" + "io" + "math" + "strconv" + "strings" + "testing" +) + +type testpair struct { + decoded, encoded string +} + +var pairs = []testpair{ + // RFC 4648 examples + {"", ""}, + {"f", "MY======"}, + {"fo", "MZXQ===="}, + {"foo", "MZXW6==="}, + {"foob", "MZXW6YQ="}, + {"fooba", "MZXW6YTB"}, + {"foobar", "MZXW6YTBOI======"}, + + // Wikipedia examples, converted to base32 + {"sure.", "ON2XEZJO"}, + {"sure", "ON2XEZI="}, + {"sur", "ON2XE==="}, + {"su", "ON2Q===="}, + {"leasure.", "NRSWC43VOJSS4==="}, + {"easure.", "MVQXG5LSMUXA===="}, + {"asure.", "MFZXK4TFFY======"}, + {"sure.", "ON2XEZJO"}, +} + +var bigtest = testpair{ + "Twas brillig, and the slithy toves", + "KR3WC4ZAMJZGS3DMNFTSYIDBNZSCA5DIMUQHG3DJORUHSIDUN53GK4Y=", +} + +func testEqual(t *testing.T, msg string, args ...interface{}) bool { + t.Helper() + if args[len(args)-2] != args[len(args)-1] { + t.Errorf(msg, args...) 
+ return false + } + return true +} + +func TestEncode(t *testing.T) { + for _, p := range pairs { + got := StdEncoding.EncodeToString([]byte(p.decoded)) + testEqual(t, "Encode(%q) = %q, want %q", p.decoded, got, p.encoded) + dst := StdEncoding.AppendEncode([]byte("lead"), []byte(p.decoded)) + testEqual(t, `AppendEncode("lead", %q) = %q, want %q`, p.decoded, string(dst), "lead"+p.encoded) + } +} + +func TestEncoder(t *testing.T) { + for _, p := range pairs { + bb := &strings.Builder{} + encoder := NewEncoder(StdEncoding, bb) + encoder.Write([]byte(p.decoded)) + encoder.Close() + testEqual(t, "Encode(%q) = %q, want %q", p.decoded, bb.String(), p.encoded) + } +} + +func TestEncoderBuffering(t *testing.T) { + input := []byte(bigtest.decoded) + for bs := 1; bs <= 12; bs++ { + bb := &strings.Builder{} + encoder := NewEncoder(StdEncoding, bb) + for pos := 0; pos < len(input); pos += bs { + end := pos + bs + if end > len(input) { + end = len(input) + } + n, err := encoder.Write(input[pos:end]) + testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil)) + testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos) + } + err := encoder.Close() + testEqual(t, "Close gave error %v, want %v", err, error(nil)) + testEqual(t, "Encoding/%d of %q = %q, want %q", bs, bigtest.decoded, bb.String(), bigtest.encoded) + } +} + +func TestDecoderBufferingWithPadding(t *testing.T) { + for bs := 0; bs <= 12; bs++ { + for _, s := range pairs { + decoder := NewDecoder(StdEncoding, strings.NewReader(s.encoded)) + buf := make([]byte, len(s.decoded)+bs) + + var n int + var err error + n, err = decoder.Read(buf) + + if err != nil && err != io.EOF { + t.Errorf("Read from %q at pos %d = %d, unexpected error %v", s.encoded, len(s.decoded), n, err) + } + testEqual(t, "Decoding/%d of %q = %q, want %q\n", bs, s.encoded, string(buf[:n]), s.decoded) + } + } +} + +func TestDecoderBufferingWithoutPadding(t *testing.T) { + for bs := 0; bs <= 12; bs++ { + for _, s := 
range pairs { + encoded := strings.TrimRight(s.encoded, "=") + decoder := NewDecoder(StdEncoding.WithPadding(NoPadding), strings.NewReader(encoded)) + buf := make([]byte, len(s.decoded)+bs) + + var n int + var err error + n, err = decoder.Read(buf) + + if err != nil && err != io.EOF { + t.Errorf("Read from %q at pos %d = %d, unexpected error %v", encoded, len(s.decoded), n, err) + } + testEqual(t, "Decoding/%d of %q = %q, want %q\n", bs, encoded, string(buf[:n]), s.decoded) + } + } +} + +func TestDecode(t *testing.T) { + for _, p := range pairs { + dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded))) + count, end, err := StdEncoding.decode(dbuf, []byte(p.encoded)) + testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, error(nil)) + testEqual(t, "Decode(%q) = length %v, want %v", p.encoded, count, len(p.decoded)) + if len(p.encoded) > 0 { + testEqual(t, "Decode(%q) = end %v, want %v", p.encoded, end, (p.encoded[len(p.encoded)-1] == '=')) + } + testEqual(t, "Decode(%q) = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded) + + dbuf, err = StdEncoding.DecodeString(p.encoded) + testEqual(t, "DecodeString(%q) = error %v, want %v", p.encoded, err, error(nil)) + testEqual(t, "DecodeString(%q) = %q, want %q", p.encoded, string(dbuf), p.decoded) + + dst, err := StdEncoding.AppendDecode([]byte("lead"), []byte(p.encoded)) + testEqual(t, "AppendDecode(%q) = error %v, want %v", p.encoded, err, error(nil)) + testEqual(t, `AppendDecode("lead", %q) = %q, want %q`, p.encoded, string(dst), "lead"+p.decoded) + + dst2, err := StdEncoding.AppendDecode(dst[:0:len(p.decoded)], []byte(p.encoded)) + testEqual(t, "AppendDecode(%q) = error %v, want %v", p.encoded, err, error(nil)) + testEqual(t, `AppendDecode("", %q) = %q, want %q`, p.encoded, string(dst2), p.decoded) + if len(dst) > 0 && len(dst2) > 0 && &dst[0] != &dst2[0] { + t.Errorf("unexpected capacity growth: got %d, want %d", cap(dst2), cap(dst)) + } + } +} + +func TestDecoder(t *testing.T) { + for _, p := 
range pairs { + decoder := NewDecoder(StdEncoding, strings.NewReader(p.encoded)) + dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded))) + count, err := decoder.Read(dbuf) + if err != nil && err != io.EOF { + t.Fatal("Read failed", err) + } + testEqual(t, "Read from %q = length %v, want %v", p.encoded, count, len(p.decoded)) + testEqual(t, "Decoding of %q = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded) + if err != io.EOF { + _, err = decoder.Read(dbuf) + } + testEqual(t, "Read from %q = %v, want %v", p.encoded, err, io.EOF) + } +} + +type badReader struct { + data []byte + errs []error + called int + limit int +} + +// Populates p with data, returns a count of the bytes written and an +// error. The error returned is taken from badReader.errs, with each +// invocation of Read returning the next error in this slice, or io.EOF, +// if all errors from the slice have already been returned. The +// number of bytes returned is determined by the size of the input buffer +// the test passes to decoder.Read and will be a multiple of 8, unless +// badReader.limit is non zero. +func (b *badReader) Read(p []byte) (int, error) { + lim := len(p) + if b.limit != 0 && b.limit < lim { + lim = b.limit + } + if len(b.data) < lim { + lim = len(b.data) + } + for i := range p[:lim] { + p[i] = b.data[i] + } + b.data = b.data[lim:] + err := io.EOF + if b.called < len(b.errs) { + err = b.errs[b.called] + } + b.called++ + return lim, err +} + +// TestIssue20044 tests that decoder.Read behaves correctly when the caller +// supplied reader returns an error. +func TestIssue20044(t *testing.T) { + badErr := errors.New("bad reader error") + testCases := []struct { + r badReader + res string + err error + dbuflen int + }{ + // Check valid input data accompanied by an error is processed and the error is propagated. 
+ {r: badReader{data: []byte("MY======"), errs: []error{badErr}}, + res: "f", err: badErr}, + // Check a read error accompanied by input data consisting of newlines only is propagated. + {r: badReader{data: []byte("\n\n\n\n\n\n\n\n"), errs: []error{badErr, nil}}, + res: "", err: badErr}, + // Reader will be called twice. The first time it will return 8 newline characters. The + // second time valid base32 encoded data and an error. The data should be decoded + // correctly and the error should be propagated. + {r: badReader{data: []byte("\n\n\n\n\n\n\n\nMY======"), errs: []error{nil, badErr}}, + res: "f", err: badErr, dbuflen: 8}, + // Reader returns invalid input data (too short) and an error. Verify the reader + // error is returned. + {r: badReader{data: []byte("MY====="), errs: []error{badErr}}, + res: "", err: badErr}, + // Reader returns invalid input data (too short) but no error. Verify io.ErrUnexpectedEOF + // is returned. + {r: badReader{data: []byte("MY====="), errs: []error{nil}}, + res: "", err: io.ErrUnexpectedEOF}, + // Reader returns invalid input data and an error. Verify the reader and not the + // decoder error is returned. + {r: badReader{data: []byte("Ma======"), errs: []error{badErr}}, + res: "", err: badErr}, + // Reader returns valid data and io.EOF. Check data is decoded and io.EOF is propagated. + {r: badReader{data: []byte("MZXW6YTB"), errs: []error{io.EOF}}, + res: "fooba", err: io.EOF}, + // Check errors are properly reported when decoder.Read is called multiple times. + // decoder.Read will be called 8 times, badReader.Read will be called twice, returning + // valid data both times but an error on the second call. + {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{nil, badErr}}, + res: "leasure.", err: badErr, dbuflen: 1}, + // Check io.EOF is properly reported when decoder.Read is called multiple times. 
+ // decoder.Read will be called 8 times, badReader.Read will be called twice, returning + // valid data both times but io.EOF on the second call. + {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{nil, io.EOF}}, + res: "leasure.", err: io.EOF, dbuflen: 1}, + // The following two test cases check that errors are propagated correctly when more than + // 8 bytes are read at a time. + {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{io.EOF}}, + res: "leasure.", err: io.EOF, dbuflen: 11}, + {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{badErr}}, + res: "leasure.", err: badErr, dbuflen: 11}, + // Check that errors are correctly propagated when the reader returns valid bytes in + // groups that are not divisible by 8. The first read will return 11 bytes and no + // error. The second will return 7 and an error. The data should be decoded correctly + // and the error should be propagated. + {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{nil, badErr}, limit: 11}, + res: "leasure.", err: badErr}, + } + + for _, tc := range testCases { + input := tc.r.data + decoder := NewDecoder(StdEncoding, &tc.r) + var dbuflen int + if tc.dbuflen > 0 { + dbuflen = tc.dbuflen + } else { + dbuflen = StdEncoding.DecodedLen(len(input)) + } + dbuf := make([]byte, dbuflen) + var err error + var res []byte + for err == nil { + var n int + n, err = decoder.Read(dbuf) + if n > 0 { + res = append(res, dbuf[:n]...) + } + } + + testEqual(t, "Decoding of %q = %q, want %q", string(input), string(res), tc.res) + testEqual(t, "Decoding of %q err = %v, expected %v", string(input), err, tc.err) + } +} + +// TestDecoderError verifies decode errors are propagated when there are no read +// errors. 
+func TestDecoderError(t *testing.T) { + for _, readErr := range []error{io.EOF, nil} { + input := "MZXW6YTb" + dbuf := make([]byte, StdEncoding.DecodedLen(len(input))) + br := badReader{data: []byte(input), errs: []error{readErr}} + decoder := NewDecoder(StdEncoding, &br) + n, err := decoder.Read(dbuf) + testEqual(t, "Read after EOF, n = %d, expected %d", n, 0) + if _, ok := err.(CorruptInputError); !ok { + t.Errorf("Corrupt input error expected. Found %T", err) + } + } +} + +// TestReaderEOF ensures decoder.Read behaves correctly when input data is +// exhausted. +func TestReaderEOF(t *testing.T) { + for _, readErr := range []error{io.EOF, nil} { + input := "MZXW6YTB" + br := badReader{data: []byte(input), errs: []error{nil, readErr}} + decoder := NewDecoder(StdEncoding, &br) + dbuf := make([]byte, StdEncoding.DecodedLen(len(input))) + n, err := decoder.Read(dbuf) + testEqual(t, "Decoding of %q err = %v, expected %v", input, err, error(nil)) + n, err = decoder.Read(dbuf) + testEqual(t, "Read after EOF, n = %d, expected %d", n, 0) + testEqual(t, "Read after EOF, err = %v, expected %v", err, io.EOF) + n, err = decoder.Read(dbuf) + testEqual(t, "Read after EOF, n = %d, expected %d", n, 0) + testEqual(t, "Read after EOF, err = %v, expected %v", err, io.EOF) + } +} + +func TestDecoderBuffering(t *testing.T) { + for bs := 1; bs <= 12; bs++ { + decoder := NewDecoder(StdEncoding, strings.NewReader(bigtest.encoded)) + buf := make([]byte, len(bigtest.decoded)+12) + var total int + var n int + var err error + for total = 0; total < len(bigtest.decoded) && err == nil; { + n, err = decoder.Read(buf[total : total+bs]) + total += n + } + if err != nil && err != io.EOF { + t.Errorf("Read from %q at pos %d = %d, unexpected error %v", bigtest.encoded, total, n, err) + } + testEqual(t, "Decoding/%d of %q = %q, want %q", bs, bigtest.encoded, string(buf[0:total]), bigtest.decoded) + } +} + +func TestDecodeCorrupt(t *testing.T) { + testCases := []struct { + input string + offset int 
// -1 means no corruption. + }{ + {"", -1}, + {"!!!!", 0}, + {"x===", 0}, + {"AA=A====", 2}, + {"AAA=AAAA", 3}, + {"MMMMMMMMM", 8}, + {"MMMMMM", 0}, + {"A=", 1}, + {"AA=", 3}, + {"AA==", 4}, + {"AA===", 5}, + {"AAAA=", 5}, + {"AAAA==", 6}, + {"AAAAA=", 6}, + {"AAAAA==", 7}, + {"A=======", 1}, + {"AA======", -1}, + {"AAA=====", 3}, + {"AAAA====", -1}, + {"AAAAA===", -1}, + {"AAAAAA==", 6}, + {"AAAAAAA=", -1}, + {"AAAAAAAA", -1}, + } + for _, tc := range testCases { + dbuf := make([]byte, StdEncoding.DecodedLen(len(tc.input))) + _, err := StdEncoding.Decode(dbuf, []byte(tc.input)) + if tc.offset == -1 { + if err != nil { + t.Error("Decoder wrongly detected corruption in", tc.input) + } + continue + } + switch err := err.(type) { + case CorruptInputError: + testEqual(t, "Corruption in %q at offset %v, want %v", tc.input, int(err), tc.offset) + default: + t.Error("Decoder failed to detect corruption in", tc) + } + } +} + +func TestBig(t *testing.T) { + n := 3*1000 + 1 + raw := make([]byte, n) + const alpha = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + for i := 0; i < n; i++ { + raw[i] = alpha[i%len(alpha)] + } + encoded := new(bytes.Buffer) + w := NewEncoder(StdEncoding, encoded) + nn, err := w.Write(raw) + if nn != n || err != nil { + t.Fatalf("Encoder.Write(raw) = %d, %v want %d, nil", nn, err, n) + } + err = w.Close() + if err != nil { + t.Fatalf("Encoder.Close() = %v want nil", err) + } + decoded, err := io.ReadAll(NewDecoder(StdEncoding, encoded)) + if err != nil { + t.Fatalf("io.ReadAll(NewDecoder(...)): %v", err) + } + + if !bytes.Equal(raw, decoded) { + var i int + for i = 0; i < len(decoded) && i < len(raw); i++ { + if decoded[i] != raw[i] { + break + } + } + t.Errorf("Decode(Encode(%d-byte string)) failed at offset %d", n, i) + } +} + +func testStringEncoding(t *testing.T, expected string, examples []string) { + for _, e := range examples { + buf, err := StdEncoding.DecodeString(e) + if err != nil { + t.Errorf("Decode(%q) failed: %v", 
e, err) + continue + } + if s := string(buf); s != expected { + t.Errorf("Decode(%q) = %q, want %q", e, s, expected) + } + } +} + +func TestNewLineCharacters(t *testing.T) { + // Each of these should decode to the string "sure", without errors. + examples := []string{ + "ON2XEZI=", + "ON2XEZI=\r", + "ON2XEZI=\n", + "ON2XEZI=\r\n", + "ON2XEZ\r\nI=", + "ON2X\rEZ\nI=", + "ON2X\nEZ\rI=", + "ON2XEZ\nI=", + "ON2XEZI\n=", + } + testStringEncoding(t, "sure", examples) + + // Each of these should decode to the string "foobar", without errors. + examples = []string{ + "MZXW6YTBOI======", + "MZXW6YTBOI=\r\n=====", + } + testStringEncoding(t, "foobar", examples) +} + +func TestDecoderIssue4779(t *testing.T) { + encoded := `JRXXEZLNEBUXA43VNUQGI33MN5ZCA43JOQQGC3LFOQWCAY3PNZZWKY3UMV2HK4 +RAMFSGS4DJONUWG2LOM4QGK3DJOQWCA43FMQQGI3YKMVUXK43NN5SCA5DFNVYG64RANFXGG2LENFSH +K3TUEB2XIIDMMFRG64TFEBSXIIDEN5WG64TFEBWWCZ3OMEQGC3DJOF2WCLRAKV2CAZLONFWQUYLEEB +WWS3TJNUQHMZLONFQW2LBAOF2WS4ZANZXXG5DSOVSCAZLYMVZGG2LUMF2GS33OEB2WY3DBNVRW6IDM +MFRG64TJOMQG42LTNEQHK5AKMFWGS4LVNFYCAZLYEBSWCIDDN5WW233EN4QGG33OONSXC5LBOQXCAR +DVNFZSAYLVORSSA2LSOVZGKIDEN5WG64RANFXAU4TFOBZGK2DFNZSGK4TJOQQGS3RAOZXWY5LQORQX +IZJAOZSWY2LUEBSXG43FEBRWS3DMOVWSAZDPNRXXEZJAMV2SAZTVM5UWC5BANZ2WY3DBBJYGC4TJMF +2HK4ROEBCXQY3FOB2GK5LSEBZWS3TUEBXWGY3BMVRWC5BAMN2XA2LEMF2GC5BANZXW4IDQOJXWSZDF +NZ2CYIDTOVXHIIDJNYFGG5LMOBQSA4LVNEQG6ZTGNFRWSYJAMRSXGZLSOVXHIIDNN5WGY2LUEBQW42 +LNEBUWIIDFON2CA3DBMJXXE5LNFY== +====` + encodedShort := strings.ReplaceAll(encoded, "\n", "") + + dec := NewDecoder(StdEncoding, strings.NewReader(encoded)) + res1, err := io.ReadAll(dec) + if err != nil { + t.Errorf("ReadAll failed: %v", err) + } + + dec = NewDecoder(StdEncoding, strings.NewReader(encodedShort)) + var res2 []byte + res2, err = io.ReadAll(dec) + if err != nil { + t.Errorf("ReadAll failed: %v", err) + } + + if !bytes.Equal(res1, res2) { + t.Error("Decoded results not equal") + } +} + +func BenchmarkEncode(b *testing.B) { + data := make([]byte, 8192) + 
buf := make([]byte, StdEncoding.EncodedLen(len(data))) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.Encode(buf, data) + } +} + +func BenchmarkEncodeToString(b *testing.B) { + data := make([]byte, 8192) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.EncodeToString(data) + } +} + +func BenchmarkDecode(b *testing.B) { + data := make([]byte, StdEncoding.EncodedLen(8192)) + StdEncoding.Encode(data, make([]byte, 8192)) + buf := make([]byte, 8192) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.Decode(buf, data) + } +} + +func BenchmarkDecodeString(b *testing.B) { + data := StdEncoding.EncodeToString(make([]byte, 8192)) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.DecodeString(data) + } +} + +func TestWithCustomPadding(t *testing.T) { + for _, testcase := range pairs { + defaultPadding := StdEncoding.EncodeToString([]byte(testcase.decoded)) + customPadding := StdEncoding.WithPadding('@').EncodeToString([]byte(testcase.decoded)) + expected := strings.ReplaceAll(defaultPadding, "=", "@") + + if expected != customPadding { + t.Errorf("Expected custom %s, got %s", expected, customPadding) + } + if testcase.encoded != defaultPadding { + t.Errorf("Expected %s, got %s", testcase.encoded, defaultPadding) + } + } +} + +func TestWithoutPadding(t *testing.T) { + for _, testcase := range pairs { + defaultPadding := StdEncoding.EncodeToString([]byte(testcase.decoded)) + customPadding := StdEncoding.WithPadding(NoPadding).EncodeToString([]byte(testcase.decoded)) + expected := strings.TrimRight(defaultPadding, "=") + + if expected != customPadding { + t.Errorf("Expected custom %s, got %s", expected, customPadding) + } + if testcase.encoded != defaultPadding { + t.Errorf("Expected %s, got %s", testcase.encoded, defaultPadding) + } + } +} + +func TestDecodeWithPadding(t *testing.T) { + encodings := []*Encoding{ + StdEncoding, + StdEncoding.WithPadding('-'), + 
StdEncoding.WithPadding(NoPadding), + } + + for i, enc := range encodings { + for _, pair := range pairs { + + input := pair.decoded + encoded := enc.EncodeToString([]byte(input)) + + decoded, err := enc.DecodeString(encoded) + if err != nil { + t.Errorf("DecodeString Error for encoding %d (%q): %v", i, input, err) + } + + if input != string(decoded) { + t.Errorf("Unexpected result for encoding %d: got %q; want %q", i, decoded, input) + } + } + } +} + +func TestDecodeWithWrongPadding(t *testing.T) { + encoded := StdEncoding.EncodeToString([]byte("foobar")) + + _, err := StdEncoding.WithPadding('-').DecodeString(encoded) + if err == nil { + t.Error("expected error") + } + + _, err = StdEncoding.WithPadding(NoPadding).DecodeString(encoded) + if err == nil { + t.Error("expected error") + } +} + +func TestBufferedDecodingSameError(t *testing.T) { + testcases := []struct { + prefix string + chunkCombinations [][]string + expected error + }{ + // NBSWY3DPO5XXE3DE == helloworld + // Test with "ZZ" as extra input + {"helloworld", [][]string{ + {"NBSW", "Y3DP", "O5XX", "E3DE", "ZZ"}, + {"NBSWY3DPO5XXE3DE", "ZZ"}, + {"NBSWY3DPO5XXE3DEZZ"}, + {"NBS", "WY3", "DPO", "5XX", "E3D", "EZZ"}, + {"NBSWY3DPO5XXE3", "DEZZ"}, + }, io.ErrUnexpectedEOF}, + + // Test with "ZZY" as extra input + {"helloworld", [][]string{ + {"NBSW", "Y3DP", "O5XX", "E3DE", "ZZY"}, + {"NBSWY3DPO5XXE3DE", "ZZY"}, + {"NBSWY3DPO5XXE3DEZZY"}, + {"NBS", "WY3", "DPO", "5XX", "E3D", "EZZY"}, + {"NBSWY3DPO5XXE3", "DEZZY"}, + }, io.ErrUnexpectedEOF}, + + // Normal case, this is valid input + {"helloworld", [][]string{ + {"NBSW", "Y3DP", "O5XX", "E3DE"}, + {"NBSWY3DPO5XXE3DE"}, + {"NBS", "WY3", "DPO", "5XX", "E3D", "E"}, + {"NBSWY3DPO5XXE3", "DE"}, + }, nil}, + + // MZXW6YTB = fooba + {"fooba", [][]string{ + {"MZXW6YTBZZ"}, + {"MZXW6YTBZ", "Z"}, + {"MZXW6YTB", "ZZ"}, + {"MZXW6YT", "BZZ"}, + {"MZXW6Y", "TBZZ"}, + {"MZXW6Y", "TB", "ZZ"}, + {"MZXW6", "YTBZZ"}, + {"MZXW6", "YTB", "ZZ"}, + {"MZXW6", "YT", "BZZ"}, + }, 
io.ErrUnexpectedEOF}, + + // Normal case, this is valid input + {"fooba", [][]string{ + {"MZXW6YTB"}, + {"MZXW6YT", "B"}, + {"MZXW6Y", "TB"}, + {"MZXW6", "YTB"}, + {"MZXW6", "YT", "B"}, + {"MZXW", "6YTB"}, + {"MZXW", "6Y", "TB"}, + }, nil}, + } + + for _, testcase := range testcases { + for _, chunks := range testcase.chunkCombinations { + r := &chunkReader{chunks: chunks} + + decoder := NewDecoder(StdEncoding, r) + _, err := io.ReadAll(decoder) + + if err != testcase.expected { + t.Errorf("Expected %v, got %v; case %s %+v", testcase.expected, err, testcase.prefix, chunks) + } + } + } +} + +// XXX: +// chunkReader exists as an alternative to the original behaviour of these +// tests in go, which used io.Pipe and a separate goroutine. +type chunkReader struct { + chunks []string + nChunk int + read int +} + +func (c *chunkReader) Read(b []byte) (int, error) { + if c.nChunk >= len(c.chunks) { + return 0, io.EOF + } + chunk := c.chunks[c.nChunk] + read := copy(b, chunk[c.read:]) + c.read += read + if c.read == len(chunk) { + c.nChunk++ + c.read = 0 + } + return read, nil +} + +func TestBufferedDecodingPadding(t *testing.T) { + testcases := []struct { + chunks []string + expectedError string + }{ + {[]string{ + "I4======", + "==", + }, "unexpected EOF"}, + + {[]string{ + "I4======N4======", + }, "illegal base32 data at input byte 2"}, + + {[]string{ + "I4======", + "N4======", + }, "illegal base32 data at input byte 0"}, + + {[]string{ + "I4======", + "========", + }, "illegal base32 data at input byte 0"}, + + {[]string{ + "I4I4I4I4", + "I4======", + "I4======", + }, "illegal base32 data at input byte 0"}, + } + + for _, testcase := range testcases { + r := &chunkReader{chunks: testcase.chunks} + + decoder := NewDecoder(StdEncoding, r) + _, err := io.ReadAll(decoder) + + if err == nil && len(testcase.expectedError) != 0 { + t.Errorf("case %q: got nil error, want %v", testcase.chunks, testcase.expectedError) + } else if err.Error() != testcase.expectedError { + 
t.Errorf("case %q: got %v, want %v", testcase.chunks, err, testcase.expectedError) + } + } +} + +func TestEncodedLen(t *testing.T) { + var rawStdEncoding = StdEncoding.WithPadding(NoPadding) + type test struct { + enc *Encoding + n int + want int64 + } + tests := []test{ + {StdEncoding, 0, 0}, + {StdEncoding, 1, 8}, + {StdEncoding, 2, 8}, + {StdEncoding, 3, 8}, + {StdEncoding, 4, 8}, + {StdEncoding, 5, 8}, + {StdEncoding, 6, 16}, + {StdEncoding, 10, 16}, + {StdEncoding, 11, 24}, + {rawStdEncoding, 0, 0}, + {rawStdEncoding, 1, 2}, + {rawStdEncoding, 2, 4}, + {rawStdEncoding, 3, 5}, + {rawStdEncoding, 4, 7}, + {rawStdEncoding, 5, 8}, + {rawStdEncoding, 6, 10}, + {rawStdEncoding, 7, 12}, + {rawStdEncoding, 10, 16}, + {rawStdEncoding, 11, 18}, + } + // check overflow + switch strconv.IntSize { + case 32: + tests = append(tests, test{rawStdEncoding, (math.MaxInt-4)/8 + 1, 429496730}) + tests = append(tests, test{rawStdEncoding, math.MaxInt/8*5 + 4, math.MaxInt}) + case 64: + tests = append(tests, test{rawStdEncoding, (math.MaxInt-4)/8 + 1, 1844674407370955162}) + tests = append(tests, test{rawStdEncoding, math.MaxInt/8*5 + 4, math.MaxInt}) + } + for _, tt := range tests { + if got := tt.enc.EncodedLen(tt.n); int64(got) != tt.want { + t.Errorf("EncodedLen(%d): got %d, want %d", tt.n, got, tt.want) + } + } +} + +func TestDecodedLen(t *testing.T) { + var rawStdEncoding = StdEncoding.WithPadding(NoPadding) + type test struct { + enc *Encoding + n int + want int64 + } + tests := []test{ + {StdEncoding, 0, 0}, + {StdEncoding, 8, 5}, + {StdEncoding, 16, 10}, + {StdEncoding, 24, 15}, + {rawStdEncoding, 0, 0}, + {rawStdEncoding, 2, 1}, + {rawStdEncoding, 4, 2}, + {rawStdEncoding, 5, 3}, + {rawStdEncoding, 7, 4}, + {rawStdEncoding, 8, 5}, + {rawStdEncoding, 10, 6}, + {rawStdEncoding, 12, 7}, + {rawStdEncoding, 16, 10}, + {rawStdEncoding, 18, 11}, + } + // check overflow + switch strconv.IntSize { + case 32: + tests = append(tests, test{rawStdEncoding, math.MaxInt/5 + 1, 
268435456}) + tests = append(tests, test{rawStdEncoding, math.MaxInt, 1342177279}) + case 64: + tests = append(tests, test{rawStdEncoding, math.MaxInt/5 + 1, 1152921504606846976}) + tests = append(tests, test{rawStdEncoding, math.MaxInt, 5764607523034234879}) + } + for _, tt := range tests { + if got := tt.enc.DecodedLen(tt.n); int64(got) != tt.want { + t.Errorf("DecodedLen(%d): got %d, want %d", tt.n, got, tt.want) + } + } +} + +func TestWithoutPaddingClose(t *testing.T) { + encodings := []*Encoding{ + StdEncoding, + StdEncoding.WithPadding(NoPadding), + } + + for _, encoding := range encodings { + for _, testpair := range pairs { + + var buf strings.Builder + encoder := NewEncoder(encoding, &buf) + encoder.Write([]byte(testpair.decoded)) + encoder.Close() + + expected := testpair.encoded + if encoding.padChar == NoPadding { + expected = strings.ReplaceAll(expected, "=", "") + } + + res := buf.String() + + if res != expected { + t.Errorf("Expected %s got %s; padChar=%d", expected, res, encoding.padChar) + } + } + } +} + +func TestDecodeReadAll(t *testing.T) { + encodings := []*Encoding{ + StdEncoding, + StdEncoding.WithPadding(NoPadding), + } + + for _, pair := range pairs { + for encIndex, encoding := range encodings { + encoded := pair.encoded + if encoding.padChar == NoPadding { + encoded = strings.ReplaceAll(encoded, "=", "") + } + + decReader, err := io.ReadAll(NewDecoder(encoding, strings.NewReader(encoded))) + if err != nil { + t.Errorf("NewDecoder error: %v", err) + } + + if pair.decoded != string(decReader) { + t.Errorf("Expected %s got %s; Encoding %d", pair.decoded, decReader, encIndex) + } + } + } +} + +func TestDecodeSmallBuffer(t *testing.T) { + encodings := []*Encoding{ + StdEncoding, + StdEncoding.WithPadding(NoPadding), + } + + for bufferSize := 1; bufferSize < 200; bufferSize++ { + for _, pair := range pairs { + for encIndex, encoding := range encodings { + encoded := pair.encoded + if encoding.padChar == NoPadding { + encoded = 
strings.ReplaceAll(encoded, "=", "") + } + + decoder := NewDecoder(encoding, strings.NewReader(encoded)) + + var allRead []byte + + for { + buf := make([]byte, bufferSize) + n, err := decoder.Read(buf) + allRead = append(allRead, buf[0:n]...) + if err == io.EOF { + break + } + if err != nil { + t.Error(err) + } + } + + if pair.decoded != string(allRead) { + t.Errorf("Expected %s got %s; Encoding %d; bufferSize %d", pair.decoded, allRead, encIndex, bufferSize) + } + } + } + } +} diff --git a/gnovm/stdlibs/encoding/base32/example_test.gno b/gnovm/stdlibs/encoding/base32/example_test.gno new file mode 100644 index 00000000000..251624f0bd8 --- /dev/null +++ b/gnovm/stdlibs/encoding/base32/example_test.gno @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Keep in sync with ../base64/example_test.go. + +package base32_test + +import ( + "encoding/base32" + "fmt" + "os" +) + +func ExampleEncoding_EncodeToString() { + data := []byte("any + old & data") + str := base32.StdEncoding.EncodeToString(data) + fmt.Println(str) + // Output: + // MFXHSIBLEBXWYZBAEYQGIYLUME====== +} + +func ExampleEncoding_Encode() { + data := []byte("Hello, world!") + dst := make([]byte, base32.StdEncoding.EncodedLen(len(data))) + base32.StdEncoding.Encode(dst, data) + fmt.Println(string(dst)) + // Output: + // JBSWY3DPFQQHO33SNRSCC=== +} + +func ExampleEncoding_DecodeString() { + str := "ONXW2ZJAMRQXIYJAO5UXI2BAAAQGC3TEEDX3XPY=" + data, err := base32.StdEncoding.DecodeString(str) + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Printf("%q\n", data) + // Output: + // "some data with \x00 and \ufeff" +} + +func ExampleEncoding_Decode() { + str := "JBSWY3DPFQQHO33SNRSCC===" + dst := make([]byte, base32.StdEncoding.DecodedLen(len(str))) + n, err := base32.StdEncoding.Decode(dst, []byte(str)) + if err != nil { + fmt.Println("decode error:", err) + return 
+ } + dst = dst[:n] + fmt.Printf("%q\n", dst) + // Output: + // "Hello, world!" +} + +func ExampleNewEncoder() { + input := []byte("foo\x00bar") + encoder := base32.NewEncoder(base32.StdEncoding, os.Stdout) + encoder.Write(input) + // Must close the encoder when finished to flush any partial blocks. + // If you comment out the following line, the last partial block "r" + // won't be encoded. + encoder.Close() + // Output: + // MZXW6ADCMFZA==== +} diff --git a/gnovm/stdlibs/encoding/binary/varint.gno b/gnovm/stdlibs/encoding/binary/varint.gno new file mode 100644 index 00000000000..7b14fb2b631 --- /dev/null +++ b/gnovm/stdlibs/encoding/binary/varint.gno @@ -0,0 +1,166 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package binary + +// This file implements "varint" encoding of 64-bit integers. +// The encoding is: +// - unsigned integers are serialized 7 bits at a time, starting with the +// least significant bits +// - the most significant bit (msb) in each output byte indicates if there +// is a continuation byte (msb = 1) +// - signed integers are mapped to unsigned integers using "zig-zag" +// encoding: Positive values x are written as 2*x + 0, negative values +// are written as 2*(^x) + 1; that is, negative numbers are complemented +// and whether to complement is encoded in bit 0. +// +// Design note: +// At most 10 bytes are needed for 64-bit values. The encoding could +// be more dense: a full 64-bit value needs an extra byte just to hold bit 63. +// Instead, the msb of the previous byte could be used to hold bit 63 since we +// know there can't be more than 64 bits. This is a trivial improvement and +// would reduce the maximum encoding length to 9 bytes. However, it breaks the +// invariant that the msb is always the "continuation bit" and thus makes the +// format incompatible with a varint encoding for larger numbers (say 128-bit). 
// MaxVarintLenN is the maximum length of a varint-encoded N-bit integer.
const (
	MaxVarintLen16 = 3
	MaxVarintLen32 = 5
	MaxVarintLen64 = 10
)

// AppendUvarint appends the varint-encoded form of x,
// as generated by PutUvarint, to buf and returns the extended buffer.
func AppendUvarint(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

// PutUvarint encodes a uint64 into buf and returns the number of bytes written.
// If the buffer is too small, PutUvarint will panic.
func PutUvarint(buf []byte, x uint64) int {
	i := 0
	for x >= 0x80 {
		buf[i] = byte(x) | 0x80
		x >>= 7
		i++
	}
	buf[i] = byte(x)
	return i + 1
}

// Uvarint decodes a uint64 from buf and returns that value and the
// number of bytes read (> 0). If an error occurred, the value is 0
// and the number of bytes n is <= 0 meaning:
//
//	n == 0: buf too small
//	n  < 0: value larger than 64 bits (overflow)
//	        and -n is the number of bytes read
func Uvarint(buf []byte) (uint64, int) {
	var x uint64
	var s uint
	for i, b := range buf {
		if i == MaxVarintLen64 {
			// Catch byte reads past MaxVarintLen64.
			// See issue https://golang.org/issues/41185
			return 0, -(i + 1) // overflow
		}
		if b < 0x80 {
			if i == MaxVarintLen64-1 && b > 1 {
				return 0, -(i + 1) // overflow
			}
			return x | uint64(b)<<s, i + 1
		}
		x |= uint64(b&0x7f) << s
		s += 7
	}
	return 0, 0
}

// AppendVarint appends the varint-encoded form of x,
// as generated by PutVarint, to buf and returns the extended buffer.
func AppendVarint(buf []byte, x int64) []byte {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return AppendUvarint(buf, ux)
}

// PutVarint encodes an int64 into buf and returns the number of bytes written.
// If the buffer is too small, PutVarint will panic.
func PutVarint(buf []byte, x int64) int {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return PutUvarint(buf, ux)
}

// Varint decodes an int64 from buf and returns that value and the
// number of bytes read (> 0). If an error occurred, the value is 0
// and the number of bytes n is <= 0 with the following meaning:
//
//	n == 0: buf too small
//	n  < 0: value larger than 64 bits (overflow)
//	        and -n is the number of bytes read
func Varint(buf []byte) (int64, int) {
	ux, n := Uvarint(buf) // ok to continue in presence of error
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x, n
}

// errOverflow is returned by ReadUvarint/ReadVarint when the encoded value
// does not fit in 64 bits.
var errOverflow = errors.New("binary: varint overflows a 64-bit integer")

// ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64.
+// The error is EOF only if no bytes were read. +// If an EOF happens after reading some but not all the bytes, +// ReadUvarint returns io.ErrUnexpectedEOF. +func ReadUvarint(r io.ByteReader) (uint64, error) { + var x uint64 + var s uint + for i := 0; i < MaxVarintLen64; i++ { + b, err := r.ReadByte() + if err != nil { + if i > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return x, err + } + if b < 0x80 { + if i == MaxVarintLen64-1 && b > 1 { + return x, errOverflow + } + return x | uint64(b)<> 1) + if ux&1 != 0 { + x = ^x + } + return x, err +} diff --git a/gnovm/stdlibs/encoding/binary/varint_test.gno b/gnovm/stdlibs/encoding/binary/varint_test.gno new file mode 100644 index 00000000000..274ac74d0dc --- /dev/null +++ b/gnovm/stdlibs/encoding/binary/varint_test.gno @@ -0,0 +1,245 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package binary + +import ( + "bytes" + "io" + "math" + "testing" +) + +func testConstant(t *testing.T, w uint, max int) { + buf := make([]byte, MaxVarintLen64) + n := PutUvarint(buf, 1< 0 { + wantErr = io.ErrUnexpectedEOF + } + if x != 0 || err != wantErr { + t.Errorf("ReadUvarint(%v): got x = %d, err = %s", buf, x, err) + } + } +} + +// Ensure that we catch overflows of bytes going past MaxVarintLen64. 
+// See issue https://golang.org/issues/41185 +func TestBufferTooBigWithOverflow(t *testing.T) { + tests := []struct { + in []byte + name string + wantN int + wantValue uint64 + }{ + { + name: "invalid: 1000 bytes", + in: func() []byte { + b := make([]byte, 1000) + for i := range b { + b[i] = 0xff + } + b[999] = 0 + return b + }(), + wantN: -11, + wantValue: 0, + }, + { + name: "valid: math.MaxUint64-40", + in: []byte{0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01}, + wantValue: math.MaxUint64 - 40, + wantN: 10, + }, + { + name: "invalid: with more than MaxVarintLen64 bytes", + in: []byte{0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01}, + wantN: -11, + wantValue: 0, + }, + { + name: "invalid: 10th byte", + in: []byte{0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f}, + wantN: -10, + wantValue: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + value, n := Uvarint(tt.in) + if g, w := n, tt.wantN; g != w { + t.Errorf("bytes returned=%d, want=%d", g, w) + } + if g, w := value, tt.wantValue; g != w { + t.Errorf("value=%d, want=%d", g, w) + } + }) + } +} + +func testOverflow(t *testing.T, buf []byte, x0 uint64, n0 int, err0 error) { + x, n := Uvarint(buf) + if x != 0 || n != n0 { + t.Errorf("Uvarint(% X): got x = %d, n = %d; want 0, %d", buf, x, n, n0) + } + + r := bytes.NewReader(buf) + ln := r.Len() + x, err := ReadUvarint(r) + if x != x0 || err != err0 { + t.Errorf("ReadUvarint(%v): got x = %d, err = %s; want %d, %s", buf, x, err, x0, err0) + } + if read := ln - r.Len(); read > MaxVarintLen64 { + t.Errorf("ReadUvarint(%v): read more than MaxVarintLen64 bytes, got %d", buf, read) + } +} + +func TestOverflow(t *testing.T) { + testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x2}, 0, -10, errOverflow) + testOverflow(t, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x1, 0, 0}, 0, -11, errOverflow) + testOverflow(t, []byte{0xFF, 0xFF, 0xFF, 
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 1<<64-1, -11, errOverflow) // 11 bytes, should overflow +} + +func TestNonCanonicalZero(t *testing.T) { + buf := []byte{0x80, 0x80, 0x80, 0} + x, n := Uvarint(buf) + if x != 0 || n != 4 { + t.Errorf("Uvarint(%v): got x = %d, n = %d; want 0, 4", buf, x, n) + } +} + +func BenchmarkPutUvarint32(b *testing.B) { + buf := make([]byte, MaxVarintLen32) + b.SetBytes(4) + for i := 0; i < b.N; i++ { + for j := uint(0); j < MaxVarintLen32; j++ { + PutUvarint(buf, 1<<(j*7)) + } + } +} + +func BenchmarkPutUvarint64(b *testing.B) { + buf := make([]byte, MaxVarintLen64) + b.SetBytes(8) + for i := 0; i < b.N; i++ { + for j := uint(0); j < MaxVarintLen64; j++ { + PutUvarint(buf, 1<<(j*7)) + } + } +} diff --git a/gnovm/stdlibs/encoding/csv/reader.gno b/gnovm/stdlibs/encoding/csv/reader.gno new file mode 100644 index 00000000000..889b7ff5588 --- /dev/null +++ b/gnovm/stdlibs/encoding/csv/reader.gno @@ -0,0 +1,467 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package csv reads and writes comma-separated values (CSV) files. +// There are many kinds of CSV files; this package supports the format +// described in RFC 4180. +// +// A csv file contains zero or more records of one or more fields per record. +// Each record is separated by the newline character. The final record may +// optionally be followed by a newline character. +// +// field1,field2,field3 +// +// White space is considered part of a field. +// +// Carriage returns before newline characters are silently removed. +// +// Blank lines are ignored. A line with only whitespace characters (excluding +// the ending newline character) is not considered a blank line. +// +// Fields which start and stop with the quote character " are called +// quoted-fields. The beginning and ending quote are not part of the +// field. 
+// +// The source: +// +// normal string,"quoted-field" +// +// results in the fields +// +// {`normal string`, `quoted-field`} +// +// Within a quoted-field a quote character followed by a second quote +// character is considered a single quote. +// +// "the ""word"" is true","a ""quoted-field""" +// +// results in +// +// {`the "word" is true`, `a "quoted-field"`} +// +// Newlines and commas may be included in a quoted-field +// +// "Multi-line +// field","comma is ," +// +// results in +// +// {`Multi-line +// field`, `comma is ,`} +package csv + +import ( + "bufio" + "bytes" + "errors" + "io" + "strconv" + "unicode" + "unicode/utf8" +) + +// A ParseError is returned for parsing errors. +// Line and column numbers are 1-indexed. +type ParseError struct { + StartLine int // Line where the record starts + Line int // Line where the error occurred + Column int // Column (1-based byte index) where the error occurred + Err error // The actual error +} + +func (e *ParseError) Error() string { + if e.Err == ErrFieldCount { + return "record on line " + strconv.Itoa(e.Line) + ": " + e.Err.Error() + } + if e.StartLine != e.Line { + return "record on line " + strconv.Itoa(e.StartLine) + ": parse error on line " + strconv.Itoa(e.Line) + + ", column " + strconv.Itoa(e.Column) + ": " + e.Err.Error() + } + return "parse error on line " + strconv.Itoa(e.Line) + ", column " + strconv.Itoa(e.Column) + ": " + e.Err.Error() +} + +func (e *ParseError) Unwrap() error { return e.Err } + +// These are the errors that can be returned in [ParseError.Err]. +var ( + ErrBareQuote = errors.New("bare \" in non-quoted-field") + ErrQuote = errors.New("extraneous or missing \" in quoted-field") + ErrFieldCount = errors.New("wrong number of fields") + + // Deprecated: ErrTrailingComma is no longer used. 
+ ErrTrailingComma = errors.New("extra delimiter at end of line") +) + +var ErrInvalidDelim = errors.New("csv: invalid field or comment delimiter") + +func validDelim(r rune) bool { + return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError +} + +// A Reader reads records from a CSV-encoded file. +// +// As returned by [NewReader], a Reader expects input conforming to RFC 4180. +// The exported fields can be changed to customize the details before the +// first call to [Reader.Read] or [Reader.ReadAll]. +// +// The Reader converts all \r\n sequences in its input to plain \n, +// including in multiline field values, so that the returned data does +// not depend on which line-ending convention an input file uses. +type Reader struct { + // Comma is the field delimiter. + // It is set to comma (',') by NewReader. + // Comma must be a valid rune and must not be \r, \n, + // or the Unicode replacement character (0xFFFD). + Comma rune + + // Comment, if not 0, is the comment character. Lines beginning with the + // Comment character without preceding whitespace are ignored. + // With leading whitespace the Comment character becomes part of the + // field, even if TrimLeadingSpace is true. + // Comment must be a valid rune and must not be \r, \n, + // or the Unicode replacement character (0xFFFD). + // It must also not be equal to Comma. + Comment rune + + // FieldsPerRecord is the number of expected fields per record. + // If FieldsPerRecord is positive, Read requires each record to + // have the given number of fields. If FieldsPerRecord is 0, Read sets it to + // the number of fields in the first record, so that future records must + // have the same field count. If FieldsPerRecord is negative, no check is + // made and records may have a variable number of fields. + FieldsPerRecord int + + // If LazyQuotes is true, a quote may appear in an unquoted field and a + // non-doubled quote may appear in a quoted field. 
+ LazyQuotes bool + + // If TrimLeadingSpace is true, leading white space in a field is ignored. + // This is done even if the field delimiter, Comma, is white space. + TrimLeadingSpace bool + + // ReuseRecord controls whether calls to Read may return a slice sharing + // the backing array of the previous call's returned slice for performance. + // By default, each call to Read returns newly allocated memory owned by the caller. + ReuseRecord bool + + // Deprecated: TrailingComma is no longer used. + TrailingComma bool + + r *bufio.Reader + + // numLine is the current line being read in the CSV file. + numLine int + + // offset is the input stream byte offset of the current reader position. + offset int64 + + // rawBuffer is a line buffer only used by the readLine method. + rawBuffer []byte + + // recordBuffer holds the unescaped fields, one after another. + // The fields can be accessed by using the indexes in fieldIndexes. + // E.g., For the row `a,"b","c""d",e`, recordBuffer will contain `abc"de` + // and fieldIndexes will contain the indexes [1, 2, 5, 6]. + recordBuffer []byte + + // fieldIndexes is an index of fields inside recordBuffer. + // The i'th field ends at offset fieldIndexes[i] in recordBuffer. + fieldIndexes []int + + // fieldPositions is an index of field positions for the + // last record returned by Read. + fieldPositions []position + + // lastRecord is a record cache and only used when ReuseRecord == true. + lastRecord []string +} + +// NewReader returns a new Reader that reads from r. +func NewReader(r io.Reader) *Reader { + return &Reader{ + Comma: ',', + r: bufio.NewReader(r), + } +} + +// Read reads one record (a slice of fields) from r. +// If the record has an unexpected number of fields, +// Read returns the record along with the error [ErrFieldCount]. +// If the record contains a field that cannot be parsed, +// Read returns a partial record along with the parse error. +// The partial record contains all fields read before the error. 
+// If there is no data left to be read, Read returns nil, [io.EOF]. +// If [Reader.ReuseRecord] is true, the returned slice may be shared +// between multiple calls to Read. +func (r *Reader) Read() (record []string, err error) { + if r.ReuseRecord { + record, err = r.readRecord(r.lastRecord) + r.lastRecord = record + } else { + record, err = r.readRecord(nil) + } + return record, err +} + +// FieldPos returns the line and column corresponding to +// the start of the field with the given index in the slice most recently +// returned by [Reader.Read]. Numbering of lines and columns starts at 1; +// columns are counted in bytes, not runes. +// +// If this is called with an out-of-bounds index, it panics. +func (r *Reader) FieldPos(field int) (line, column int) { + if field < 0 || field >= len(r.fieldPositions) { + panic("out of range index passed to FieldPos") + } + p := &r.fieldPositions[field] + return p.line, p.col +} + +// InputOffset returns the input stream byte offset of the current reader +// position. The offset gives the location of the end of the most recently +// read row and the beginning of the next row. +func (r *Reader) InputOffset() int64 { + return r.offset +} + +// pos holds the position of a field in the current line. +type position struct { + line, col int +} + +// ReadAll reads all the remaining records from r. +// Each record is a slice of fields. +// A successful call returns err == nil, not err == [io.EOF]. Because ReadAll is +// defined to read until EOF, it does not treat end of file as an error to be +// reported. +func (r *Reader) ReadAll() (records [][]string, err error) { + for { + record, err := r.readRecord(nil) + if err == io.EOF { + return records, nil + } + if err != nil { + return nil, err + } + records = append(records, record) + } +} + +// readLine reads the next line (with the trailing endline). +// If EOF is hit without a trailing endline, it will be omitted. +// If some bytes were read, then the error is never [io.EOF]. 
+// The result is only valid until the next call to readLine.
+func (r *Reader) readLine() ([]byte, error) {
+	line, err := r.r.ReadSlice('\n')
+	if err == bufio.ErrBufferFull {
+		// The line is longer than the bufio.Reader's buffer: keep reading
+		// slices and accumulate them in rawBuffer until the '\n' (or a real
+		// error) arrives.
+		r.rawBuffer = append(r.rawBuffer[:0], line...)
+		for err == bufio.ErrBufferFull {
+			line, err = r.r.ReadSlice('\n')
+			r.rawBuffer = append(r.rawBuffer, line...)
+		}
+		line = r.rawBuffer
+	}
+	readSize := len(line)
+	if readSize > 0 && err == io.EOF {
+		// Some bytes were read before EOF: treat this as a final,
+		// newline-less line rather than an error.
+		err = nil
+		// For backwards compatibility, drop trailing \r before EOF.
+		if line[readSize-1] == '\r' {
+			line = line[:readSize-1]
+		}
+	}
+	r.numLine++
+	// offset counts raw input bytes, including any \r later normalized away.
+	r.offset += int64(readSize)
+	// Normalize \r\n to \n on all input lines.
+	if n := len(line); n >= 2 && line[n-2] == '\r' && line[n-1] == '\n' {
+		line[n-2] = '\n'
+		line = line[:n-1]
+	}
+	return line, err
+}
+
+// lengthNL reports the number of bytes for the trailing \n.
+func lengthNL(b []byte) int {
+	if len(b) > 0 && b[len(b)-1] == '\n' {
+		return 1
+	}
+	return 0
+}
+
+// nextRune returns the next rune in b or utf8.RuneError.
+func nextRune(b []byte) rune {
+	r, _ := utf8.DecodeRune(b)
+	return r
+}
+
+// readRecord reads and parses one CSV record, appending its fields into dst
+// when dst has capacity. It first validates the configured delimiters, then
+// skips comment lines and blank lines before parsing begins.
+func (r *Reader) readRecord(dst []string) ([]string, error) {
+	if r.Comma == r.Comment || !validDelim(r.Comma) || (r.Comment != 0 && !validDelim(r.Comment)) {
+		return nil, ErrInvalidDelim
+	}
+
+	// Read line (automatically skipping past empty lines and any comments).
+	var line []byte
+	var errRead error
+	for errRead == nil {
+		line, errRead = r.readLine()
+		if r.Comment != 0 && nextRune(line) == r.Comment {
+			line = nil
+			continue // Skip comment lines
+		}
+		// A line that is only its trailing newline is blank.
+		if errRead == nil && len(line) == lengthNL(line) {
+			line = nil
+			continue // Skip empty lines
+		}
+		break
+	}
+	if errRead == io.EOF {
+		return nil, errRead
+	}
+
+	// Parse each field in the record.
+ var err error + const quoteLen = len(`"`) + commaLen := utf8.RuneLen(r.Comma) + recLine := r.numLine // Starting line for record + r.recordBuffer = r.recordBuffer[:0] + r.fieldIndexes = r.fieldIndexes[:0] + r.fieldPositions = r.fieldPositions[:0] + pos := position{line: r.numLine, col: 1} +parseField: + for { + if r.TrimLeadingSpace { + i := bytes.IndexFunc(line, func(r rune) bool { + return !unicode.IsSpace(r) + }) + if i < 0 { + i = len(line) + pos.col -= lengthNL(line) + } + line = line[i:] + pos.col += i + } + if len(line) == 0 || line[0] != '"' { + // Non-quoted string field + i := bytes.IndexRune(line, r.Comma) + field := line + if i >= 0 { + field = field[:i] + } else { + field = field[:len(field)-lengthNL(field)] + } + // Check to make sure a quote does not appear in field. + if !r.LazyQuotes { + if j := bytes.IndexByte(field, '"'); j >= 0 { + col := pos.col + j + err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrBareQuote} + break parseField + } + } + r.recordBuffer = append(r.recordBuffer, field...) + r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer)) + r.fieldPositions = append(r.fieldPositions, pos) + if i >= 0 { + line = line[i+commaLen:] + pos.col += i + commaLen + continue parseField + } + break parseField + } else { + // Quoted string field + fieldPos := pos + line = line[quoteLen:] + pos.col += quoteLen + for { + i := bytes.IndexByte(line, '"') + if i >= 0 { + // Hit next quote. + r.recordBuffer = append(r.recordBuffer, line[:i]...) + line = line[i+quoteLen:] + pos.col += i + quoteLen + switch rn := nextRune(line); { + case rn == '"': + // `""` sequence (append quote). + r.recordBuffer = append(r.recordBuffer, '"') + line = line[quoteLen:] + pos.col += quoteLen + case rn == r.Comma: + // `",` sequence (end of field). 
+ line = line[commaLen:] + pos.col += commaLen + r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer)) + r.fieldPositions = append(r.fieldPositions, fieldPos) + continue parseField + case lengthNL(line) == len(line): + // `"\n` sequence (end of line). + r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer)) + r.fieldPositions = append(r.fieldPositions, fieldPos) + break parseField + case r.LazyQuotes: + // `"` sequence (bare quote). + r.recordBuffer = append(r.recordBuffer, '"') + default: + // `"*` sequence (invalid non-escaped quote). + err = &ParseError{StartLine: recLine, Line: r.numLine, Column: pos.col - quoteLen, Err: ErrQuote} + break parseField + } + } else if len(line) > 0 { + // Hit end of line (copy all data so far). + r.recordBuffer = append(r.recordBuffer, line...) + if errRead != nil { + break parseField + } + pos.col += len(line) + line, errRead = r.readLine() + if len(line) > 0 { + pos.line++ + pos.col = 1 + } + if errRead == io.EOF { + errRead = nil + } + } else { + // Abrupt end of file (EOF or error). + if !r.LazyQuotes && errRead == nil { + err = &ParseError{StartLine: recLine, Line: pos.line, Column: pos.col, Err: ErrQuote} + break parseField + } + r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer)) + r.fieldPositions = append(r.fieldPositions, fieldPos) + break parseField + } + } + } + } + if err == nil { + err = errRead + } + + // Create a single string and create slices out of it. + // This pins the memory of the fields together, but allocates once. + str := string(r.recordBuffer) // Convert to string once to batch allocations + dst = dst[:0] + if cap(dst) < len(r.fieldIndexes) { + dst = make([]string, len(r.fieldIndexes)) + } + dst = dst[:len(r.fieldIndexes)] + var preIdx int + for i, idx := range r.fieldIndexes { + dst[i] = str[preIdx:idx] + preIdx = idx + } + + // Check or update the expected fields per record. 
+ if r.FieldsPerRecord > 0 { + if len(dst) != r.FieldsPerRecord && err == nil { + err = &ParseError{ + StartLine: recLine, + Line: recLine, + Column: 1, + Err: ErrFieldCount, + } + } + } else if r.FieldsPerRecord == 0 { + r.FieldsPerRecord = len(dst) + } + return dst, err +} diff --git a/gnovm/stdlibs/encoding/csv/reader_test.gno b/gnovm/stdlibs/encoding/csv/reader_test.gno new file mode 100644 index 00000000000..fa4ecdce3ed --- /dev/null +++ b/gnovm/stdlibs/encoding/csv/reader_test.gno @@ -0,0 +1,706 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package csv + +import ( + "fmt" + "io" + + // "reflect" + "strings" + "testing" + "unicode/utf8" +) + +type readTest struct { + Name string + Input string + Output [][]string + Positions [][][2]int + Errors []error + + // These fields are copied into the Reader + Comma rune + Comment rune + UseFieldsPerRecord bool // false (default) means FieldsPerRecord is -1 + FieldsPerRecord int + LazyQuotes bool + TrimLeadingSpace bool + ReuseRecord bool +} + +// In these tests, the §, ¶ and ∑ characters in readTest.Input are used to denote +// the start of a field, a record boundary and the position of an error respectively. +// They are removed before parsing and are used to verify the position +// information reported by FieldPos. 
+ +var readTests = []readTest{{ + Name: "Simple", + Input: "§a,§b,§c\n", + Output: [][]string{{"a", "b", "c"}}, +}, { + Name: "CRLF", + Input: "§a,§b\r\n¶§c,§d\r\n", + Output: [][]string{{"a", "b"}, {"c", "d"}}, +}, { + Name: "BareCR", + Input: "§a,§b\rc,§d\r\n", + Output: [][]string{{"a", "b\rc", "d"}}, +}, { + Name: "RFC4180test", + Input: `§#field1,§field2,§field3 +¶§"aaa",§"bb +b",§"ccc" +¶§"a,a",§"b""bb",§"ccc" +¶§zzz,§yyy,§xxx +`, + Output: [][]string{ + {"#field1", "field2", "field3"}, + {"aaa", "bb\nb", "ccc"}, + {"a,a", `b"bb`, "ccc"}, + {"zzz", "yyy", "xxx"}, + }, + UseFieldsPerRecord: true, + FieldsPerRecord: 0, +}, { + Name: "NoEOLTest", + Input: "§a,§b,§c", + Output: [][]string{{"a", "b", "c"}}, +}, { + Name: "Semicolon", + Input: "§a;§b;§c\n", + Output: [][]string{{"a", "b", "c"}}, + Comma: ';', +}, { + Name: "MultiLine", + Input: `§"two +line",§"one line",§"three +line +field"`, + Output: [][]string{{"two\nline", "one line", "three\nline\nfield"}}, +}, { + Name: "BlankLine", + Input: "§a,§b,§c\n\n¶§d,§e,§f\n\n", + Output: [][]string{ + {"a", "b", "c"}, + {"d", "e", "f"}, + }, +}, { + Name: "BlankLineFieldCount", + Input: "§a,§b,§c\n\n¶§d,§e,§f\n\n", + Output: [][]string{ + {"a", "b", "c"}, + {"d", "e", "f"}, + }, + UseFieldsPerRecord: true, + FieldsPerRecord: 0, +}, { + Name: "TrimSpace", + Input: " §a, §b, §c\n", + Output: [][]string{{"a", "b", "c"}}, + TrimLeadingSpace: true, +}, { + Name: "LeadingSpace", + Input: "§ a,§ b,§ c\n", + Output: [][]string{{" a", " b", " c"}}, +}, { + Name: "Comment", + Input: "#1,2,3\n§a,§b,§c\n#comment", + Output: [][]string{{"a", "b", "c"}}, + Comment: '#', +}, { + Name: "NoComment", + Input: "§#1,§2,§3\n¶§a,§b,§c", + Output: [][]string{{"#1", "2", "3"}, {"a", "b", "c"}}, +}, { + Name: "LazyQuotes", + Input: `§a "word",§"1"2",§a",§"b`, + Output: [][]string{{`a "word"`, `1"2`, `a"`, `b`}}, + LazyQuotes: true, +}, { + Name: "BareQuotes", + Input: `§a "word",§"1"2",§a"`, + Output: [][]string{{`a "word"`, `1"2`, `a"`}}, 
+ LazyQuotes: true, +}, { + Name: "BareDoubleQuotes", + Input: `§a""b,§c`, + Output: [][]string{{`a""b`, `c`}}, + LazyQuotes: true, +}, { + Name: "BadDoubleQuotes", + Input: `§a∑""b,c`, + Errors: []error{&ParseError{Err: ErrBareQuote}}, +}, { + Name: "TrimQuote", + Input: ` §"a",§" b",§c`, + Output: [][]string{{"a", " b", "c"}}, + TrimLeadingSpace: true, +}, { + Name: "BadBareQuote", + Input: `§a ∑"word","b"`, + Errors: []error{&ParseError{Err: ErrBareQuote}}, +}, { + Name: "BadTrailingQuote", + Input: `§"a word",b∑"`, + Errors: []error{&ParseError{Err: ErrBareQuote}}, +}, { + Name: "ExtraneousQuote", + Input: `§"a ∑"word","b"`, + Errors: []error{&ParseError{Err: ErrQuote}}, +}, { + Name: "BadFieldCount", + Input: "§a,§b,§c\n¶∑§d,§e", + Errors: []error{nil, &ParseError{Err: ErrFieldCount}}, + Output: [][]string{{"a", "b", "c"}, {"d", "e"}}, + UseFieldsPerRecord: true, + FieldsPerRecord: 0, +}, { + Name: "BadFieldCountMultiple", + Input: "§a,§b,§c\n¶∑§d,§e\n¶∑§f", + Errors: []error{nil, &ParseError{Err: ErrFieldCount}, &ParseError{Err: ErrFieldCount}}, + Output: [][]string{{"a", "b", "c"}, {"d", "e"}, {"f"}}, + UseFieldsPerRecord: true, + FieldsPerRecord: 0, +}, { + Name: "BadFieldCount1", + Input: `§∑a,§b,§c`, + Errors: []error{&ParseError{Err: ErrFieldCount}}, + Output: [][]string{{"a", "b", "c"}}, + UseFieldsPerRecord: true, + FieldsPerRecord: 2, +}, { + Name: "FieldCount", + Input: "§a,§b,§c\n¶§d,§e", + Output: [][]string{{"a", "b", "c"}, {"d", "e"}}, +}, { + Name: "TrailingCommaEOF", + Input: "§a,§b,§c,§", + Output: [][]string{{"a", "b", "c", ""}}, +}, { + Name: "TrailingCommaEOL", + Input: "§a,§b,§c,§\n", + Output: [][]string{{"a", "b", "c", ""}}, +}, { + Name: "TrailingCommaSpaceEOF", + Input: "§a,§b,§c, §", + Output: [][]string{{"a", "b", "c", ""}}, + TrimLeadingSpace: true, +}, { + Name: "TrailingCommaSpaceEOL", + Input: "§a,§b,§c, §\n", + Output: [][]string{{"a", "b", "c", ""}}, + TrimLeadingSpace: true, +}, { + Name: "TrailingCommaLine3", + Input: 
"§a,§b,§c\n¶§d,§e,§f\n¶§g,§hi,§", + Output: [][]string{{"a", "b", "c"}, {"d", "e", "f"}, {"g", "hi", ""}}, + TrimLeadingSpace: true, +}, { + Name: "NotTrailingComma3", + Input: "§a,§b,§c,§ \n", + Output: [][]string{{"a", "b", "c", " "}}, +}, { + Name: "CommaFieldTest", + Input: `§x,§y,§z,§w +¶§x,§y,§z,§ +¶§x,§y,§,§ +¶§x,§,§,§ +¶§,§,§,§ +¶§"x",§"y",§"z",§"w" +¶§"x",§"y",§"z",§"" +¶§"x",§"y",§"",§"" +¶§"x",§"",§"",§"" +¶§"",§"",§"",§"" +`, + Output: [][]string{ + {"x", "y", "z", "w"}, + {"x", "y", "z", ""}, + {"x", "y", "", ""}, + {"x", "", "", ""}, + {"", "", "", ""}, + {"x", "y", "z", "w"}, + {"x", "y", "z", ""}, + {"x", "y", "", ""}, + {"x", "", "", ""}, + {"", "", "", ""}, + }, +}, { + Name: "TrailingCommaIneffective1", + Input: "§a,§b,§\n¶§c,§d,§e", + Output: [][]string{ + {"a", "b", ""}, + {"c", "d", "e"}, + }, + TrimLeadingSpace: true, +}, { + Name: "ReadAllReuseRecord", + Input: "§a,§b\n¶§c,§d", + Output: [][]string{ + {"a", "b"}, + {"c", "d"}, + }, + ReuseRecord: true, +}, { + Name: "StartLine1", // Issue 19019 + Input: "§a,\"b\nc∑\"d,e", + Errors: []error{&ParseError{Err: ErrQuote}}, +}, { + Name: "StartLine2", + Input: "§a,§b\n¶§\"d\n\n,e∑", + Errors: []error{nil, &ParseError{Err: ErrQuote}}, + Output: [][]string{{"a", "b"}}, +}, { + Name: "CRLFInQuotedField", // Issue 21201 + Input: "§A,§\"Hello\r\nHi\",§B\r\n", + Output: [][]string{ + {"A", "Hello\nHi", "B"}, + }, +}, { + Name: "BinaryBlobField", // Issue 19410 + Input: "§x09\x41\xb4\x1c,§aktau", + Output: [][]string{{"x09A\xb4\x1c", "aktau"}}, +}, { + Name: "TrailingCR", + Input: "§field1,§field2\r", + Output: [][]string{{"field1", "field2"}}, +}, { + Name: "QuotedTrailingCR", + Input: "§\"field\"\r", + Output: [][]string{{"field"}}, +}, { + Name: "QuotedTrailingCRCR", + Input: "§\"field∑\"\r\r", + Errors: []error{&ParseError{Err: ErrQuote}}, +}, { + Name: "FieldCR", + Input: "§field\rfield\r", + Output: [][]string{{"field\rfield"}}, +}, { + Name: "FieldCRCR", + Input: "§field\r\rfield\r\r", + Output: 
[][]string{{"field\r\rfield\r"}}, +}, { + Name: "FieldCRCRLF", + Input: "§field\r\r\n¶§field\r\r\n", + Output: [][]string{{"field\r"}, {"field\r"}}, +}, { + Name: "FieldCRCRLFCR", + Input: "§field\r\r\n¶§\rfield\r\r\n\r", + Output: [][]string{{"field\r"}, {"\rfield\r"}}, +}, { + Name: "FieldCRCRLFCRCR", + Input: "§field\r\r\n¶§\r\rfield\r\r\n¶§\r\r", + Output: [][]string{{"field\r"}, {"\r\rfield\r"}, {"\r"}}, +}, { + Name: "MultiFieldCRCRLFCRCR", + Input: "§field1,§field2\r\r\n¶§\r\rfield1,§field2\r\r\n¶§\r\r,§", + Output: [][]string{ + {"field1", "field2\r"}, + {"\r\rfield1", "field2\r"}, + {"\r\r", ""}, + }, +}, { + Name: "NonASCIICommaAndComment", + Input: "§a£§b,c£ \t§d,e\n€ comment\n", + Output: [][]string{{"a", "b,c", "d,e"}}, + TrimLeadingSpace: true, + Comma: '£', + Comment: '€', +}, { + Name: "NonASCIICommaAndCommentWithQuotes", + Input: "§a€§\" b,\"€§ c\nλ comment\n", + Output: [][]string{{"a", " b,", " c"}}, + Comma: '€', + Comment: 'λ', +}, { + // λ and θ start with the same byte. + // This tests that the parser doesn't confuse such characters. + Name: "NonASCIICommaConfusion", + Input: "§\"abθcd\"λ§efθgh", + Output: [][]string{{"abθcd", "efθgh"}}, + Comma: 'λ', + Comment: '€', +}, { + Name: "NonASCIICommentConfusion", + Input: "§λ\n¶§λ\nθ\n¶§λ\n", + Output: [][]string{{"λ"}, {"λ"}, {"λ"}}, + Comment: 'θ', +}, { + Name: "QuotedFieldMultipleLF", + Input: "§\"\n\n\n\n\"", + Output: [][]string{{"\n\n\n\n"}}, +}, { + Name: "MultipleCRLF", + Input: "\r\n\r\n\r\n\r\n", +}, { + // The implementation may read each line in several chunks if it doesn't fit entirely + // in the read buffer, so we should test the code to handle that condition. 
+ Name: "HugeLines", + Input: strings.Repeat("#ignore\n", 10000) + "§" + strings.Repeat("@", 5000) + ",§" + strings.Repeat("*", 5000), + Output: [][]string{{strings.Repeat("@", 5000), strings.Repeat("*", 5000)}}, + Comment: '#', +}, { + Name: "QuoteWithTrailingCRLF", + Input: "§\"foo∑\"bar\"\r\n", + Errors: []error{&ParseError{Err: ErrQuote}}, +}, { + Name: "LazyQuoteWithTrailingCRLF", + Input: "§\"foo\"bar\"\r\n", + Output: [][]string{{`foo"bar`}}, + LazyQuotes: true, +}, { + Name: "DoubleQuoteWithTrailingCRLF", + Input: "§\"foo\"\"bar\"\r\n", + Output: [][]string{{`foo"bar`}}, +}, { + Name: "EvenQuotes", + Input: `§""""""""`, + Output: [][]string{{`"""`}}, +}, { + Name: "OddQuotes", + Input: `§"""""""∑`, + Errors: []error{&ParseError{Err: ErrQuote}}, +}, { + Name: "LazyOddQuotes", + Input: `§"""""""`, + Output: [][]string{{`"""`}}, + LazyQuotes: true, +}, { + Name: "BadComma1", + Comma: '\n', + Errors: []error{ErrInvalidDelim}, +}, { + Name: "BadComma2", + Comma: '\r', + Errors: []error{ErrInvalidDelim}, +}, { + Name: "BadComma3", + Comma: '"', + Errors: []error{ErrInvalidDelim}, +}, { + Name: "BadComma4", + Comma: utf8.RuneError, + Errors: []error{ErrInvalidDelim}, +}, { + Name: "BadComment1", + Comment: '\n', + Errors: []error{ErrInvalidDelim}, +}, { + Name: "BadComment2", + Comment: '\r', + Errors: []error{ErrInvalidDelim}, +}, { + Name: "BadComment3", + Comment: utf8.RuneError, + Errors: []error{ErrInvalidDelim}, +}, { + Name: "BadCommaComment", + Comma: 'X', + Comment: 'X', + Errors: []error{ErrInvalidDelim}, +}} + +func TestRead(t *testing.T) { + newReader := func(tt readTest) (*Reader, [][][2]int, map[int][2]int, string) { + positions, errPositions, input := makePositions(tt.Input) + r := NewReader(strings.NewReader(input)) + + if tt.Comma != 0 { + r.Comma = tt.Comma + } + r.Comment = tt.Comment + if tt.UseFieldsPerRecord { + r.FieldsPerRecord = tt.FieldsPerRecord + } else { + r.FieldsPerRecord = -1 + } + r.LazyQuotes = tt.LazyQuotes + r.TrimLeadingSpace = 
tt.TrimLeadingSpace + r.ReuseRecord = tt.ReuseRecord + return r, positions, errPositions, input + } + + for _, tt := range readTests { + t.Run(tt.Name, func(t *testing.T) { + r, positions, errPositions, input := newReader(tt) + out, err := r.ReadAll() + if wantErr := firstError(tt.Errors, positions, errPositions); wantErr != nil { + if !deepEqual(err, wantErr) { + t.Fatalf("ReadAll() error mismatch:\ngot %v (%#v)\nwant %v (%#v)", err, err, wantErr, wantErr) + } + if out != nil { + t.Fatalf("ReadAll() output:\ngot %q\nwant nil", out) + } + } else { + if err != nil { + t.Fatalf("unexpected Readall() error: %v", err) + } + if !deepEqual(out, tt.Output) { + t.Fatalf("ReadAll() output:\ngot %q\nwant %q", out, tt.Output) + } + } + + // Check input offset after call ReadAll() + inputByteSize := len(input) + inputOffset := r.InputOffset() + if err == nil && int64(inputByteSize) != inputOffset { + t.Errorf("wrong input offset after call ReadAll():\ngot: %d\nwant: %d\ninput: %s", inputOffset, inputByteSize, input) + } + + // Check field and error positions. + r, _, _, _ = newReader(tt) + for recNum := 0; ; recNum++ { + rec, err := r.Read() + var wantErr error + if recNum < len(tt.Errors) && tt.Errors[recNum] != nil { + wantErr = errorWithPosition(tt.Errors[recNum], recNum, positions, errPositions) + } else if recNum >= len(tt.Output) { + wantErr = io.EOF + } + if !deepEqual(err, wantErr) { + t.Fatalf("Read() error at record %d:\ngot %v (%#v)\nwant %v (%#v)", recNum, err, err, wantErr, wantErr) + } + // ErrFieldCount is explicitly non-fatal. 
+ if err != nil && !isErr(err, ErrFieldCount) { + if recNum < len(tt.Output) { + t.Fatalf("need more records; got %d want %d", recNum, len(tt.Output)) + } + break + } + if got, want := rec, tt.Output[recNum]; !deepEqual(got, want) { + t.Errorf("Read vs ReadAll mismatch;\ngot %q\nwant %q", got, want) + } + pos := positions[recNum] + if len(pos) != len(rec) { + t.Fatalf("mismatched position length at record %d", recNum) + } + for i := range rec { + line, col := r.FieldPos(i) + if got, want := [2]int{line, col}, pos[i]; got != want { + t.Errorf("position mismatch at record %d, field %d;\ngot %v\nwant %v", recNum, i, got, want) + } + } + } + }) + } +} + +// XXX: substitute for errors.Is +func isErr(err, match error) bool { + if err == match { + return true + } + if w, ok := err.(interface{ Unwrap() error }); ok { + return isErr(w.Unwrap(), match) + } + return false +} + +// XXX: substitute for reflect.DeepEqual +func deepEqual(v, x interface{}) bool { + if v == nil { + return x == nil + } + // dumb deep equal, handling only a few cases + switch v := v.(type) { + case error: + return v.Error() == x.(error).Error() + case string: + return v == x.(string) + case []string: + x := x.([]string) + if len(v) != len(x) { + return false + } + for i := range v { + if !deepEqual(v[i], x[i]) { + return false + } + } + return true + case [][]string: + x := x.([][]string) + if len(v) != len(x) { + return false + } + for i := range v { + if !deepEqual(v[i], x[i]) { + return false + } + } + return true + default: + panic("not handled " + fmt.Sprintf("%T %T", v, x)) + } +} + +// firstError returns the first non-nil error in errs, +// with the position adjusted according to the error's +// index inside positions. 
+func firstError(errs []error, positions [][][2]int, errPositions map[int][2]int) error { + for i, err := range errs { + if err != nil { + return errorWithPosition(err, i, positions, errPositions) + } + } + return nil +} + +func errorWithPosition(err error, recNum int, positions [][][2]int, errPositions map[int][2]int) error { + parseErr, ok := err.(*ParseError) + if !ok { + return err + } + if recNum >= len(positions) { + panic(fmt.Errorf("no positions found for error at record %d", recNum)) + } + errPos, ok := errPositions[recNum] + if !ok { + panic(fmt.Errorf("no error position found for error at record %d", recNum)) + } + parseErr1 := *parseErr + parseErr1.StartLine = positions[recNum][0][0] + parseErr1.Line = errPos[0] + parseErr1.Column = errPos[1] + return &parseErr1 +} + +// makePositions returns the expected field positions of all +// the fields in text, the positions of any errors, and the text with the position markers +// removed. +// +// The start of each field is marked with a § symbol; +// CSV lines are separated by ¶ symbols; +// Error positions are marked with ∑ symbols. +func makePositions(text string) ([][][2]int, map[int][2]int, string) { + buf := make([]byte, 0, len(text)) + var positions [][][2]int + errPositions := make(map[int][2]int) + line, col := 1, 1 + recNum := 0 + + for len(text) > 0 { + r, size := utf8.DecodeRuneInString(text) + switch r { + case '\n': + line++ + col = 1 + buf = append(buf, '\n') + case '§': + if len(positions) == 0 { + positions = append(positions, [][2]int{}) + } + positions[len(positions)-1] = append(positions[len(positions)-1], [2]int{line, col}) + case '¶': + positions = append(positions, [][2]int{}) + recNum++ + case '∑': + errPositions[recNum] = [2]int{line, col} + default: + buf = append(buf, text[:size]...) + col += size + } + text = text[size:] + } + return positions, errPositions, string(buf) +} + +// nTimes is an io.Reader which yields the string s n times. 
+type nTimes struct { + s string + n int + off int +} + +func (r *nTimes) Read(p []byte) (n int, err error) { + for { + if r.n <= 0 || r.s == "" { + return n, io.EOF + } + n0 := copy(p, r.s[r.off:]) + p = p[n0:] + n += n0 + r.off += n0 + if r.off == len(r.s) { + r.off = 0 + r.n-- + } + if len(p) == 0 { + return + } + } +} + +// benchmarkRead measures reading the provided CSV rows data. +// initReader, if non-nil, modifies the Reader before it's used. +func benchmarkRead(b *testing.B, initReader func(*Reader), rows string) { + b.ReportAllocs() + r := NewReader(&nTimes{s: rows, n: b.N}) + if initReader != nil { + initReader(r) + } + for { + _, err := r.Read() + if err == io.EOF { + break + } + if err != nil { + b.Fatal(err) + } + } +} + +const benchmarkCSVData = `x,y,z,w +x,y,z, +x,y,, +x,,, +,,, +"x","y","z","w" +"x","y","z","" +"x","y","","" +"x","","","" +"","","","" +` + +func BenchmarkRead(b *testing.B) { + benchmarkRead(b, nil, benchmarkCSVData) +} + +func BenchmarkReadWithFieldsPerRecord(b *testing.B) { + benchmarkRead(b, func(r *Reader) { r.FieldsPerRecord = 4 }, benchmarkCSVData) +} + +func BenchmarkReadWithoutFieldsPerRecord(b *testing.B) { + benchmarkRead(b, func(r *Reader) { r.FieldsPerRecord = -1 }, benchmarkCSVData) +} + +func BenchmarkReadLargeFields(b *testing.B) { + benchmarkRead(b, nil, strings.Repeat(`xxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv +xxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvv 
+,,zzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv +xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv +`, 3)) +} + +func BenchmarkReadReuseRecord(b *testing.B) { + benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true }, benchmarkCSVData) +} + +func BenchmarkReadReuseRecordWithFieldsPerRecord(b *testing.B) { + benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true; r.FieldsPerRecord = 4 }, benchmarkCSVData) +} + +func BenchmarkReadReuseRecordWithoutFieldsPerRecord(b *testing.B) { + benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true; r.FieldsPerRecord = -1 }, benchmarkCSVData) +} + +func BenchmarkReadReuseRecordLargeFields(b *testing.B) { + benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true }, strings.Repeat(`xxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv +xxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvv +,,zzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv 
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv +`, 3)) +} diff --git a/gnovm/stdlibs/encoding/csv/writer.gno b/gnovm/stdlibs/encoding/csv/writer.gno new file mode 100644 index 00000000000..f874765bd4e --- /dev/null +++ b/gnovm/stdlibs/encoding/csv/writer.gno @@ -0,0 +1,184 @@ +package csv + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +import ( + "bufio" + "io" + "strings" + "unicode" + "unicode/utf8" +) + +// A Writer writes records using CSV encoding. +// +// As returned by [NewWriter], a Writer writes records terminated by a +// newline and uses ',' as the field delimiter. The exported fields can be +// changed to customize the details before +// the first call to [Writer.Write] or [Writer.WriteAll]. +// +// [Writer.Comma] is the field delimiter. +// +// If [Writer.UseCRLF] is true, +// the Writer ends each output line with \r\n instead of \n. +// +// The writes of individual records are buffered. +// After all data has been written, the client should call the +// [Writer.Flush] method to guarantee all data has been forwarded to +// the underlying [io.Writer]. Any errors that occurred should +// be checked by calling the [Writer.Error] method. +type Writer struct { + Comma rune // Field delimiter (set to ',' by NewWriter) + UseCRLF bool // True to use \r\n as the line terminator + w *bufio.Writer +} + +// NewWriter returns a new Writer that writes to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + Comma: ',', + w: bufio.NewWriter(w), + } +} + +// Write writes a single CSV record to w along with any necessary quoting. +// A record is a slice of strings with each string being one field. 
+// Writes are buffered, so [Writer.Flush] must eventually be called to ensure +// that the record is written to the underlying [io.Writer]. +func (w *Writer) Write(record []string) error { + if !validDelim(w.Comma) { + return ErrInvalidDelim + } + + for n, field := range record { + if n > 0 { + if _, err := w.w.WriteRune(w.Comma); err != nil { + return err + } + } + + // If we don't have to have a quoted field then just + // write out the field and continue to the next field. + if !w.fieldNeedsQuotes(field) { + if _, err := w.w.WriteString(field); err != nil { + return err + } + continue + } + + if err := w.w.WriteByte('"'); err != nil { + return err + } + for len(field) > 0 { + // Search for special characters. + i := strings.IndexAny(field, "\"\r\n") + if i < 0 { + i = len(field) + } + + // Copy verbatim everything before the special character. + if _, err := w.w.WriteString(field[:i]); err != nil { + return err + } + field = field[i:] + + // Encode the special character. + if len(field) > 0 { + var err error + switch field[0] { + case '"': + _, err = w.w.WriteString(`""`) + case '\r': + if !w.UseCRLF { + err = w.w.WriteByte('\r') + } + case '\n': + if w.UseCRLF { + _, err = w.w.WriteString("\r\n") + } else { + err = w.w.WriteByte('\n') + } + } + field = field[1:] + if err != nil { + return err + } + } + } + if err := w.w.WriteByte('"'); err != nil { + return err + } + } + var err error + if w.UseCRLF { + _, err = w.w.WriteString("\r\n") + } else { + err = w.w.WriteByte('\n') + } + return err +} + +// Flush writes any buffered data to the underlying [io.Writer]. +// To check if an error occurred during Flush, call [Writer.Error]. +func (w *Writer) Flush() { + w.w.Flush() +} + +// Error reports any error that has occurred during +// a previous [Writer.Write] or [Writer.Flush]. 
+func (w *Writer) Error() error { + _, err := w.w.Write(nil) + return err +} + +// WriteAll writes multiple CSV records to w using [Writer.Write] and +// then calls [Writer.Flush], returning any error from the Flush. +func (w *Writer) WriteAll(records [][]string) error { + for _, record := range records { + err := w.Write(record) + if err != nil { + return err + } + } + return w.w.Flush() +} + +// fieldNeedsQuotes reports whether our field must be enclosed in quotes. +// Fields with a Comma, fields with a quote or newline, and +// fields which start with a space must be enclosed in quotes. +// We used to quote empty strings, but we do not anymore (as of Go 1.4). +// The two representations should be equivalent, but Postgres distinguishes +// quoted vs non-quoted empty string during database imports, and it has +// an option to force the quoted behavior for non-quoted CSV but it has +// no option to force the non-quoted behavior for quoted CSV, making +// CSV with quoted empty strings strictly less useful. +// Not quoting the empty string also makes this package match the behavior +// of Microsoft Excel and Google Drive. +// For Postgres, quote the data terminating string `\.`. +func (w *Writer) fieldNeedsQuotes(field string) bool { + if field == "" { + return false + } + + if field == `\.` { + return true + } + + if w.Comma < utf8.RuneSelf { + for i := 0; i < len(field); i++ { + c := field[i] + if c == '\n' || c == '\r' || c == '"' || c == byte(w.Comma) { + return true + } + } + } else { + if strings.ContainsRune(field, w.Comma) || strings.ContainsAny(field, "\"\r\n") { + return true + } + } + + r1, _ := utf8.DecodeRuneInString(field) + return unicode.IsSpace(r1) +} diff --git a/gnovm/stdlibs/encoding/csv/writer_test.gno b/gnovm/stdlibs/encoding/csv/writer_test.gno new file mode 100644 index 00000000000..1407f3c670a --- /dev/null +++ b/gnovm/stdlibs/encoding/csv/writer_test.gno @@ -0,0 +1,134 @@ +package csv + +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +import ( + "bytes" + "encoding/csv" + "errors" + "strings" + "testing" +) + +func TestWriteCSV(t *testing.T) { + records := [][]string{ + {"first_name", "last_name", "username"}, + {"Rob", "Pike", "rob"}, + {"Ken", "Thompson", "ken"}, + {"Robert", "Griesemer", "gri"}, + } + + var buf bytes.Buffer + w := csv.NewWriter(&buf) + err := w.WriteAll(records) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + expected := "first_name,last_name,username\nRob,Pike,rob\nKen,Thompson,ken\nRobert,Griesemer,gri\n" + if buf.String() != expected { + t.Errorf("Unexpected output:\ngot %q\nwant %q", buf.String(), expected) + } +} + +var writeTests = []struct { + Input [][]string + Output string + Error error + UseCRLF bool + Comma rune +}{ + {Input: [][]string{{"abc"}}, Output: "abc\n"}, + {Input: [][]string{{"abc"}}, Output: "abc\r\n", UseCRLF: true}, + {Input: [][]string{{`"abc"`}}, Output: `"""abc"""` + "\n"}, + {Input: [][]string{{`a"b`}}, Output: `"a""b"` + "\n"}, + {Input: [][]string{{`"a"b"`}}, Output: `"""a""b"""` + "\n"}, + {Input: [][]string{{" abc"}}, Output: `" abc"` + "\n"}, + {Input: [][]string{{"abc,def"}}, Output: `"abc,def"` + "\n"}, + {Input: [][]string{{"abc", "def"}}, Output: "abc,def\n"}, + {Input: [][]string{{"abc"}, {"def"}}, Output: "abc\ndef\n"}, + {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\ndef\"\n"}, + {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\r\ndef\"\r\n", UseCRLF: true}, + {Input: [][]string{{"abc\rdef"}}, Output: "\"abcdef\"\r\n", UseCRLF: true}, + {Input: [][]string{{"abc\rdef"}}, Output: "\"abc\rdef\"\n", UseCRLF: false}, + {Input: [][]string{{""}}, Output: "\n"}, + {Input: [][]string{{"", ""}}, Output: ",\n"}, + {Input: [][]string{{"", "", ""}}, Output: ",,\n"}, + {Input: [][]string{{"", "", "a"}}, Output: ",,a\n"}, + {Input: [][]string{{"", "a", ""}}, Output: ",a,\n"}, + {Input: [][]string{{"", "a", "a"}}, Output: 
",a,a\n"}, + {Input: [][]string{{"a", "", ""}}, Output: "a,,\n"}, + {Input: [][]string{{"a", "", "a"}}, Output: "a,,a\n"}, + {Input: [][]string{{"a", "a", ""}}, Output: "a,a,\n"}, + {Input: [][]string{{"a", "a", "a"}}, Output: "a,a,a\n"}, + {Input: [][]string{{`\.`}}, Output: "\"\\.\"\n"}, + {Input: [][]string{{"x09\x41\xb4\x1c", "aktau"}}, Output: "x09\x41\xb4\x1c,aktau\n"}, + {Input: [][]string{{",x09\x41\xb4\x1c", "aktau"}}, Output: "\",x09\x41\xb4\x1c\",aktau\n"}, + {Input: [][]string{{"a", "a", ""}}, Output: "a|a|\n", Comma: '|'}, + {Input: [][]string{{",", ",", ""}}, Output: ",|,|\n", Comma: '|'}, + {Input: [][]string{{"foo"}}, Comma: '"', Error: ErrInvalidDelim}, +} + +func TestWrite(t *testing.T) { + for n, tt := range writeTests { + b := &strings.Builder{} + f := csv.NewWriter(b) + f.UseCRLF = tt.UseCRLF + if tt.Comma != 0 { + f.Comma = tt.Comma + } + err := f.WriteAll(tt.Input) + if err != tt.Error { + t.Errorf("Unexpected error:\ngot %v\nwant %v", err, tt.Error) + } + out := b.String() + if out != tt.Output { + t.Errorf("#%d: out=%q want %q", n, out, tt.Output) + } + } +} + +type errorWriter struct{} + +func (e errorWriter) Write(b []byte) (int, error) { + return 0, errors.New("Test") +} + +func TestError(t *testing.T) { + b := &bytes.Buffer{} + f := csv.NewWriter(b) + f.Write([]string{"abc"}) + f.Flush() + err := f.Error() + if err != nil { + t.Errorf("Unexpected error: %s\n", err) + } + + f = csv.NewWriter(errorWriter{}) + f.Write([]string{"abc"}) + f.Flush() + err = f.Error() + + if err == nil { + t.Error("Error should not be nil") + } +} + +var benchmarkWriteData = [][]string{ + {"abc", "def", "12356", "1234567890987654311234432141542132"}, + {"abc", "def", "12356", "1234567890987654311234432141542132"}, + {"abc", "def", "12356", "1234567890987654311234432141542132"}, +} + +func BenchmarkWrite(b *testing.B) { + for i := 0; i < b.N; i++ { + w := csv.NewWriter(&bytes.Buffer{}) + err := w.WriteAll(benchmarkWriteData) + if err != nil { + b.Fatal(err) + } 
+ w.Flush() + } +} diff --git a/gnovm/stdlibs/generated.go b/gnovm/stdlibs/generated.go index 67b492a34b2..c1198e5f351 100644 --- a/gnovm/stdlibs/generated.go +++ b/gnovm/stdlibs/generated.go @@ -988,7 +988,9 @@ var initOrder = [...]string{ "crypto/ed25519", "crypto/sha256", "encoding", + "encoding/base32", "encoding/base64", + "encoding/csv", "encoding/hex", "hash", "hash/adler32", diff --git a/tm2/pkg/bft/config/config.go b/tm2/pkg/bft/config/config.go index f9e9a0cd899..d290dba6b26 100644 --- a/tm2/pkg/bft/config/config.go +++ b/tm2/pkg/bft/config/config.go @@ -372,6 +372,10 @@ func (cfg BaseConfig) NodeKeyFile() string { // DBDir returns the full path to the database directory func (cfg BaseConfig) DBDir() string { + if filepath.IsAbs(cfg.DBPath) { + return cfg.DBPath + } + return filepath.Join(cfg.RootDir, cfg.DBPath) } diff --git a/tm2/pkg/bft/config/config_test.go b/tm2/pkg/bft/config/config_test.go index 77f7c0d5e16..ea37e6e1763 100644 --- a/tm2/pkg/bft/config/config_test.go +++ b/tm2/pkg/bft/config/config_test.go @@ -185,3 +185,28 @@ func TestConfig_ValidateBaseConfig(t *testing.T) { assert.ErrorIs(t, c.BaseConfig.ValidateBasic(), errInvalidProfListenAddress) }) } + +func TestConfig_DBDir(t *testing.T) { + t.Parallel() + + t.Run("DB path is absolute", func(t *testing.T) { + t.Parallel() + + c := DefaultConfig() + c.RootDir = "/root" + c.DBPath = "/abs/path" + + assert.Equal(t, c.DBPath, c.DBDir()) + assert.NotEqual(t, filepath.Join(c.RootDir, c.DBPath), c.DBDir()) + }) + + t.Run("DB path is relative", func(t *testing.T) { + t.Parallel() + + c := DefaultConfig() + c.RootDir = "/root" + c.DBPath = "relative/path" + + assert.Equal(t, filepath.Join(c.RootDir, c.DBPath), c.DBDir()) + }) +}