sync_state.go
package automerge
// #include "automerge.h"
import "C"
import (
"runtime"
)
// SyncState represents the state of syncing between a local copy of
// a doc and a peer, and lets you optimize the bandwidth used to keep
// two docs in sync.
type SyncState struct {
Doc *Doc
item *item
cSyncState *C.AMsyncState
}
// NewSyncState returns a new SyncState for syncing with a peer.
func NewSyncState(d *Doc) *SyncState {
ss := must(wrap(C.AMsyncStateInit()).item()).syncState()
ss.Doc = d
return ss
}
// LoadSyncState lets you resume syncing with a peer from where you left off.
func LoadSyncState(d *Doc, raw []byte) (*SyncState, error) {
cBytes, free := toByteSpan(raw)
defer free()
item, err := wrap(C.AMsyncStateDecode(cBytes.src, cBytes.count)).item()
if err != nil {
return nil, err
}
ss := item.syncState()
ss.Doc = d
return ss, nil
}
// ReceiveMessage should be called with every message created by GenerateMessage
// on the peer's side. It returns the decoded message so its contents can be
// inspected.
func (ss *SyncState) ReceiveMessage(msg []byte) (*SyncMessage, error) {
sm, err := LoadSyncMessage(msg)
if err != nil {
return nil, err
}
defer runtime.KeepAlive(ss)
defer runtime.KeepAlive(sm)
cDoc, unlock := ss.Doc.lock()
defer unlock()
return sm, wrap(C.AMreceiveSyncMessage(cDoc, ss.cSyncState, sm.cSyncMessage)).void()
}
// GenerateMessage generates the next message to send to the peer.
// If `valid` is false the peers are currently in sync and there are no more
// messages to send (until you either modify the underlying document or
// receive a message from the peer).
func (ss *SyncState) GenerateMessage() (sm *SyncMessage, valid bool) {
defer runtime.KeepAlive(ss)
cDoc, unlock := ss.Doc.lock()
defer unlock()
sm = must(wrap(C.AMgenerateSyncMessage(cDoc, ss.cSyncState)).item()).syncMessage()
if sm == nil {
return nil, false
}
return sm, true
}
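// syncDocs is a minimal sketch, not part of the library, of how
// GenerateMessage and ReceiveMessage are intended to be driven: each side
// keeps generating and applying messages until neither has anything left to
// send. It assumes both docs live in the same process; in practice
// msg.Bytes() would travel over a network transport instead.
func syncDocs(a, b *Doc) error {
	stateA := NewSyncState(a)
	stateB := NewSyncState(b)
	for {
		msgA, validA := stateA.GenerateMessage()
		if validA {
			if _, err := stateB.ReceiveMessage(msgA.Bytes()); err != nil {
				return err
			}
		}
		msgB, validB := stateB.GenerateMessage()
		if validB {
			if _, err := stateA.ReceiveMessage(msgB.Bytes()); err != nil {
				return err
			}
		}
		if !validA && !validB {
			// Neither side has anything left to send: the docs are in sync.
			return nil
		}
	}
}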
// Save serializes the sync state so that you can resume it later.
// This is an optimization to reduce the number of round-trips required
// to get two peers in sync at a later date.
func (ss *SyncState) Save() []byte {
defer runtime.KeepAlive(ss)
return must(wrap(C.AMsyncStateEncode(ss.cSyncState)).item()).bytes()
}
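// resumeSyncState is a minimal sketch, not part of the library, showing how
// Save and LoadSyncState fit together: persist the encoded state when a
// connection drops, then restore it on reconnect so syncing resumes where it
// left off instead of starting from scratch. How the saved bytes are stored
// is assumed to be handled by the caller.
func resumeSyncState(d *Doc, saved []byte) (*SyncState, error) {
	if len(saved) == 0 {
		// Nothing was persisted: fall back to a fresh sync state.
		return NewSyncState(d), nil
	}
	return LoadSyncState(d, saved)
}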
// SyncMessage is sent between peers to keep copies of a document in sync.
type SyncMessage struct {
item *item
cSyncMessage *C.AMsyncMessage
}
// LoadSyncMessage decodes a sync message from a byte slice for inspection.
func LoadSyncMessage(msg []byte) (*SyncMessage, error) {
cBytes, free := toByteSpan(msg)
defer free()
item, err := wrap(C.AMsyncMessageDecode(cBytes.src, cBytes.count)).item()
if err != nil {
return nil, err
}
return item.syncMessage(), nil
}
// Changes returns any changes included in this SyncMessage
func (sm *SyncMessage) Changes() []*Change {
defer runtime.KeepAlive(sm)
items := must(wrap(C.AMsyncMessageChanges(sm.cSyncMessage)).items())
return mapItems(items, func(i *item) *Change { return i.change() })
}
// Heads gives the heads of the peer that generated the SyncMessage
func (sm *SyncMessage) Heads() []ChangeHash {
defer runtime.KeepAlive(sm)
items := must(wrap(C.AMsyncMessageHeads(sm.cSyncMessage)).items())
return mapItems(items, func(i *item) ChangeHash { return i.changeHash() })
}
// Bytes returns a representation for sending over the network.
func (sm *SyncMessage) Bytes() []byte {
if sm == nil {
return nil
}
defer runtime.KeepAlive(sm)
return must(wrap(C.AMsyncMessageEncode(sm.cSyncMessage)).item()).bytes()
}
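// inspectSyncMessage is a minimal sketch, not part of the library, showing
// how a raw message received from a peer can be decoded and examined (for
// example for logging or metrics) before, or instead of, being applied with
// ReceiveMessage.
func inspectSyncMessage(raw []byte) (changeCount int, heads []ChangeHash, err error) {
	sm, err := LoadSyncMessage(raw)
	if err != nil {
		return 0, nil, err
	}
	// Report how many changes the message carries and the peer's heads
	// at the time the message was generated.
	return len(sm.Changes()), sm.Heads(), nil
}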