diff --git a/Cargo.lock b/Cargo.lock index 54dc99d565801..a9b7f88dcd72e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -68,11 +68,13 @@ dependencies = [ "affine_schema", "anyhow", "chrono", + "dashmap", "dotenvy", "napi", "napi-build", "napi-derive", "sqlx", + "thiserror 2.0.9", "tokio", "uniffi", ] @@ -273,6 +275,19 @@ dependencies = [ "nom", ] +[[package]] +name = "async-compat" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bab94bde396a3f7b4962e396fdad640e241ed797d4d8d77fc8c237d14c58fc0" +dependencies = [ + "futures-core", + "futures-io", + "once_cell", + "pin-project-lite", + "tokio", +] + [[package]] name = "async-lock" version = "3.4.0" @@ -2940,6 +2955,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc7687007d2546c454d8ae609b105daceb88175477dac280707ad6d95bcd6f1f" dependencies = [ "anyhow", + "async-compat", "bytes", "log", "once_cell", diff --git a/blocksuite/framework/store/shim.d.ts b/blocksuite/framework/store/shim.d.ts index 0498b36afc654..c8c2ea92c286e 100644 --- a/blocksuite/framework/store/shim.d.ts +++ b/blocksuite/framework/store/shim.d.ts @@ -16,4 +16,5 @@ declare module 'y-protocols/awareness.js' { value: State[Field] ): void; } + export { applyAwarenessUpdate, encodeAwarenessUpdate, modifyAwarenessUpdate, removeAwarenessStates } from 'y-protocols/awareness' } diff --git a/packages/common/env/src/ua-helper.ts b/packages/common/env/src/ua-helper.ts index 1e458bacc6abf..9984bfb8a500f 100644 --- a/packages/common/env/src/ua-helper.ts +++ b/packages/common/env/src/ua-helper.ts @@ -32,6 +32,9 @@ export class UaHelper { } private isStandaloneMode() { + if (typeof window === 'undefined') { + return false; + } if ('standalone' in window.navigator) { return !!window.navigator.standalone; } diff --git a/packages/common/infra/src/op/__tests__/client.spec.ts b/packages/common/infra/src/op/__tests__/client.spec.ts index ffc24a669060e..fa90f47aed764 100644 --- a/packages/common/infra/src/op/__tests__/client.spec.ts +++ b/packages/common/infra/src/op/__tests__/client.spec.ts @@ -27,7 +27,9 @@ describe('op client', () => { port1.postMessage = vi.fn(port1.postMessage); // @ts-expect-error patch postMessage ctx.postMessage = port1.postMessage; - ctx.producer = new OpClient(port1); + ctx.producer = new OpClient(port1, { + timeout: 1000, + }); // @ts-expect-error internal api ctx.handlers = ctx.producer.handlers; vi.useFakeTimers(); diff --git a/packages/common/infra/src/op/client.ts b/packages/common/infra/src/op/client.ts index c469028f96c69..3a368913c0b40 100644 --- a/packages/common/infra/src/op/client.ts +++ b/packages/common/infra/src/op/client.ts @@ -31,7 +31,7 @@ export class OpClient extends AutoMessageHandler { private readonly pendingCalls = new Map(); private readonly obs = new Map>(); private readonly options: OpClientOptions = { - timeout: 3000, + timeout: Infinity, }; constructor(port: MessageCommunicapable, options: OpClientOptions = {}) { @@ -139,9 +139,12 @@ export class OpClient extends AutoMessageHandler { raise('canceled'); }; - const timeout = setTimeout(() => { - raise('timeout'); - }, this.options.timeout); + const timeout = + this.options.timeout === Infinity + ? 
0 + : setTimeout(() => { + raise('timeout'); + }, this.options.timeout); const transferables = fetchTransferables(payload); diff --git a/packages/common/nbstore/package.json b/packages/common/nbstore/package.json index 2ea84ad0331ac..2645092051108 100644 --- a/packages/common/nbstore/package.json +++ b/packages/common/nbstore/package.json @@ -6,7 +6,8 @@ "sideEffects": false, "exports": { ".": "./src/index.ts", - "./worker": "./src/worker/index.ts", + "./worker/client": "./src/worker/client.ts", + "./worker/consumer": "./src/worker/consumer.ts", "./idb": "./src/impls/idb/index.ts", "./idb/v1": "./src/impls/idb/v1/index.ts", "./cloud": "./src/impls/cloud/index.ts", @@ -24,7 +25,6 @@ "yjs": "^13.6.21" }, "devDependencies": { - "@affine/electron-api": "workspace:*", "@affine/graphql": "workspace:*", "fake-indexeddb": "^6.0.0", "idb": "^8.0.0", @@ -32,7 +32,6 @@ "vitest": "2.1.8" }, "peerDependencies": { - "@affine/electron-api": "workspace:*", "@affine/graphql": "workspace:*", "idb": "^8.0.0", "socket.io-client": "^4.7.5" diff --git a/packages/common/nbstore/src/__tests__/frontend.spec.ts b/packages/common/nbstore/src/__tests__/frontend.spec.ts index 9acfca646f98a..b0e5357e74e4a 100644 --- a/packages/common/nbstore/src/__tests__/frontend.spec.ts +++ b/packages/common/nbstore/src/__tests__/frontend.spec.ts @@ -9,6 +9,7 @@ import { DocFrontend } from '../frontend/doc'; import { BroadcastChannelAwarenessStorage } from '../impls/broadcast-channel/awareness'; import { IndexedDBDocStorage } from '../impls/idb'; import { AwarenessSyncImpl } from '../sync/awareness'; +import { DocSyncImpl } from '../sync/doc'; import { expectYjsEqual } from './utils'; test('doc', async () => { @@ -19,7 +20,7 @@ test('doc', async () => { const docStorage = new IndexedDBDocStorage({ id: 'ws1', - peer: 'a', + flavour: 'a', type: 'workspace', }); @@ -27,7 +28,7 @@ test('doc', async () => { await docStorage.connection.waitForConnected(); - const frontend1 = new DocFrontend(docStorage, null); + const frontend1 = new DocFrontend(docStorage, DocSyncImpl.dummy); frontend1.start(); frontend1.addDoc(doc1); await vitest.waitFor(async () => { @@ -42,7 +43,7 @@ test('doc', async () => { const doc2 = new YDoc({ guid: 'test-doc', }); - const frontend2 = new DocFrontend(docStorage, null); + const frontend2 = new DocFrontend(docStorage, DocSyncImpl.dummy); frontend2.start(); frontend2.addDoc(doc2); @@ -57,15 +58,11 @@ test('doc', async () => { test('awareness', async () => { const storage1 = new BroadcastChannelAwarenessStorage({ - id: 'ws1', - peer: 'a', - type: 'workspace', + id: 'ws1:a', }); const storage2 = new BroadcastChannelAwarenessStorage({ - id: 'ws1', - peer: 'b', - type: 'workspace', + id: 'ws1:b', }); storage1.connection.connect(); @@ -90,13 +87,23 @@ test('awareness', async () => { const awarenessC = new Awareness(docC); { - const sync = new AwarenessSyncImpl(storage1, [storage2]); + const sync = new AwarenessSyncImpl({ + local: storage1, + remotes: { + b: storage2, + }, + }); const frontend = new AwarenessFrontend(sync); frontend.connect(awarenessA); frontend.connect(awarenessB); } { - const sync = new AwarenessSyncImpl(storage2, [storage1]); + const sync = new AwarenessSyncImpl({ + local: storage2, + remotes: { + a: storage1, + }, + }); const frontend = new AwarenessFrontend(sync); frontend.connect(awarenessC); } diff --git a/packages/common/nbstore/src/__tests__/sync.spec.ts b/packages/common/nbstore/src/__tests__/sync.spec.ts index 48aacff5cffb0..c6179bd679127 100644 --- 
a/packages/common/nbstore/src/__tests__/sync.spec.ts +++ b/packages/common/nbstore/src/__tests__/sync.spec.ts @@ -19,30 +19,37 @@ test('doc', async () => { const peerADoc = new IndexedDBDocStorage({ id: 'ws1', - peer: 'a', + flavour: 'a', type: 'workspace', }); const peerASync = new IndexedDBSyncStorage({ id: 'ws1', - peer: 'a', + flavour: 'a', type: 'workspace', }); const peerBDoc = new IndexedDBDocStorage({ id: 'ws1', - peer: 'b', + flavour: 'b', type: 'workspace', }); const peerCDoc = new IndexedDBDocStorage({ id: 'ws1', - peer: 'c', + flavour: 'c', type: 'workspace', }); - const peerA = new SpaceStorage([peerADoc, peerASync]); - const peerB = new SpaceStorage([peerBDoc]); - const peerC = new SpaceStorage([peerCDoc]); + const peerA = new SpaceStorage({ + doc: peerADoc, + sync: peerASync, + }); + const peerB = new SpaceStorage({ + doc: peerBDoc, + }); + const peerC = new SpaceStorage({ + doc: peerCDoc, + }); peerA.connect(); peerB.connect(); @@ -57,7 +64,13 @@ test('doc', async () => { bin: update, }); - const sync = new Sync(peerA, [peerB, peerC]); + const sync = new Sync({ + local: peerA, + remotes: { + b: peerB, + c: peerC, + }, + }); sync.start(); await new Promise(resolve => setTimeout(resolve, 1000)); @@ -109,25 +122,31 @@ test('doc', async () => { test('blob', async () => { const a = new IndexedDBBlobStorage({ id: 'ws1', - peer: 'a', + flavour: 'a', type: 'workspace', }); const b = new IndexedDBBlobStorage({ id: 'ws1', - peer: 'b', + flavour: 'b', type: 'workspace', }); const c = new IndexedDBBlobStorage({ id: 'ws1', - peer: 'c', + flavour: 'c', type: 'workspace', }); - const peerA = new SpaceStorage([a]); - const peerB = new SpaceStorage([b]); - const peerC = new SpaceStorage([c]); + const peerA = new SpaceStorage({ + blob: a, + }); + const peerB = new SpaceStorage({ + blob: b, + }); + const peerC = new SpaceStorage({ + blob: c, + }); peerA.connect(); peerB.connect(); @@ -151,7 +170,13 @@ test('blob', async () => { createdAt: new Date(100), }); - const sync = new Sync(peerA, [peerB, peerC]); + const sync = new Sync({ + local: peerA, + remotes: { + b: peerB, + c: peerC, + }, + }); sync.start(); await new Promise(resolve => setTimeout(resolve, 1000)); diff --git a/packages/common/nbstore/src/connection/connection.ts b/packages/common/nbstore/src/connection/connection.ts index cd15248d32624..8ceea47e11ab1 100644 --- a/packages/common/nbstore/src/connection/connection.ts +++ b/packages/common/nbstore/src/connection/connection.ts @@ -92,6 +92,7 @@ export abstract class AutoReconnectConnection }) .catch(error => { if (!this.connectingAbort?.signal.aborted) { + console.error('failed to connect', error); this.setStatus('error', error as any); } }); diff --git a/packages/common/nbstore/src/frontend/awareness.ts b/packages/common/nbstore/src/frontend/awareness.ts index 524c790fba2f9..76aaf7f5cea84 100644 --- a/packages/common/nbstore/src/frontend/awareness.ts +++ b/packages/common/nbstore/src/frontend/awareness.ts @@ -3,7 +3,7 @@ import { applyAwarenessUpdate, type Awareness, encodeAwarenessUpdate, -} from 'y-protocols/awareness.js'; +} from 'y-protocols/awareness'; import type { AwarenessRecord } from '../storage/awareness'; import type { AwarenessSync } from '../sync/awareness'; diff --git a/packages/common/nbstore/src/frontend/blob.ts b/packages/common/nbstore/src/frontend/blob.ts index 7bfc76ab452e7..4586b83e1c4b1 100644 --- a/packages/common/nbstore/src/frontend/blob.ts +++ b/packages/common/nbstore/src/frontend/blob.ts @@ -3,21 +3,33 @@ import type { BlobSync } from '../sync/blob'; 
export class BlobFrontend { constructor( - readonly storage: BlobStorage, - readonly sync?: BlobSync + public readonly storage: BlobStorage, + private readonly sync: BlobSync ) {} get(blobId: string) { - return this.sync - ? this.sync.downloadBlob(blobId) - : this.storage.get(blobId); + return this.sync.downloadBlob(blobId); } set(blob: BlobRecord) { - return this.sync ? this.sync.uploadBlob(blob) : this.storage.set(blob); + return this.sync.uploadBlob(blob); + } + + fullSync() { + return this.sync.fullSync(); } addPriority(_id: string, _priority: number) { // not support yet } + + readonly state$ = this.sync.state$; + + setMaxBlobSize(max: number) { + this.sync.setMaxBlobSize(max); + } + + onReachedMaxBlobSize(cb: (byteSize: number) => void): () => void { + return this.sync.onReachedMaxBlobSize(cb); + } } diff --git a/packages/common/nbstore/src/frontend/doc.ts b/packages/common/nbstore/src/frontend/doc.ts index 9cbfc301b76a6..4265af902b4ad 100644 --- a/packages/common/nbstore/src/frontend/doc.ts +++ b/packages/common/nbstore/src/frontend/doc.ts @@ -1,6 +1,7 @@ import { groupBy } from 'lodash-es'; import { nanoid } from 'nanoid'; -import { Subject } from 'rxjs'; +import type { Subscription } from 'rxjs'; +import { combineLatest, map, Observable, Subject } from 'rxjs'; import { applyUpdate, type Doc as YDoc, @@ -12,7 +13,7 @@ import type { DocRecord, DocStorage } from '../storage'; import type { DocSync } from '../sync/doc'; import { AsyncPriorityQueue } from '../utils/async-priority-queue'; import { isEmptyUpdate } from '../utils/is-empty-update'; -import { throwIfAborted } from '../utils/throw-if-aborted'; +import { MANUALLY_STOP, throwIfAborted } from '../utils/throw-if-aborted'; const NBSTORE_ORIGIN = 'nbstore-frontend'; @@ -36,6 +37,64 @@ interface DocFrontendOptions { mergeUpdates?: (updates: Uint8Array[]) => Promise<Uint8Array> | Uint8Array; } +export type DocFrontendDocState = { + /** + * some data is available in yjs doc instance + */ + ready: boolean; + /** + * data is loaded from local doc storage and applied to yjs doc instance + */ + loaded: boolean; + /** + * some data is being applied to yjs doc instance, or some data is being saved to local doc storage + */ + updating: boolean; + /** + * the doc is syncing with remote peers + */ + syncing: boolean; + /** + * the doc is synced with remote peers + */ + synced: boolean; + /** + * the doc is retrying to sync with remote peers + */ + syncRetrying: boolean; + /** + * the error message when syncing with remote peers + */ + syncErrorMessage: string | null; +}; + +export type DocFrontendState = { + /** + * total number of docs + */ + total: number; + /** + * number of docs that have been loaded to yjs doc instance + */ + loaded: number; + /** + * number of docs that are syncing with remote peers + */ + syncing: number; + /** + * whether all docs are synced with remote peers + */ + synced: boolean; + /** + * whether the doc is retrying to sync with remote peers + */ + syncRetrying: boolean; + /** + * the error message when syncing with remote peers + */ + syncErrorMessage: string | null; +}; + export class DocFrontend { private readonly uniqueId = `frontend:${nanoid()}`; @@ -55,11 +114,68 @@ private readonly abort = new AbortController(); constructor( - private readonly storage: DocStorage, - private readonly sync: DocSync | null, + public readonly storage: DocStorage, + private readonly sync: DocSync, readonly options: DocFrontendOptions = {} ) {} + docState$(docId: string): Observable<DocFrontendDocState> { + const frontendState$ = new 
Observable<{ + ready: boolean; + loaded: boolean; + updating: boolean; + }>(subscribe => { + const next = () => { + subscribe.next({ + ready: this.status.readyDocs.has(docId), + loaded: this.status.connectedDocs.has(docId), + updating: + (this.status.jobMap.get(docId)?.length ?? 0) > 0 || + this.status.currentJob?.docId === docId, + }); + }; + next(); + return this.statusUpdatedSubject$.subscribe(updatedId => { + if (updatedId === docId) next(); + }); + }); + const syncState$ = this.sync.docState$(docId); + return combineLatest([frontendState$, syncState$]).pipe( + map(([frontend, sync]) => ({ + ...frontend, + synced: sync.synced, + syncing: sync.syncing, + syncRetrying: sync.retrying, + syncErrorMessage: sync.errorMessage, + })) + ); + } + + state$ = combineLatest([ + new Observable<{ total: number; loaded: number }>(subscriber => { + const next = () => { + subscriber.next({ + total: this.status.docs.size, + loaded: this.status.connectedDocs.size, + }); + }; + next(); + return this.statusUpdatedSubject$.subscribe(() => { + next(); + }); + }), + this.sync.state$, + ]).pipe( + map(([frontend, sync]) => ({ + total: sync.total ?? frontend.total, + loaded: frontend.loaded, + syncing: sync.syncing, + synced: sync.synced, + syncRetrying: sync.retrying, + syncErrorMessage: sync.errorMessage, + })) + ) satisfies Observable<DocFrontendState>; + start() { if (this.abort.signal.aborted) { throw new Error('doc frontend can only start once'); } @@ -70,10 +186,11 @@ } stop() { - this.abort.abort(); + this.abort.abort(MANUALLY_STOP); } private async mainLoop(signal?: AbortSignal) { + await this.storage.connection.waitForConnected(signal); const dispose = this.storage.subscribeDocUpdate((record, origin) => { this.event.onStorageUpdate(record, origin); }); @@ -314,4 +431,96 @@ return merge(updates.filter(bin => !isEmptyUpdate(bin))); } + + async waitForSynced(abort?: AbortSignal) { + let sub: Subscription | undefined = undefined; + return Promise.race([ + new Promise<void>(resolve => { + sub = this.state$?.subscribe(status => { + if (status.synced) { + resolve(); + } + }); + }), + new Promise((_, reject) => { + if (abort?.aborted) { + reject(abort?.reason); + } + abort?.addEventListener('abort', () => { + reject(abort.reason); + }); + }), + ]).finally(() => { + sub?.unsubscribe(); + }); + } + + async waitForDocLoaded(docId: string, abort?: AbortSignal) { + let sub: Subscription | undefined = undefined; + return Promise.race([ + new Promise<void>(resolve => { + sub = this.docState$(docId).subscribe(state => { + if (state.loaded) { + resolve(); + } + }); + }), + new Promise((_, reject) => { + if (abort?.aborted) { + reject(abort?.reason); + } + abort?.addEventListener('abort', () => { + reject(abort.reason); + }); + }), + ]).finally(() => { + sub?.unsubscribe(); + }); + } + + async waitForDocSynced(docId: string, abort?: AbortSignal) { + let sub: Subscription | undefined = undefined; + return Promise.race([ + new Promise<void>(resolve => { + sub = this.docState$(docId).subscribe(state => { + if (state.syncing) { + resolve(); + } + }); + }), + new Promise((_, reject) => { + if (abort?.aborted) { + reject(abort?.reason); + } + abort?.addEventListener('abort', () => { + reject(abort.reason); + }); + }), + ]).finally(() => { + sub?.unsubscribe(); + }); + } + + async waitForDocReady(docId: string, abort?: AbortSignal) { + let sub: Subscription | undefined = undefined; + return Promise.race([ + new Promise<void>(resolve => { + sub = this.docState$(docId).subscribe(state => { + if (state.ready) { 
resolve(); + } + }); + }), + new Promise((_, reject) => { + if (abort?.aborted) { + reject(abort?.reason); + } + abort?.addEventListener('abort', () => { + reject(abort.reason); + }); + }), + ]).finally(() => { + sub?.unsubscribe(); + }); + } } diff --git a/packages/common/nbstore/src/frontend/index.ts b/packages/common/nbstore/src/frontend/index.ts new file mode 100644 index 0000000000000..f4829dcdbfe8a --- /dev/null +++ b/packages/common/nbstore/src/frontend/index.ts @@ -0,0 +1,3 @@ +export * from './awareness'; +export * from './blob'; +export * from './doc'; diff --git a/packages/common/nbstore/src/impls/broadcast-channel/awareness.ts b/packages/common/nbstore/src/impls/broadcast-channel/awareness.ts index b4ec7a67deba3..4e9b2c5985ff1 100644 --- a/packages/common/nbstore/src/impls/broadcast-channel/awareness.ts +++ b/packages/common/nbstore/src/impls/broadcast-channel/awareness.ts @@ -22,13 +22,27 @@ type ChannelMessage = collectId: string; }; +interface BroadcastChannelAwarenessStorageOptions { + id: string; +} + export class BroadcastChannelAwarenessStorage extends AwarenessStorageBase { + static readonly identifier = 'BroadcastChannelAwarenessStorage'; + override readonly storageType = 'awareness'; - override readonly connection = new BroadcastChannelConnection(this.options); + override readonly connection = new BroadcastChannelConnection({ + id: this.options.id, + }); get channel() { return this.connection.inner; } + constructor( + private readonly options: BroadcastChannelAwarenessStorageOptions + ) { + super(); + } + private readonly subscriptions = new Map< string, Set<{ diff --git a/packages/common/nbstore/src/impls/broadcast-channel/channel.ts b/packages/common/nbstore/src/impls/broadcast-channel/channel.ts index fae9fbb75054f..a29d1e830da37 100644 --- a/packages/common/nbstore/src/impls/broadcast-channel/channel.ts +++ b/packages/common/nbstore/src/impls/broadcast-channel/channel.ts @@ -1,10 +1,13 @@ import { AutoReconnectConnection } from '../../connection'; -import type { StorageOptions } from '../../storage'; + +export interface BroadcastChannelConnectionOptions { + id: string; +} export class BroadcastChannelConnection extends AutoReconnectConnection { - readonly channelName = `channel:${this.opts.peer}:${this.opts.type}:${this.opts.id}`; + readonly channelName = `channel:${this.opts.id}`; - constructor(private readonly opts: StorageOptions) { + constructor(private readonly opts: BroadcastChannelConnectionOptions) { super(); } diff --git a/packages/common/nbstore/src/impls/broadcast-channel/index.ts b/packages/common/nbstore/src/impls/broadcast-channel/index.ts new file mode 100644 index 0000000000000..587a90f44a5e1 --- /dev/null +++ b/packages/common/nbstore/src/impls/broadcast-channel/index.ts @@ -0,0 +1,6 @@ +import type { StorageConstructor } from '..'; +import { BroadcastChannelAwarenessStorage } from './awareness'; + +export const broadcastChannelStorages = [ + BroadcastChannelAwarenessStorage, +] satisfies StorageConstructor[]; diff --git a/packages/common/nbstore/src/impls/cloud/awareness.ts b/packages/common/nbstore/src/impls/cloud/awareness.ts index 5ee8ccd0e96ba..022a8d1e347f2 100644 --- a/packages/common/nbstore/src/impls/cloud/awareness.ts +++ b/packages/common/nbstore/src/impls/cloud/awareness.ts @@ -4,21 +4,33 @@ import { share } from '../../connection'; import { type AwarenessRecord, AwarenessStorageBase, - type AwarenessStorageOptions, } from '../../storage/awareness'; +import type { SpaceType } from '../../utils/universal-id'; import { 
base64ToUint8Array, SocketConnection, uint8ArrayToBase64, } from './socket'; -interface CloudAwarenessStorageOptions extends AwarenessStorageOptions { - socketOptions: SocketOptions; +interface CloudAwarenessStorageOptions { + socketOptions?: SocketOptions; + serverBaseUrl: string; + type: SpaceType; + id: string; } -export class CloudAwarenessStorage extends AwarenessStorageBase { +export class CloudAwarenessStorage extends AwarenessStorageBase { + static readonly identifier = 'CloudAwarenessStorage'; + + constructor(private readonly options: CloudAwarenessStorageOptions) { + super(); + } + connection = share( - new SocketConnection(this.peer, this.options.socketOptions) + new SocketConnection( + `${this.options.serverBaseUrl}/`, + this.options.socketOptions + ) ); private get socket() { @@ -28,8 +40,8 @@ export class CloudAwarenessStorage extends AwarenessStorageBase { const encodedUpdate = await uint8ArrayToBase64(record.bin); this.socket.emit('space:update-awareness', { - spaceType: this.spaceType, - spaceId: this.spaceId, + spaceType: this.options.type, + spaceId: this.options.id, docId: record.docId, awarenessUpdate: encodedUpdate, }); @@ -44,8 +56,8 @@ export class CloudAwarenessStorage extends AwarenessStorageBase { this.socket.emit('space:leave-awareness', { - spaceType: this.spaceType, - spaceId: this.spaceId, + spaceType: this.options.type, + spaceId: this.options.id, docId: id, }); }; @@ -53,14 +65,14 @@ export class CloudAwarenessStorage extends AwarenessStorageBase { await this.socket.emitWithAck('space:join-awareness', { - spaceType: this.spaceType, - spaceId: this.spaceId, + spaceType: this.options.type, + spaceId: this.options.id, docId: id, clientVersion: BUILD_CONFIG.appVersion, }); this.socket.emit('space:load-awarenesses', { - spaceType: this.spaceType, - spaceId: this.spaceId, + spaceType: this.options.type, + spaceId: this.options.id, docId: id, }); }; @@ -87,8 +99,8 @@ export class CloudAwarenessStorage extends AwarenessStorageBase { if ( - spaceId === this.spaceId && - spaceType === this.spaceType && + spaceId === this.options.id && + spaceType === this.options.type && docId === id ) { (async () => { @@ -96,8 +108,8 @@ export class CloudAwarenessStorage extends AwarenessStorageBase { if ( - spaceId === this.spaceId && - spaceType === this.spaceType && + spaceId === this.options.id && + spaceType === this.options.type && docId === id ) { onUpdate({ diff --git a/packages/common/nbstore/src/impls/cloud/blob.ts b/packages/common/nbstore/src/impls/cloud/blob.ts index 2dac5f6ed944d..b6ed2ce4484e5 100644 --- a/packages/common/nbstore/src/impls/cloud/blob.ts +++ b/packages/common/nbstore/src/impls/cloud/blob.ts @@ -1,35 +1,30 @@ import { deleteBlobMutation, - gqlFetcherFactory, listBlobsQuery, releaseDeletedBlobsMutation, setBlobMutation, } from '@affine/graphql'; -import { DummyConnection } from '../../connection'; -import { - type BlobRecord, - BlobStorageBase, - type BlobStorageOptions, -} from '../../storage'; +import { type BlobRecord, BlobStorageBase } from '../../storage'; +import { HttpConnection } from './http'; -interface CloudBlobStorageOptions extends BlobStorageOptions { - apiBaseUrl: string; +interface CloudBlobStorageOptions { + serverBaseUrl: string; + id: string; } -export class CloudBlobStorage extends BlobStorageBase { - private readonly gql = gqlFetcherFactory( - this.options.apiBaseUrl + '/graphql' - ); - override connection = new DummyConnection(); +export class CloudBlobStorage extends BlobStorageBase { + static readonly identifier = 
'CloudBlobStorage'; + + constructor(private readonly options: CloudBlobStorageOptions) { + super(); + } + + readonly connection = new HttpConnection(this.options.serverBaseUrl); override async get(key: string) { - const res = await fetch( - this.options.apiBaseUrl + - '/api/workspaces/' + - this.spaceId + - '/blobs/' + - key, + const res = await this.connection.fetch( + '/api/workspaces/' + this.options.id + '/blobs/' + key, { cache: 'default', headers: { @@ -38,49 +33,53 @@ } ); - if (!res.ok) { + if (res.status === 404) { return null; } - const data = await res.arrayBuffer(); + try { + const blob = await res.blob(); - return { - key, - data: new Uint8Array(data), - mime: res.headers.get('content-type') || '', - size: data.byteLength, - createdAt: new Date(res.headers.get('last-modified') || Date.now()), - }; + return { + key, + data: new Uint8Array(await blob.arrayBuffer()), + mime: blob.type, + size: blob.size, + createdAt: new Date(res.headers.get('last-modified') || Date.now()), + }; + } catch (err) { + throw new Error('blob download error: ' + err); + } } override async set(blob: BlobRecord) { - await this.gql({ + await this.connection.gql({ query: setBlobMutation, variables: { - workspaceId: this.spaceId, + workspaceId: this.options.id, blob: new File([blob.data], blob.key, { type: blob.mime }), }, }); } override async delete(key: string, permanently: boolean) { - await this.gql({ + await this.connection.gql({ query: deleteBlobMutation, - variables: { workspaceId: this.spaceId, key, permanently }, + variables: { workspaceId: this.options.id, key, permanently }, }); } override async release() { - await this.gql({ + await this.connection.gql({ query: releaseDeletedBlobsMutation, - variables: { workspaceId: this.spaceId }, + variables: { workspaceId: this.options.id }, }); } override async list() { - const res = await this.gql({ + const res = await this.connection.gql({ query: listBlobsQuery, - variables: { workspaceId: this.spaceId }, + variables: { workspaceId: this.options.id }, }); return res.workspace.blobs.map(blob => ({ diff --git a/packages/common/nbstore/src/impls/cloud/doc-static.ts b/packages/common/nbstore/src/impls/cloud/doc-static.ts new file mode 100644 index 0000000000000..6d698067a3966 --- /dev/null +++ b/packages/common/nbstore/src/impls/cloud/doc-static.ts @@ -0,0 +1,82 @@ +import { + type DocClock, + type DocClocks, + type DocRecord, + DocStorageBase, + type DocStorageOptions, + type DocUpdate, +} from '../../storage'; +import { HttpConnection } from './http'; + +interface CloudDocStorageOptions extends DocStorageOptions { + serverBaseUrl: string; +} + +export class StaticCloudDocStorage extends DocStorageBase<CloudDocStorageOptions> { + static readonly identifier = 'StaticCloudDocStorage'; + + constructor(options: CloudDocStorageOptions) { + super({ ...options, readonlyMode: true }); + } + + override connection = new HttpConnection(this.options.serverBaseUrl); + override async pushDocUpdate( + update: DocUpdate, + _origin?: string + ): Promise<DocClock> { + // http is readonly + return { docId: update.docId, timestamp: new Date() }; + } + override async getDocTimestamp(docId: string): Promise<DocClock> { + // http doesn't support this, so we just return a new timestamp + return { + docId, + timestamp: new Date(), + }; + } + override async getDocTimestamps(): Promise<DocClocks> { + // http doesn't support this + return {}; + } + override deleteDoc(_docId: string): Promise<void> { + // http is readonly + return Promise.resolve(); + } + protected override async 
getDocSnapshot( + docId: string + ): Promise<DocRecord | null> { + const arrayBuffer = await this.connection.fetchArrayBuffer( + `/api/workspaces/${this.spaceId}/docs/${docId}`, + { + priority: 'high', + headers: { + Accept: 'application/octet-stream', // this is necessary for ios native fetch to return arraybuffer + }, + } + ); + if (!arrayBuffer) { + return null; + } + return { + docId: docId, + bin: new Uint8Array(arrayBuffer), + timestamp: new Date(), + }; + } + protected override setDocSnapshot( + _snapshot: DocRecord, + _prevSnapshot: DocRecord | null + ): Promise<boolean> { + // http is readonly + return Promise.resolve(false); + } + protected override getDocUpdates(_docId: string): Promise<DocRecord[]> { + return Promise.resolve([]); + } + protected override markUpdatesMerged( + _docId: string, + _updates: DocRecord[] + ): Promise<number> { + return Promise.resolve(0); + } +} diff --git a/packages/common/nbstore/src/impls/cloud/doc.ts b/packages/common/nbstore/src/impls/cloud/doc.ts index 757b93351c513..34511b3518b11 100644 --- a/packages/common/nbstore/src/impls/cloud/doc.ts +++ b/packages/common/nbstore/src/impls/cloud/doc.ts @@ -12,6 +12,7 @@ import { type DocStorageOptions, type DocUpdate, } from '../../storage'; +import type { SpaceType } from '../../utils/universal-id'; import { base64ToUint8Array, type ServerEventsMap, @@ -20,15 +21,20 @@ } from './socket'; interface CloudDocStorageOptions extends DocStorageOptions { - socketOptions: SocketOptions; + socketOptions?: SocketOptions; serverBaseUrl: string; + type: SpaceType; } export class CloudDocStorage extends DocStorageBase<CloudDocStorageOptions> { + static readonly identifier = 'CloudDocStorage'; + get socket() { return this.connection.inner; } + readonly spaceType = this.options.type; + onServerUpdate: ServerEventsMap['space:broadcast-doc-update'] = message => { if ( this.spaceType === message.spaceType && diff --git a/packages/common/nbstore/src/impls/cloud/http.ts b/packages/common/nbstore/src/impls/cloud/http.ts new file mode 100644 index 0000000000000..560e76cb7fcac --- /dev/null +++ b/packages/common/nbstore/src/impls/cloud/http.ts @@ -0,0 +1,69 @@ +import { gqlFetcherFactory } from '@affine/graphql'; + +import { DummyConnection } from '../../connection'; + +export class HttpConnection extends DummyConnection { + readonly fetch = async (input: string, init?: RequestInit) => { + const externalSignal = init?.signal; + if (externalSignal?.aborted) { + throw externalSignal.reason; + } + const abortController = new AbortController(); + externalSignal?.addEventListener('abort', reason => { + abortController.abort(reason); + }); + + const timeout = 15000; + const timeoutId = setTimeout(() => { + abortController.abort('timeout'); + }, timeout); + + const res = await globalThis + .fetch(new URL(input, this.serverBaseUrl), { + ...init, + signal: abortController.signal, + headers: { + ...init?.headers, + 'x-affine-version': BUILD_CONFIG.appVersion, + }, + }) + .catch(err => { + throw new Error('fetch error: ' + err); + }); + clearTimeout(timeoutId); + if (!res.ok && res.status !== 404) { + let reason: string | any = ''; + if (res.headers.get('Content-Type')?.includes('application/json')) { + try { + reason = await res.json(); + } catch { + // ignore + } + } + throw new Error('fetch error status: ' + res.status + ' ' + reason); + } + return res; + }; + + readonly fetchArrayBuffer = async (input: string, init?: RequestInit) => { + const res = await this.fetch(input, init); + if (res.status === 404) { + // 404 + return null; + } + try { + return await res.arrayBuffer(); + } catch (err) { 
throw new Error('fetch download error: ' + err); + } + }; + + readonly gql = gqlFetcherFactory( + new URL('/graphql', this.serverBaseUrl).href, + this.fetch + ); + + constructor(private readonly serverBaseUrl: string) { + super(); + } +} diff --git a/packages/common/nbstore/src/impls/cloud/index.ts b/packages/common/nbstore/src/impls/cloud/index.ts index f4829dcdbfe8a..bb55860065a73 100644 --- a/packages/common/nbstore/src/impls/cloud/index.ts +++ b/packages/common/nbstore/src/impls/cloud/index.ts @@ -1,3 +1,17 @@ +import type { StorageConstructor } from '..'; +import { CloudAwarenessStorage } from './awareness'; +import { CloudBlobStorage } from './blob'; +import { CloudDocStorage } from './doc'; +import { StaticCloudDocStorage } from './doc-static'; + export * from './awareness'; export * from './blob'; export * from './doc'; +export * from './doc-static'; + +export const cloudStorages = [ + CloudDocStorage, + StaticCloudDocStorage, + CloudBlobStorage, + CloudAwarenessStorage, +] satisfies StorageConstructor[]; diff --git a/packages/common/nbstore/src/impls/cloud/socket.ts b/packages/common/nbstore/src/impls/cloud/socket.ts index 558afb56f27c8..8783333399749 100644 --- a/packages/common/nbstore/src/impls/cloud/socket.ts +++ b/packages/common/nbstore/src/impls/cloud/socket.ts @@ -162,7 +162,7 @@ export class SocketConnection extends AutoReconnectConnection { constructor( private readonly endpoint: string, - private readonly socketOptions: SocketOptions + private readonly socketOptions?: SocketOptions ) { super(); } diff --git a/packages/common/nbstore/src/impls/idb/blob.ts b/packages/common/nbstore/src/impls/idb/blob.ts index 0ea7fc7821f60..d3ff484ff2d6d 100644 --- a/packages/common/nbstore/src/impls/idb/blob.ts +++ b/packages/common/nbstore/src/impls/idb/blob.ts @@ -4,11 +4,17 @@ import { BlobStorageBase, type ListedBlobRecord, } from '../../storage'; -import { IDBConnection } from './db'; +import { IDBConnection, type IDBConnectionOptions } from './db'; export class IndexedDBBlobStorage extends BlobStorageBase { + static readonly identifier = 'IndexedDBBlobStorage'; + readonly connection = share(new IDBConnection(this.options)); + constructor(private readonly options: IDBConnectionOptions) { + super(); + } + get db() { return this.connection.inner.db; } diff --git a/packages/common/nbstore/src/impls/idb/db.ts b/packages/common/nbstore/src/impls/idb/db.ts index c7ba4282aa2d1..130e619e10283 100644 --- a/packages/common/nbstore/src/impls/idb/db.ts +++ b/packages/common/nbstore/src/impls/idb/db.ts @@ -1,20 +1,26 @@ import { type IDBPDatabase, openDB } from 'idb'; import { AutoReconnectConnection } from '../../connection'; -import type { StorageOptions } from '../../storage'; +import type { SpaceType } from '../../utils/universal-id'; import { type DocStorageSchema, migrator } from './schema'; +export interface IDBConnectionOptions { + flavour: string; + type: SpaceType; + id: string; +} + export class IDBConnection extends AutoReconnectConnection<{ db: IDBPDatabase; channel: BroadcastChannel; }> { - readonly dbName = `${this.opts.peer}:${this.opts.type}:${this.opts.id}`; + readonly dbName = `${this.opts.flavour}:${this.opts.type}:${this.opts.id}`; override get shareId() { return `idb(${migrator.version}):${this.dbName}`; } - constructor(private readonly opts: StorageOptions) { + constructor(private readonly opts: IDBConnectionOptions) { super(); } diff --git a/packages/common/nbstore/src/impls/idb/doc.ts b/packages/common/nbstore/src/impls/idb/doc.ts index 086b4ca29af9f..3a239b01809a4 
100644 --- a/packages/common/nbstore/src/impls/idb/doc.ts +++ b/packages/common/nbstore/src/impls/idb/doc.ts @@ -3,10 +3,9 @@ import { type DocClocks, type DocRecord, DocStorageBase, - type DocStorageOptions, type DocUpdate, } from '../../storage'; -import { IDBConnection } from './db'; +import { IDBConnection, type IDBConnectionOptions } from './db'; import { IndexedDBLocker } from './lock'; interface ChannelMessage { @@ -15,7 +14,9 @@ interface ChannelMessage { origin?: string; } -export class IndexedDBDocStorage extends DocStorageBase { +export class IndexedDBDocStorage extends DocStorageBase { + static readonly identifier = 'IndexedDBDocStorage'; + readonly connection = new IDBConnection(this.options); get db() { @@ -30,10 +31,6 @@ export class IndexedDBDocStorage extends DocStorageBase { private _lastTimestamp = new Date(0); - constructor(options: DocStorageOptions) { - super(options); - } - private generateTimestamp() { const timestamp = new Date(); if (timestamp.getTime() <= this._lastTimestamp.getTime()) { diff --git a/packages/common/nbstore/src/impls/idb/index.ts b/packages/common/nbstore/src/impls/idb/index.ts index debe733b43801..8959391c45280 100644 --- a/packages/common/nbstore/src/impls/idb/index.ts +++ b/packages/common/nbstore/src/impls/idb/index.ts @@ -1,3 +1,20 @@ +import type { StorageConstructor } from '..'; +import { IndexedDBBlobStorage } from './blob'; +import { IndexedDBDocStorage } from './doc'; +import { IndexedDBSyncStorage } from './sync'; +import { IndexedDBV1BlobStorage, IndexedDBV1DocStorage } from './v1'; + export * from './blob'; export * from './doc'; export * from './sync'; + +export const idbStorages = [ + IndexedDBDocStorage, + IndexedDBBlobStorage, + IndexedDBSyncStorage, +] satisfies StorageConstructor[]; + +export const idbv1Storages = [ + IndexedDBV1DocStorage, + IndexedDBV1BlobStorage, +] satisfies StorageConstructor[]; diff --git a/packages/common/nbstore/src/impls/idb/sync.ts b/packages/common/nbstore/src/impls/idb/sync.ts index 77c9568bdc093..62a4143ff4ff3 100644 --- a/packages/common/nbstore/src/impls/idb/sync.ts +++ b/packages/common/nbstore/src/impls/idb/sync.ts @@ -1,7 +1,14 @@ import { share } from '../../connection'; -import { BasicSyncStorage, type DocClock, type DocClocks } from '../../storage'; -import { IDBConnection } from './db'; -export class IndexedDBSyncStorage extends BasicSyncStorage { +import { type DocClock, type DocClocks, SyncStorageBase } from '../../storage'; +import { IDBConnection, type IDBConnectionOptions } from './db'; + +export class IndexedDBSyncStorage extends SyncStorageBase { + static readonly identifier = 'IndexedDBSyncStorage'; + + constructor(private readonly options: IDBConnectionOptions) { + super(); + } + readonly connection = share(new IDBConnection(this.options)); get db() { diff --git a/packages/common/nbstore/src/impls/idb/v1/blob.ts b/packages/common/nbstore/src/impls/idb/v1/blob.ts index 508bb851e62b5..261911911bf76 100644 --- a/packages/common/nbstore/src/impls/idb/v1/blob.ts +++ b/packages/common/nbstore/src/impls/idb/v1/blob.ts @@ -1,12 +1,18 @@ import { share } from '../../../connection'; import { BlobStorageBase, type ListedBlobRecord } from '../../../storage'; -import { BlobIDBConnection } from './db'; +import { BlobIDBConnection, type BlobIDBConnectionOptions } from './db'; /** * @deprecated readonly */ export class IndexedDBV1BlobStorage extends BlobStorageBase { - readonly connection = share(new BlobIDBConnection(this.spaceId)); + static readonly identifier = 'IndexedDBV1BlobStorage'; + + 
constructor(private readonly options: BlobIDBConnectionOptions) { + super(); + } + + readonly connection = share(new BlobIDBConnection(this.options)); get db() { return this.connection.inner; diff --git a/packages/common/nbstore/src/impls/idb/v1/db.ts b/packages/common/nbstore/src/impls/idb/v1/db.ts index b0934fd21d545..946eb76c48e23 100644 --- a/packages/common/nbstore/src/impls/idb/v1/db.ts +++ b/packages/common/nbstore/src/impls/idb/v1/db.ts @@ -42,19 +42,23 @@ export interface BlobDBSchema extends DBSchema { }; } +export interface BlobIDBConnectionOptions { + id: string; +} + export class BlobIDBConnection extends AutoReconnectConnection< IDBPDatabase > { - constructor(private readonly workspaceId: string) { + constructor(private readonly options: BlobIDBConnectionOptions) { super(); } override get shareId() { - return `idb(old-blob):${this.workspaceId}`; + return `idb(old-blob):${this.options.id}`; } override async doConnect() { - return openDB(`${this.workspaceId}_blob`, 1, { + return openDB(`${this.options.id}_blob`, 1, { upgrade: db => { db.createObjectStore('blob'); }, diff --git a/packages/common/nbstore/src/impls/idb/v1/doc.ts b/packages/common/nbstore/src/impls/idb/v1/doc.ts index cd21db55276e4..a19c0fa20b974 100644 --- a/packages/common/nbstore/src/impls/idb/v1/doc.ts +++ b/packages/common/nbstore/src/impls/idb/v1/doc.ts @@ -10,6 +10,8 @@ import { DocIDBConnection } from './db'; * @deprecated readonly */ export class IndexedDBV1DocStorage extends DocStorageBase { + static readonly identifier = 'IndexedDBV1DocStorage'; + readonly connection = share(new DocIDBConnection()); get db() { diff --git a/packages/common/nbstore/src/impls/index.ts b/packages/common/nbstore/src/impls/index.ts index e43dd7df34ea3..4ff03bdae2087 100644 --- a/packages/common/nbstore/src/impls/index.ts +++ b/packages/common/nbstore/src/impls/index.ts @@ -1,47 +1,24 @@ import type { Storage } from '../storage'; -import { BroadcastChannelAwarenessStorage } from './broadcast-channel/awareness'; -import { - CloudAwarenessStorage, - CloudBlobStorage, - CloudDocStorage, -} from './cloud'; -import { - IndexedDBBlobStorage, - IndexedDBDocStorage, - IndexedDBSyncStorage, -} from './idb'; -import { IndexedDBV1BlobStorage, IndexedDBV1DocStorage } from './idb/v1'; +import type { broadcastChannelStorages } from './broadcast-channel'; +import type { cloudStorages } from './cloud'; +import type { idbStorages, idbv1Storages } from './idb'; +import type { sqliteStorages } from './sqlite'; -type StorageConstructor = new (...args: any[]) => Storage; - -const idb: StorageConstructor[] = [ - IndexedDBDocStorage, - IndexedDBBlobStorage, - IndexedDBSyncStorage, - BroadcastChannelAwarenessStorage, -]; - -const idbv1: StorageConstructor[] = [ - IndexedDBV1DocStorage, - IndexedDBV1BlobStorage, -]; - -const cloud: StorageConstructor[] = [ - CloudDocStorage, - CloudBlobStorage, - CloudAwarenessStorage, -]; - -export const storages: StorageConstructor[] = cloud.concat(idbv1, idb); +export type StorageConstructor = { + new (...args: any[]): Storage; + readonly identifier: string; +}; -const AvailableStorageImplementations = storages.reduce( - (acc, curr) => { - acc[curr.name] = curr; - return acc; - }, - {} as Record -); +type Storages = + | typeof cloudStorages + | typeof idbv1Storages + | typeof idbStorages + | typeof sqliteStorages + | typeof broadcastChannelStorages; -export const getAvailableStorageImplementations = (name: string) => { - return AvailableStorageImplementations[name]; +// oxlint-disable-next-line no-redeclare +export 
type AvailableStorageImplementations = { + [key in Storages[number]['identifier']]: Storages[number] & { + identifier: key; + }; +}; diff --git a/packages/common/nbstore/src/impls/sqlite/blob.ts b/packages/common/nbstore/src/impls/sqlite/blob.ts index 40f9f44cc0163..ebfb7ca090dd5 100644 --- a/packages/common/nbstore/src/impls/sqlite/blob.ts +++ b/packages/common/nbstore/src/impls/sqlite/blob.ts @@ -1,11 +1,15 @@ import { share } from '../../connection'; import { type BlobRecord, BlobStorageBase } from '../../storage'; -import { NativeDBConnection } from './db'; +import { NativeDBConnection, type SqliteNativeDBOptions } from './db'; export class SqliteBlobStorage extends BlobStorageBase { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); + static readonly identifier = 'SqliteBlobStorage'; + + override connection = share(new NativeDBConnection(this.options)); + + constructor(private readonly options: SqliteNativeDBOptions) { + super(); + } get db() { return this.connection.apis; diff --git a/packages/common/nbstore/src/impls/sqlite/db.ts b/packages/common/nbstore/src/impls/sqlite/db.ts index 861d41ed127f4..e9724949ded9e 100644 --- a/packages/common/nbstore/src/impls/sqlite/db.ts +++ b/packages/common/nbstore/src/impls/sqlite/db.ts @@ -1,9 +1,81 @@ -import { apis } from '@affine/electron-api'; - import { AutoReconnectConnection } from '../../connection'; -import { type SpaceType, universalId } from '../../storage'; +import type { + BlobRecord, + DocClock, + DocRecord, + ListedBlobRecord, +} from '../../storage'; +import { type SpaceType, universalId } from '../../utils/universal-id'; + +export interface SqliteNativeDBOptions { + readonly flavour: string; + readonly type: SpaceType; + readonly id: string; } -type NativeDBApis = NonNullable<typeof apis>['nbstore'] extends infer APIs +export type NativeDBApis = { + connect(id: string): Promise<void>; + disconnect(id: string): Promise<void>; + pushUpdate(id: string, docId: string, update: Uint8Array): Promise<Date>; + getDocSnapshot(id: string, docId: string): Promise<DocRecord | null>; + setDocSnapshot(id: string, snapshot: DocRecord): Promise<boolean>; + getDocUpdates(id: string, docId: string): Promise<DocRecord[]>; + markUpdatesMerged( + id: string, + docId: string, + updates: Date[] + ): Promise<number>; + deleteDoc(id: string, docId: string): Promise<void>; + getDocClocks( + id: string, + after?: Date | undefined | null + ): Promise<DocClock[]>; + getDocClock(id: string, docId: string): Promise<DocClock | null>; + getBlob(id: string, key: string): Promise<BlobRecord | null>; + setBlob(id: string, blob: BlobRecord): Promise<void>; + deleteBlob(id: string, key: string, permanently: boolean): Promise<void>; + releaseBlobs(id: string): Promise<void>; + listBlobs(id: string): Promise<ListedBlobRecord[]>; + getPeerRemoteClocks(id: string, peer: string): Promise<DocClock[]>; + getPeerRemoteClock( + id: string, + peer: string, + docId: string + ): Promise<DocClock | null>; + setPeerRemoteClock( + id: string, + peer: string, + docId: string, + clock: Date + ): Promise<void>; + getPeerPulledRemoteClocks(id: string, peer: string): Promise<DocClock[]>; + getPeerPulledRemoteClock( + id: string, + peer: string, + docId: string + ): Promise<DocClock | null>; + setPeerPulledRemoteClock( + id: string, + peer: string, + docId: string, + clock: Date + ): Promise<void>; + getPeerPushedClocks(id: string, peer: string): Promise<DocClock[]>; + getPeerPushedClock( + id: string, + peer: string, + docId: string + ): Promise<DocClock | null>; + setPeerPushedClock( + id: string, + peer: string, + docId: string, + clock: Date + ): Promise<void>; + clearClocks(id: string): Promise<void>; +}; + +type NativeDBApisWrapper = NativeDBApis extends infer APIs ? 
{ [K in keyof APIs]: APIs[K] extends (...args: any[]) => any ? Parameters extends [string, ...infer Rest] @@ -13,49 +85,56 @@ type NativeDBApis = NonNullable['nbstore'] extends infer APIs } : never; +let apis: NativeDBApis | null = null; + +export function bindNativeDBApis(a: NativeDBApis) { + apis = a; +} + export class NativeDBConnection extends AutoReconnectConnection { - readonly apis: NativeDBApis; + readonly apis: NativeDBApisWrapper; - constructor( - private readonly peer: string, - private readonly type: SpaceType, - private readonly id: string - ) { + readonly flavour = this.options.flavour; + readonly type = this.options.type; + readonly id = this.options.id; + + constructor(private readonly options: SqliteNativeDBOptions) { super(); + if (!apis) { - throw new Error('Not in electron context.'); + throw new Error('Not in native context.'); } - this.apis = this.bindApis(apis.nbstore); + this.apis = this.warpApis(apis); } override get shareId(): string { - return `sqlite:${this.peer}:${this.type}:${this.id}`; + return `sqlite:${this.flavour}:${this.type}:${this.id}`; } - bindApis(originalApis: NonNullable['nbstore']): NativeDBApis { + warpApis(originalApis: NativeDBApis): NativeDBApisWrapper { const id = universalId({ - peer: this.peer, + peer: this.flavour, type: this.type, id: this.id, }); - return new Proxy(originalApis, { - get: (target, key: keyof NativeDBApis) => { - const v = target[key]; - if (typeof v !== 'function') { - return v; - } + return new Proxy( + {}, + { + get: (_target, key: keyof NativeDBApisWrapper) => { + const v = originalApis[key]; - return async (...args: any[]) => { - return v.call( - originalApis, - id, - // @ts-expect-error I don't know why it complains ts(2556) - ...args - ); - }; - }, - }) as unknown as NativeDBApis; + return async (...args: any[]) => { + return v.call( + originalApis, + id, + // @ts-expect-error I don't know why it complains ts(2556) + ...args + ); + }; + }, + } + ) as unknown as NativeDBApisWrapper; } override async doConnect() { @@ -63,7 +142,7 @@ export class NativeDBConnection extends AutoReconnectConnection { } override doDisconnect() { - this.apis.close().catch(err => { + this.apis.disconnect().catch(err => { console.error('NativeDBConnection close failed', err); }); } diff --git a/packages/common/nbstore/src/impls/sqlite/doc.ts b/packages/common/nbstore/src/impls/sqlite/doc.ts index 1c2bd4f0df657..7b94d15d26d15 100644 --- a/packages/common/nbstore/src/impls/sqlite/doc.ts +++ b/packages/common/nbstore/src/impls/sqlite/doc.ts @@ -1,54 +1,82 @@ import { share } from '../../connection'; -import { type DocClock, DocStorageBase, type DocUpdate } from '../../storage'; -import { NativeDBConnection } from './db'; +import { + type DocClocks, + type DocRecord, + DocStorageBase, + type DocUpdate, +} from '../../storage'; +import { NativeDBConnection, type SqliteNativeDBOptions } from './db'; -export class SqliteDocStorage extends DocStorageBase { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); +export class SqliteDocStorage extends DocStorageBase { + static readonly identifier = 'SqliteDocStorage'; + override connection = share(new NativeDBConnection(this.options)); get db() { return this.connection.apis; } - override async getDoc(docId: string) { - return this.db.getDoc(docId); - } - override async pushDocUpdate(update: DocUpdate) { - return this.db.pushDocUpdate(update); + const timestamp = await this.db.pushUpdate(update.docId, update.bin); + + this.emit( + 'update', + { + docId: 
update.docId, + bin: update.bin, + timestamp, + editor: update.editor, + }, + origin + ); + + return { docId: update.docId, timestamp }; } override async deleteDoc(docId: string) { - return this.db.deleteDoc(docId); + await this.db.deleteDoc(docId); } override async getDocTimestamps(after?: Date) { - return this.db.getDocTimestamps(after ? new Date(after) : undefined); + const clocks = await this.db.getDocClocks(after); + + return clocks.reduce((ret, cur) => { + ret[cur.docId] = cur.timestamp; + return ret; + }, {} as DocClocks); } - override getDocTimestamp(docId: string): Promise { - return this.db.getDocTimestamp(docId); + override async getDocTimestamp(docId: string) { + return this.db.getDocClock(docId); } - protected override async getDocSnapshot() { - // handled in db - // see electron/src/helper/nbstore/doc.ts - return null; + protected override async getDocSnapshot(docId: string) { + const snapshot = await this.db.getDocSnapshot(docId); + + if (!snapshot) { + return null; + } + + return snapshot; } - protected override async setDocSnapshot(): Promise { - // handled in db - return true; + protected override async setDocSnapshot( + snapshot: DocRecord + ): Promise { + return this.db.setDocSnapshot({ + docId: snapshot.docId, + bin: snapshot.bin, + timestamp: snapshot.timestamp, + }); } - protected override async getDocUpdates() { - // handled in db - return []; + protected override async getDocUpdates(docId: string) { + return this.db.getDocUpdates(docId); } - protected override markUpdatesMerged() { - // handled in db - return Promise.resolve(0); + protected override markUpdatesMerged(docId: string, updates: DocRecord[]) { + return this.db.markUpdatesMerged( + docId, + updates.map(update => update.timestamp) + ); } } diff --git a/packages/common/nbstore/src/impls/sqlite/index.ts b/packages/common/nbstore/src/impls/sqlite/index.ts index debe733b43801..e9c1ece1f7cdb 100644 --- a/packages/common/nbstore/src/impls/sqlite/index.ts +++ b/packages/common/nbstore/src/impls/sqlite/index.ts @@ -1,3 +1,16 @@ +import type { StorageConstructor } from '..'; +import { SqliteBlobStorage } from './blob'; +import { SqliteDocStorage } from './doc'; +import { SqliteSyncStorage } from './sync'; + export * from './blob'; +export { bindNativeDBApis, type NativeDBApis } from './db'; export * from './doc'; export * from './sync'; +export * from './v1'; + +export const sqliteStorages = [ + SqliteDocStorage, + SqliteBlobStorage, + SqliteSyncStorage, +] satisfies StorageConstructor[]; diff --git a/packages/common/nbstore/src/impls/sqlite/sync.ts b/packages/common/nbstore/src/impls/sqlite/sync.ts index 6344fdb52698f..99e57420449ee 100644 --- a/packages/common/nbstore/src/impls/sqlite/sync.ts +++ b/packages/common/nbstore/src/impls/sqlite/sync.ts @@ -1,18 +1,26 @@ import { share } from '../../connection'; -import { BasicSyncStorage, type DocClock } from '../../storage'; -import { NativeDBConnection } from './db'; +import { type DocClock, SyncStorageBase } from '../../storage'; +import { NativeDBConnection, type SqliteNativeDBOptions } from './db'; -export class SqliteSyncStorage extends BasicSyncStorage { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); +export class SqliteSyncStorage extends SyncStorageBase { + static readonly identifier = 'SqliteSyncStorage'; + + override connection = share(new NativeDBConnection(this.options)); + + constructor(private readonly options: SqliteNativeDBOptions) { + super(); + } get db() { return this.connection.apis; } 
override async getPeerRemoteClocks(peer: string) { - return this.db.getPeerRemoteClocks(peer); + return this.db + .getPeerRemoteClocks(peer) + .then(clocks => + Object.fromEntries(clocks.map(clock => [clock.docId, clock.timestamp])) + ); } override async getPeerRemoteClock(peer: string, docId: string) { @@ -20,11 +28,15 @@ export class SqliteSyncStorage extends BasicSyncStorage { } override async setPeerRemoteClock(peer: string, clock: DocClock) { - await this.db.setPeerRemoteClock(peer, clock); + await this.db.setPeerRemoteClock(peer, clock.docId, clock.timestamp); } override async getPeerPulledRemoteClocks(peer: string) { - return this.db.getPeerPulledRemoteClocks(peer); + return this.db + .getPeerPulledRemoteClocks(peer) + .then(clocks => + Object.fromEntries(clocks.map(clock => [clock.docId, clock.timestamp])) + ); } override async getPeerPulledRemoteClock(peer: string, docId: string) { @@ -32,11 +44,15 @@ export class SqliteSyncStorage extends BasicSyncStorage { } override async setPeerPulledRemoteClock(peer: string, clock: DocClock) { - await this.db.setPeerPulledRemoteClock(peer, clock); + await this.db.setPeerPulledRemoteClock(peer, clock.docId, clock.timestamp); } override async getPeerPushedClocks(peer: string) { - return this.db.getPeerPushedClocks(peer); + return this.db + .getPeerPushedClocks(peer) + .then(clocks => + Object.fromEntries(clocks.map(clock => [clock.docId, clock.timestamp])) + ); } override async getPeerPushedClock(peer: string, docId: string) { @@ -44,7 +60,7 @@ export class SqliteSyncStorage extends BasicSyncStorage { } override async setPeerPushedClock(peer: string, clock: DocClock) { - await this.db.setPeerPushedClock(peer, clock); + await this.db.setPeerPushedClock(peer, clock.docId, clock.timestamp); } override async clearClocks() { diff --git a/packages/common/nbstore/src/impls/sqlite/v1/blob.ts b/packages/common/nbstore/src/impls/sqlite/v1/blob.ts index 7f3de15bb5ec8..229a4c28a5596 100644 --- a/packages/common/nbstore/src/impls/sqlite/v1/blob.ts +++ b/packages/common/nbstore/src/impls/sqlite/v1/blob.ts @@ -1,7 +1,7 @@ -import { apis } from '@affine/electron-api'; - import { DummyConnection } from '../../../connection'; import { BlobStorageBase } from '../../../storage'; +import type { SpaceType } from '../../../utils/universal-id'; +import { apis } from './db'; /** * @deprecated readonly @@ -9,18 +9,22 @@ import { BlobStorageBase } from '../../../storage'; export class SqliteV1BlobStorage extends BlobStorageBase { override connection = new DummyConnection(); - get db() { + constructor(private readonly options: { type: SpaceType; id: string }) { + super(); + } + + private get db() { if (!apis) { throw new Error('Not in electron context.'); } - return apis.db; + return apis; } override async get(key: string) { const data: Uint8Array | null = await this.db.getBlob( - this.spaceType, - this.spaceId, + this.options.type, + this.options.id, key ); @@ -38,12 +42,12 @@ export class SqliteV1BlobStorage extends BlobStorageBase { override async delete(key: string, permanently: boolean) { if (permanently) { - await this.db.deleteBlob(this.spaceType, this.spaceId, key); + await this.db.deleteBlob(this.options.type, this.options.id, key); } } override async list() { - const keys = await this.db.getBlobKeys(this.spaceType, this.spaceId); + const keys = await this.db.getBlobKeys(this.options.type, this.options.id); return keys.map(key => ({ key, diff --git a/packages/common/nbstore/src/impls/sqlite/v1/db.ts b/packages/common/nbstore/src/impls/sqlite/v1/db.ts new file 
mode 100644 index 0000000000000..169026efa9c73 --- /dev/null +++ b/packages/common/nbstore/src/impls/sqlite/v1/db.ts @@ -0,0 +1,26 @@ +import type { SpaceType } from '../../../utils/universal-id'; + +interface NativeDBV1Apis { + getBlob: ( + spaceType: SpaceType, + workspaceId: string, + key: string + ) => Promise<Uint8Array | null>; + deleteBlob: ( + spaceType: SpaceType, + workspaceId: string, + key: string + ) => Promise<void>; + getBlobKeys: (spaceType: SpaceType, workspaceId: string) => Promise<string[]>; + getDocAsUpdates: ( + spaceType: SpaceType, + workspaceId: string, + subdocId: string + ) => Promise<Uint8Array>; +} + +export let apis: NativeDBV1Apis | null = null; + +export function bindNativeDBV1Apis(a: NativeDBV1Apis) { + apis = a; +} diff --git a/packages/common/nbstore/src/impls/sqlite/v1/doc.ts b/packages/common/nbstore/src/impls/sqlite/v1/doc.ts index bcf108cbb998f..203fcfa6aefa1 100644 --- a/packages/common/nbstore/src/impls/sqlite/v1/doc.ts +++ b/packages/common/nbstore/src/impls/sqlite/v1/doc.ts @@ -1,24 +1,27 @@ -import { apis } from '@affine/electron-api'; - import { DummyConnection } from '../../../connection'; import { type DocRecord, DocStorageBase, type DocUpdate, } from '../../../storage'; +import type { SpaceType } from '../../../utils/universal-id'; +import { apis } from './db'; /** * @deprecated readonly */ -export class SqliteV1DocStorage extends DocStorageBase { +export class SqliteV1DocStorage extends DocStorageBase<{ + type: SpaceType; + id: string; +}> { override connection = new DummyConnection(); - get db() { + private get db() { if (!apis) { throw new Error('Not in electron context.'); } - return apis.db; + return apis; } override async pushDocUpdate(update: DocUpdate) { @@ -29,8 +32,8 @@ override async getDoc(docId: string) { const bin = await this.db.getDocAsUpdates( - this.spaceType, - this.spaceId, + this.options.type, + this.options.id, docId ); @@ -41,8 +44,8 @@ }; } - override async deleteDoc(docId: string) { - await this.db.deleteDoc(this.spaceType, this.spaceId, docId); + override async deleteDoc() { + return; } protected override async getDocSnapshot() { diff --git a/packages/common/nbstore/src/impls/sqlite/v1/index.ts b/packages/common/nbstore/src/impls/sqlite/v1/index.ts index d476ae6eb9b92..808cd280ea32f 100644 --- a/packages/common/nbstore/src/impls/sqlite/v1/index.ts +++ b/packages/common/nbstore/src/impls/sqlite/v1/index.ts @@ -1,2 +1,3 @@ export * from './blob'; +export { bindNativeDBV1Apis } from './db'; export * from './doc'; diff --git a/packages/common/nbstore/src/index.ts b/packages/common/nbstore/src/index.ts index db2f6eb6cf459..ac5f880564518 100644 --- a/packages/common/nbstore/src/index.ts +++ b/packages/common/nbstore/src/index.ts @@ -1,2 +1,5 @@ export * from './connection'; +export * from './frontend'; export * from './storage'; +export * from './sync'; +export * from './utils/universal-id'; diff --git a/packages/common/nbstore/src/storage/awareness.ts b/packages/common/nbstore/src/storage/awareness.ts index 489de1a0aa1ca..b185eaf836ee0 100644 --- a/packages/common/nbstore/src/storage/awareness.ts +++ b/packages/common/nbstore/src/storage/awareness.ts @@ -1,6 +1,5 @@ -import { type Storage, StorageBase, type StorageOptions } from './storage'; - -export interface AwarenessStorageOptions extends StorageOptions {} +import type { Connection } from '../connection'; +import { type Storage } from './storage'; export type AwarenessRecord = { docId: string; @@ -23,13 
diff --git a/packages/common/nbstore/src/storage/blob.ts b/packages/common/nbstore/src/storage/blob.ts
index 4ad70517eee98..553ee0590573e 100644
--- a/packages/common/nbstore/src/storage/blob.ts
+++ b/packages/common/nbstore/src/storage/blob.ts
@@ -1,6 +1,5 @@
-import { type Storage, StorageBase, type StorageOptions } from './storage';
-
-export interface BlobStorageOptions extends StorageOptions {}
+import type { Connection } from '../connection';
+import { type Storage } from './storage';
 
 export interface BlobRecord {
   key: string;
@@ -29,13 +28,9 @@ export interface BlobStorage extends Storage {
   list(signal?: AbortSignal): Promise<ListedBlobRecord[]>;
 }
 
-export abstract class BlobStorageBase<
-    Options extends BlobStorageOptions = BlobStorageOptions,
-  >
-  extends StorageBase<Options>
-  implements BlobStorage
-{
-  override readonly storageType = 'blob';
+export abstract class BlobStorageBase implements BlobStorage {
+  readonly storageType = 'blob';
+  abstract readonly connection: Connection;
 
   abstract get(key: string, signal?: AbortSignal): Promise<BlobRecord | null>;
   abstract set(blob: BlobRecord, signal?: AbortSignal): Promise<void>;
diff --git a/packages/common/nbstore/src/storage/doc.ts b/packages/common/nbstore/src/storage/doc.ts
index 9a2c5a0818607..f2541b31ec755 100644
--- a/packages/common/nbstore/src/storage/doc.ts
+++ b/packages/common/nbstore/src/storage/doc.ts
@@ -1,10 +1,11 @@
 import EventEmitter2 from 'eventemitter2';
 import { diffUpdate, encodeStateVectorFromUpdate, mergeUpdates } from 'yjs';
 
+import type { Connection } from '../connection';
 import { isEmptyUpdate } from '../utils/is-empty-update';
 import type { Locker } from './lock';
 import { SingletonLocker } from './lock';
-import { type Storage, StorageBase, type StorageOptions } from './storage';
+import { type Storage } from './storage';
 
 export interface DocClock {
   docId: string;
@@ -33,13 +34,19 @@ export interface Editor {
   avatarUrl: string | null;
 }
 
-export interface DocStorageOptions extends StorageOptions {
+export interface DocStorageOptions {
   mergeUpdates?: (updates: Uint8Array[]) => Promise<Uint8Array> | Uint8Array;
+  id: string;
+
+  /**
+   * Open in readonly mode.
+   */
+  readonlyMode?: boolean;
 }
 
 export interface DocStorage extends Storage {
   readonly storageType: 'doc';
-
+  readonly isReadonly: boolean;
   /**
    * Get a doc record with latest binary.
    */
@@ -88,18 +95,22 @@
   ): () => void;
 }
 
-export abstract class DocStorageBase<
-    Opts extends DocStorageOptions = DocStorageOptions,
-  >
-  extends StorageBase<Opts>
-  implements DocStorage
-{
+export abstract class DocStorageBase<Opts = unknown> implements DocStorage {
+  get isReadonly(): boolean {
+    return this.options.readonlyMode ?? false;
+  }
+
   private readonly event = new EventEmitter2();
-  override readonly storageType = 'doc';
+  readonly storageType = 'doc';
+  abstract readonly connection: Connection;
   protected readonly locker: Locker = new SingletonLocker();
+  protected readonly spaceId = this.options.id;
+
+  constructor(protected readonly options: Opts & DocStorageOptions) {}
 
   async getDoc(docId: string) {
-    await using _lock = await this.lockDocForUpdate(docId);
+    await using _lock = this.isReadonly
+      ? undefined
+      : await this.lockDocForUpdate(docId);
     const snapshot = await this.getDocSnapshot(docId);
     const updates = await this.getDocUpdates(docId);
@@ -117,10 +128,13 @@
       editor,
     };
 
-    await this.setDocSnapshot(newSnapshot, snapshot);
+    // if readonly, we will not set the new snapshot
+    if (!this.isReadonly) {
+      await this.setDocSnapshot(newSnapshot, snapshot);
 
-    // always mark updates as merged unless throws
-    await this.markUpdatesMerged(docId, updates);
+      // always mark updates as merged unless throws
+      await this.markUpdatesMerged(docId, updates);
+    }
 
     return newSnapshot;
   }
diff --git a/packages/common/nbstore/src/storage/dummy/awareness.ts b/packages/common/nbstore/src/storage/dummy/awareness.ts
new file mode 100644
index 0000000000000..cd0de30e7ab52
--- /dev/null
+++ b/packages/common/nbstore/src/storage/dummy/awareness.ts
@@ -0,0 +1,16 @@
+import { DummyConnection } from '../../connection';
+import { type AwarenessRecord, AwarenessStorageBase } from '../awareness';
+
+export class DummyAwarenessStorage extends AwarenessStorageBase {
+  override update(_record: AwarenessRecord, _origin?: string): Promise<void> {
+    return Promise.resolve();
+  }
+  override subscribeUpdate(
+    _id: string,
+    _onUpdate: (update: AwarenessRecord, origin?: string) => void,
+    _onCollect: () => Promise<AwarenessRecord | null>
+  ): () => void {
+    return () => {};
+  }
+  override connection = new DummyConnection();
+}
diff --git a/packages/common/nbstore/src/storage/dummy/blob.ts b/packages/common/nbstore/src/storage/dummy/blob.ts
new file mode 100644
index 0000000000000..b4f9354d31a09
--- /dev/null
+++ b/packages/common/nbstore/src/storage/dummy/blob.ts
@@ -0,0 +1,32 @@
+import { DummyConnection } from '../../connection';
+import {
+  type BlobRecord,
+  BlobStorageBase,
+  type ListedBlobRecord,
+} from '../blob';
+
+export class DummyBlobStorage extends BlobStorageBase {
+  override get(
+    _key: string,
+    _signal?: AbortSignal
+  ): Promise<BlobRecord | null> {
+    return Promise.resolve(null);
+  }
+  override set(_blob: BlobRecord, _signal?: AbortSignal): Promise<void> {
+    return Promise.resolve();
+  }
+  override delete(
+    _key: string,
+    _permanently: boolean,
+    _signal?: AbortSignal
+  ): Promise<void> {
+    return Promise.resolve();
+  }
+  override release(_signal?: AbortSignal): Promise<void> {
+    return Promise.resolve();
+  }
+  override list(_signal?: AbortSignal): Promise<ListedBlobRecord[]> {
+    return Promise.resolve([]);
+  }
+  override connection = new DummyConnection();
+}
diff --git a/packages/common/nbstore/src/storage/dummy/doc.ts b/packages/common/nbstore/src/storage/dummy/doc.ts
new file mode 100644
index 0000000000000..18b059099d94a
--- /dev/null
+++ b/packages/common/nbstore/src/storage/dummy/doc.ts
@@ -0,0 +1,41 @@
+import { DummyConnection } from '../../connection';
+import {
+  type DocClock,
+  type DocClocks,
+  type DocDiff,
+  type DocRecord,
+  type DocStorage,
+  type DocUpdate,
+} from '../doc';
+
+export class DummyDocStorage implements DocStorage {
+  readonly storageType = 'doc';
+  readonly isReadonly = true;
+  getDoc(_docId: string): Promise<DocRecord | null> {
+    return Promise.resolve(null);
+  }
+  getDocDiff(_docId: string, _state?: Uint8Array): Promise<DocDiff | null> {
+    return Promise.resolve(null);
+  }
+  pushDocUpdate(update: DocUpdate, _origin?: string): Promise<DocClock> {
+    return Promise.resolve({
+      docId: update.docId,
+      timestamp: new Date(),
+    });
+  }
+  getDocTimestamp(_docId: string): Promise<DocClock | null> {
+    return Promise.resolve(null);
+  }
+  getDocTimestamps(_after?: Date): Promise<DocClocks> {
+    return Promise.resolve({});
+  }
+  deleteDoc(_docId: string): Promise<void> {
+    return Promise.resolve();
+  }
+  subscribeDocUpdate(
+    _callback: (update: DocRecord, origin?: string) => void
+  ): () => void {
+    return () => {};
+  }
+  connection = new DummyConnection();
+}
diff --git a/packages/common/nbstore/src/storage/dummy/sync.ts b/packages/common/nbstore/src/storage/dummy/sync.ts
new file mode 100644
index 0000000000000..52d3d90460de3
--- /dev/null
+++ b/packages/common/nbstore/src/storage/dummy/sync.ts
@@ -0,0 +1,49 @@
+import { DummyConnection } from '../../connection';
+import type { DocClock, DocClocks } from '../doc';
+import { SyncStorageBase } from '../sync';
+
+export class DummySyncStorage extends SyncStorageBase {
+  override getPeerRemoteClock(
+    _peer: string,
+    _docId: string
+  ): Promise<DocClock | null> {
+    return Promise.resolve(null);
+  }
+  override getPeerRemoteClocks(_peer: string): Promise<DocClocks> {
+    return Promise.resolve({});
+  }
+  override setPeerRemoteClock(_peer: string, _clock: DocClock): Promise<void> {
+    return Promise.resolve();
+  }
+  override getPeerPulledRemoteClock(
+    _peer: string,
+    _docId: string
+  ): Promise<DocClock | null> {
+    return Promise.resolve(null);
+  }
+  override getPeerPulledRemoteClocks(_peer: string): Promise<DocClocks> {
+    return Promise.resolve({});
+  }
+  override setPeerPulledRemoteClock(
+    _peer: string,
+    _clock: DocClock
+  ): Promise<void> {
+    return Promise.resolve();
+  }
+  override getPeerPushedClock(
+    _peer: string,
+    _docId: string
+  ): Promise<DocClock | null> {
+    return Promise.resolve(null);
+  }
+  override getPeerPushedClocks(_peer: string): Promise<DocClocks> {
+    return Promise.resolve({});
+  }
+  override setPeerPushedClock(_peer: string, _clock: DocClock): Promise<void> {
+    return Promise.resolve();
+  }
+  override clearClocks(): Promise<void> {
+    return Promise.resolve();
+  }
+  override connection = new DummyConnection();
+}
diff --git a/packages/common/nbstore/src/storage/errors/index.ts b/packages/common/nbstore/src/storage/errors/index.ts
new file mode 100644
index 0000000000000..38d63766cf677
--- /dev/null
+++ b/packages/common/nbstore/src/storage/errors/index.ts
@@ -0,0 +1 @@
+export * from './over-capacity';
diff --git a/packages/common/nbstore/src/storage/errors/over-capacity.ts b/packages/common/nbstore/src/storage/errors/over-capacity.ts
new file mode 100644
index 0000000000000..574ce93332837
--- /dev/null
+++ b/packages/common/nbstore/src/storage/errors/over-capacity.ts
@@ -0,0 +1,5 @@
+export class OverCapacityError extends Error {
+  constructor(public originError?: any) {
+    super('Storage over capacity. Origin error: ' + originError);
+  }
+}
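OverCapacityError is what lets blob sync distinguish "the remote is out of quota" from transient failures. A small consumer-side sketch of that contract (the helper name is hypothetical): a remote BlobStorage.set implementation throws OverCapacityError, and callers such as BlobSyncImpl.uploadBlob below catch it to flip their over-capacity state instead of retrying:

import { OverCapacityError } from '@affine/nbstore';

// Hypothetical wrapper around a remote set() call.
async function trySetOnRemote(set: () => Promise<void>): Promise<boolean> {
  try {
    await set();
    return true;
  } catch (err) {
    if (err instanceof OverCapacityError) {
      // quota exhausted: report it and stop, retrying will not help
      console.warn('remote storage over capacity', err.originError);
      return false;
    }
    throw err; // anything else is unexpected, let it bubble up
  }
}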
diff --git a/packages/common/nbstore/src/storage/index.ts b/packages/common/nbstore/src/storage/index.ts
index eaea342bb93a5..106f760315166 100644
--- a/packages/common/nbstore/src/storage/index.ts
+++ b/packages/common/nbstore/src/storage/index.ts
@@ -3,56 +3,60 @@ import EventEmitter2 from 'eventemitter2';
 
 import type { AwarenessStorage } from './awareness';
 import type { BlobStorage } from './blob';
 import type { DocStorage } from './doc';
-import type { Storage, StorageType } from './storage';
+import { DummyAwarenessStorage } from './dummy/awareness';
+import { DummyBlobStorage } from './dummy/blob';
+import { DummyDocStorage } from './dummy/doc';
+import { DummySyncStorage } from './dummy/sync';
+import type { StorageType } from './storage';
 import type { SyncStorage } from './sync';
 
 type Storages = DocStorage | BlobStorage | SyncStorage | AwarenessStorage;
 
+export type SpaceStorageOptions = {
+  [K in StorageType]?: Storages & { storageType: K };
+};
+
 export class SpaceStorage {
-  protected readonly storages: Map<StorageType, Storage> = new Map();
+  protected readonly storages: {
+    [K in StorageType]: Storages & { storageType: K };
+  };
   private readonly event = new EventEmitter2();
   private readonly disposables: Set<() => void> = new Set();
 
-  constructor(storages: Storage[] = []) {
-    this.storages = new Map(
-      storages.map(storage => [storage.storageType, storage])
-    );
-  }
-
-  tryGet<T extends StorageType>(
-    type: T
-  ): Extract<Storages, { storageType: T }> | undefined {
-    return this.storages.get(type) as unknown as Extract<
-      Storages,
-      { storageType: T }
-    >;
+  constructor(storages: SpaceStorageOptions) {
+    this.storages = {
+      awareness: storages.awareness ?? new DummyAwarenessStorage(),
+      blob: storages.blob ?? new DummyBlobStorage(),
+      doc: storages.doc ?? new DummyDocStorage(),
+      sync: storages.sync ??
new DummySyncStorage(), + }; } get(type: T): Extract { - const storage = this.tryGet(type); + const storage = this.storages[type]; if (!storage) { throw new Error(`Storage ${type} not registered.`); } - return storage as Extract; + return storage as unknown as Extract; } connect() { - Array.from(this.storages.values()).forEach(storage => { + Object.values(this.storages).forEach(storage => { storage.connection.connect(); }); } disconnect() { - Array.from(this.storages.values()).forEach(storage => { + Object.values(this.storages).forEach(storage => { storage.connection.disconnect(); }); } async waitForConnected(signal?: AbortSignal) { await Promise.all( - Array.from(this.storages.values()).map(storage => + Object.values(this.storages).map(storage => storage.connection.waitForConnected(signal) ) ); @@ -61,13 +65,13 @@ export class SpaceStorage { async destroy() { this.disposables.forEach(disposable => disposable()); this.event.removeAllListeners(); - this.storages.clear(); } } export * from './awareness'; export * from './blob'; export * from './doc'; +export * from './errors'; export * from './history'; export * from './storage'; export * from './sync'; diff --git a/packages/common/nbstore/src/storage/storage.ts b/packages/common/nbstore/src/storage/storage.ts index 9f54fbd222297..7d4510303747b 100644 --- a/packages/common/nbstore/src/storage/storage.ts +++ b/packages/common/nbstore/src/storage/storage.ts @@ -1,120 +1,8 @@ import type { Connection } from '../connection'; -export type SpaceType = 'workspace' | 'userspace'; export type StorageType = 'blob' | 'doc' | 'sync' | 'awareness'; -export interface StorageOptions { - peer: string; - type: SpaceType; - id: string; -} - -export function universalId({ peer, type, id }: StorageOptions) { - return `@peer(${peer});@type(${type});@id(${id});`; -} - -export function isValidSpaceType(type: string): type is SpaceType { - return type === 'workspace' || type === 'userspace'; -} - -export function isValidUniversalId(opts: Record): boolean { - const requiredKeys: Array = [ - 'peer', - 'type', - 'id', - ] as const; - - for (const key of requiredKeys) { - if (!opts[key]) { - return false; - } - } - - return isValidSpaceType(opts.type); -} - -export function parseUniversalId(id: string) { - const result: Partial = {}; - let key = ''; - let value = ''; - let isInValue = false; - - let i = -1; - - while (++i < id.length) { - const ch = id[i]; - const nextCh = id[i + 1]; - - // when we are in value string, we only care about ch and next char to be [')', ';'] to end the id part - if (isInValue) { - if (ch === ')' && nextCh === ';') { - // @ts-expect-error we know the key is valid - result[key] = value; - key = ''; - value = ''; - isInValue = false; - i++; - continue; - } - - value += ch; - continue; - } - - if (ch === '@') { - const keyEnd = id.indexOf('(', i); - // we find '@' but no '(' in lookahead or '(' is immediately after '@', invalid id - if (keyEnd === -1 || keyEnd === i + 1) { - break; - } - - key = id.slice(i + 1, keyEnd); - i = keyEnd; - isInValue = true; - } else { - break; - } - } - - if (!isValidUniversalId(result)) { - throw new Error( - `Invalid universal storage id: ${id}. 
It should be in format of @peer(\${peer});@type(\${type});@id(\${id});` - ); - } - - return result as StorageOptions; -} - export interface Storage { readonly storageType: StorageType; readonly connection: Connection; - readonly peer: string; - readonly spaceType: string; - readonly spaceId: string; - readonly universalId: string; -} - -export abstract class StorageBase - implements Storage -{ - abstract readonly storageType: StorageType; - abstract readonly connection: Connection; - - get peer() { - return this.options.peer; - } - - get spaceType() { - return this.options.type; - } - - get spaceId() { - return this.options.id; - } - - get universalId() { - return universalId(this.options); - } - - constructor(public readonly options: Opts) {} } diff --git a/packages/common/nbstore/src/storage/sync.ts b/packages/common/nbstore/src/storage/sync.ts index 9edda9b6305cc..ceec4d37a17b7 100644 --- a/packages/common/nbstore/src/storage/sync.ts +++ b/packages/common/nbstore/src/storage/sync.ts @@ -1,7 +1,6 @@ +import type { Connection } from '../connection'; import type { DocClock, DocClocks } from './doc'; -import { type Storage, StorageBase, type StorageOptions } from './storage'; - -export interface SyncStorageOptions extends StorageOptions {} +import { type Storage } from './storage'; export interface SyncStorage extends Storage { readonly storageType: 'sync'; @@ -21,13 +20,9 @@ export interface SyncStorage extends Storage { clearClocks(): Promise; } -export abstract class BasicSyncStorage< - Opts extends SyncStorageOptions = SyncStorageOptions, - > - extends StorageBase - implements SyncStorage -{ - override readonly storageType = 'sync'; +export abstract class SyncStorageBase implements SyncStorage { + readonly storageType = 'sync'; + abstract readonly connection: Connection; abstract getPeerRemoteClock( peer: string, diff --git a/packages/common/nbstore/src/sync/awareness/index.ts b/packages/common/nbstore/src/sync/awareness/index.ts index 51971448f3279..7a905647c3b1e 100644 --- a/packages/common/nbstore/src/sync/awareness/index.ts +++ b/packages/common/nbstore/src/sync/awareness/index.ts @@ -2,6 +2,7 @@ import type { AwarenessRecord, AwarenessStorage, } from '../../storage/awareness'; +import type { PeerStorageOptions } from '../types'; export interface AwarenessSync { update(record: AwarenessRecord, origin?: string): Promise; @@ -13,14 +14,13 @@ export interface AwarenessSync { } export class AwarenessSyncImpl implements AwarenessSync { - constructor( - readonly local: AwarenessStorage, - readonly remotes: AwarenessStorage[] - ) {} + constructor(readonly storages: PeerStorageOptions) {} async update(record: AwarenessRecord, origin?: string) { await Promise.all( - [this.local, ...this.remotes].map(peer => peer.update(record, origin)) + [this.storages.local, ...Object.values(this.storages.remotes)].map(peer => + peer.update(record, origin) + ) ); } @@ -29,9 +29,10 @@ export class AwarenessSyncImpl implements AwarenessSync { onUpdate: (update: AwarenessRecord, origin?: string) => void, onCollect: () => Promise ): () => void { - const unsubscribes = [this.local, ...this.remotes].map(peer => - peer.subscribeUpdate(id, onUpdate, onCollect) - ); + const unsubscribes = [ + this.storages.local, + ...Object.values(this.storages.remotes), + ].map(peer => peer.subscribeUpdate(id, onUpdate, onCollect)); return () => { unsubscribes.forEach(unsubscribe => unsubscribe()); }; diff --git a/packages/common/nbstore/src/sync/blob/index.ts b/packages/common/nbstore/src/sync/blob/index.ts index 
8cafa4816ba98..a561f94e50168 100644
--- a/packages/common/nbstore/src/sync/blob/index.ts
+++ b/packages/common/nbstore/src/sync/blob/index.ts
@@ -1,34 +1,48 @@
+import EventEmitter2 from 'eventemitter2';
 import { difference } from 'lodash-es';
+import { BehaviorSubject, type Observable } from 'rxjs';
 
 import type { BlobRecord, BlobStorage } from '../../storage';
+import { OverCapacityError } from '../../storage';
 import { MANUALLY_STOP, throwIfAborted } from '../../utils/throw-if-aborted';
+import type { PeerStorageOptions } from '../types';
+
+export interface BlobSyncState {
+  isStorageOverCapacity: boolean;
+}
 
 export interface BlobSync {
+  readonly state$: Observable<BlobSyncState>;
   downloadBlob(
     blobId: string,
     signal?: AbortSignal
  ): Promise<BlobRecord | null>;
   uploadBlob(blob: BlobRecord, signal?: AbortSignal): Promise<void>;
+  fullSync(signal?: AbortSignal): Promise<void>;
+  setMaxBlobSize(size: number): void;
+  onReachedMaxBlobSize(cb: (byteSize: number) => void): () => void;
 }
 
 export class BlobSyncImpl implements BlobSync {
+  readonly state$ = new BehaviorSubject<BlobSyncState>({
+    isStorageOverCapacity: false,
+  });
   private abort: AbortController | null = null;
+  private maxBlobSize: number = 1024 * 1024 * 100; // 100MB
+  readonly event = new EventEmitter2();
 
-  constructor(
-    readonly local: BlobStorage,
-    readonly remotes: BlobStorage[]
-  ) {}
+  constructor(readonly storages: PeerStorageOptions<BlobStorage>) {}
 
   async downloadBlob(blobId: string, signal?: AbortSignal) {
-    const localBlob = await this.local.get(blobId, signal);
+    const localBlob = await this.storages.local.get(blobId, signal);
     if (localBlob) {
       return localBlob;
     }
 
-    for (const storage of this.remotes) {
+    for (const storage of Object.values(this.storages.remotes)) {
      const data = await storage.get(blobId, signal);
       if (data) {
-        await this.local.set(data, signal);
+        await this.storages.local.set(data, signal);
         return data;
       }
     }
@@ -36,21 +50,36 @@
   }
 
   async uploadBlob(blob: BlobRecord, signal?: AbortSignal) {
-    await this.local.set(blob);
+    if (blob.data.length > this.maxBlobSize) {
+      this.event.emit('abort-large-blob', blob.data.length);
+      console.error('blob over limit, abort set');
+      // abort the set as announced above: oversized blobs are neither
+      // stored locally nor pushed to remotes
+      return;
+    }
+
+    await this.storages.local.set(blob);
     await Promise.allSettled(
-      this.remotes.map(remote => remote.set(blob, signal))
+      Object.values(this.storages.remotes).map(async remote => {
+        try {
+          return await remote.set(blob, signal);
+        } catch (err) {
+          if (err instanceof OverCapacityError) {
+            this.state$.next({ isStorageOverCapacity: true });
+          }
+          throw err;
+        }
+      })
     );
   }
 
-  private async sync(signal?: AbortSignal) {
+  async fullSync(signal?: AbortSignal) {
     throwIfAborted(signal);
 
-    for (const remote of this.remotes) {
+    for (const [remotePeer, remote] of Object.entries(this.storages.remotes)) {
       let localList: string[] = [];
       let remoteList: string[] = [];
 
       try {
-        localList = (await this.local.list(signal)).map(b => b.key);
+        localList = (await this.storages.local.list(signal)).map(b => b.key);
         throwIfAborted(signal);
         remoteList = (await remote.list(signal)).map(b => b.key);
         throwIfAborted(signal);
@@ -65,7 +93,7 @@
       const needUpload = difference(localList, remoteList);
       for (const key of needUpload) {
         try {
-          const data = await this.local.get(key, signal);
+          const data = await this.storages.local.get(key, signal);
           throwIfAborted(signal);
           if (data) {
             await remote.set(data, signal);
@@ -76,7 +104,7 @@
             throw err;
           }
           console.error(
-            `error when sync ${key} from
[${this.local.peer}] to [${remote.peer}]`, + `error when sync ${key} from [local] to [${remotePeer}]`, err ); } @@ -89,7 +117,7 @@ export class BlobSyncImpl implements BlobSync { const data = await remote.get(key, signal); throwIfAborted(signal); if (data) { - await this.local.set(data, signal); + await this.storages.local.set(data, signal); throwIfAborted(signal); } } catch (err) { @@ -97,7 +125,7 @@ export class BlobSyncImpl implements BlobSync { throw err; } console.error( - `error when sync ${key} from [${remote.peer}] to [${this.local.peer}]`, + `error when sync ${key} from [${remotePeer}] to [local]`, err ); } @@ -107,13 +135,13 @@ export class BlobSyncImpl implements BlobSync { start() { if (this.abort) { - this.abort.abort(); + this.abort.abort(MANUALLY_STOP); } const abort = new AbortController(); this.abort = abort; - this.sync(abort.signal).catch(error => { + this.fullSync(abort.signal).catch(error => { if (error === MANUALLY_STOP) { return; } @@ -130,4 +158,15 @@ export class BlobSyncImpl implements BlobSync { // TODO: implement return () => {}; } + + setMaxBlobSize(size: number): void { + this.maxBlobSize = size; + } + + onReachedMaxBlobSize(cb: (byteSize: number) => void): () => void { + this.event.on('abort-large-blob', cb); + return () => { + this.event.off('abort-large-blob', cb); + }; + } } diff --git a/packages/common/nbstore/src/sync/doc/index.ts b/packages/common/nbstore/src/sync/doc/index.ts index e5465051f903e..b179c2b992a34 100644 --- a/packages/common/nbstore/src/sync/doc/index.ts +++ b/packages/common/nbstore/src/sync/doc/index.ts @@ -1,17 +1,23 @@ import type { Observable } from 'rxjs'; -import { combineLatest, map } from 'rxjs'; +import { combineLatest, map, of } from 'rxjs'; import type { DocStorage, SyncStorage } from '../../storage'; +import { DummyDocStorage } from '../../storage/dummy/doc'; +import { DummySyncStorage } from '../../storage/dummy/sync'; +import { MANUALLY_STOP } from '../../utils/throw-if-aborted'; +import type { PeerStorageOptions } from '../types'; import { DocSyncPeer } from './peer'; export interface DocSyncState { total: number; syncing: number; + synced: boolean; retrying: boolean; errorMessage: string | null; } export interface DocSyncDocState { + synced: boolean; syncing: boolean; retrying: boolean; errorMessage: string | null; @@ -24,43 +30,70 @@ export interface DocSync { } export class DocSyncImpl implements DocSync { - private readonly peers: DocSyncPeer[] = this.remotes.map( - remote => new DocSyncPeer(this.local, this.sync, remote) + private readonly peers: DocSyncPeer[] = Object.entries( + this.storages.remotes + ).map( + ([peerId, remote]) => + new DocSyncPeer(peerId, this.storages.local, this.sync, remote) ); private abort: AbortController | null = null; - readonly state$: Observable = combineLatest( - this.peers.map(peer => peer.peerState$) - ).pipe( - map(allPeers => ({ - total: allPeers.reduce((acc, peer) => acc + peer.total, 0), - syncing: allPeers.reduce((acc, peer) => acc + peer.syncing, 0), - retrying: allPeers.some(peer => peer.retrying), - errorMessage: - allPeers.find(peer => peer.errorMessage)?.errorMessage ?? 
null, - })) - ); + get state$() { + return combineLatest(this.peers.map(peer => peer.peerState$)).pipe( + map(allPeers => ({ + total: allPeers.reduce((acc, peer) => Math.max(acc, peer.total), 0), + syncing: allPeers.reduce((acc, peer) => Math.max(acc, peer.syncing), 0), + synced: allPeers.every(peer => peer.synced), + retrying: allPeers.some(peer => peer.retrying), + errorMessage: + allPeers.find(peer => peer.errorMessage)?.errorMessage ?? null, + })) + ) as Observable; + } constructor( - readonly local: DocStorage, - readonly sync: SyncStorage, - readonly remotes: DocStorage[] + readonly storages: PeerStorageOptions, + readonly sync: SyncStorage ) {} + /** + * for testing + */ + static get dummy() { + return new DocSyncImpl( + { + local: new DummyDocStorage(), + remotes: {}, + }, + new DummySyncStorage() + ); + } + docState$(docId: string): Observable { + if (this.peers.length === 0) { + return of({ + errorMessage: null, + retrying: false, + syncing: false, + synced: true, + }); + } return combineLatest(this.peers.map(peer => peer.docState$(docId))).pipe( - map(allPeers => ({ - errorMessage: - allPeers.find(peer => peer.errorMessage)?.errorMessage ?? null, - retrying: allPeers.some(peer => peer.retrying), - syncing: allPeers.some(peer => peer.syncing), - })) + map(allPeers => { + return { + errorMessage: + allPeers.find(peer => peer.errorMessage)?.errorMessage ?? null, + retrying: allPeers.some(peer => peer.retrying), + syncing: allPeers.some(peer => peer.syncing), + synced: allPeers.every(peer => peer.synced), + }; + }) ); } start() { if (this.abort) { - this.abort.abort(); + this.abort.abort(MANUALLY_STOP); } const abort = new AbortController(); this.abort = abort; diff --git a/packages/common/nbstore/src/sync/doc/peer.ts b/packages/common/nbstore/src/sync/doc/peer.ts index 2ffff7299a088..c12a4629f3fbb 100644 --- a/packages/common/nbstore/src/sync/doc/peer.ts +++ b/packages/common/nbstore/src/sync/doc/peer.ts @@ -43,6 +43,7 @@ interface Status { remoteClocks: ClockMap; syncing: boolean; retrying: boolean; + skipped: boolean; errorMessage: string | null; } @@ -50,11 +51,13 @@ interface PeerState { total: number; syncing: number; retrying: boolean; + synced: boolean; errorMessage: string | null; } interface PeerDocState { syncing: boolean; + synced: boolean; retrying: boolean; errorMessage: string | null; } @@ -92,10 +95,11 @@ export class DocSyncPeer { /** * random unique id for recognize self in "update" event */ - private readonly uniqueId = `sync:${this.local.universalId}:${this.remote.universalId}:${nanoid()}`; + private readonly uniqueId = `sync:${this.peerId}:${nanoid()}`; private readonly prioritySettings = new Map(); constructor( + readonly peerId: string, readonly local: DocStorage, readonly syncMetadata: SyncStorage, readonly remote: DocStorage, @@ -110,43 +114,59 @@ export class DocSyncPeer { remoteClocks: new ClockMap(new Map()), syncing: false, retrying: false, + skipped: false, errorMessage: null, }; private readonly statusUpdatedSubject$ = new Subject(); - peerState$ = new Observable(subscribe => { - const next = () => { - if (!this.status.syncing) { - // if syncing = false, jobMap is empty - subscribe.next({ - total: this.status.docs.size, - syncing: this.status.docs.size, - retrying: this.status.retrying, - errorMessage: this.status.errorMessage, - }); - } else { - const syncing = this.status.jobMap.size; - subscribe.next({ - total: this.status.docs.size, - syncing: syncing, - retrying: this.status.retrying, - errorMessage: this.status.errorMessage, - }); - } - }; - 
next(); - return this.statusUpdatedSubject$.subscribe(() => { + get peerState$() { + return new Observable(subscribe => { + const next = () => { + if (this.status.skipped) { + subscribe.next({ + total: 0, + syncing: 0, + synced: true, + retrying: false, + errorMessage: null, + }); + } else if (!this.status.syncing) { + // if syncing = false, jobMap is empty + subscribe.next({ + total: this.status.docs.size, + syncing: this.status.docs.size, + synced: false, + retrying: this.status.retrying, + errorMessage: this.status.errorMessage, + }); + } else { + const syncing = this.status.jobMap.size; + subscribe.next({ + total: this.status.docs.size, + syncing: syncing, + retrying: this.status.retrying, + errorMessage: this.status.errorMessage, + synced: syncing === 0, + }); + } + }; next(); + return this.statusUpdatedSubject$.subscribe(() => { + next(); + }); }); - }); + } docState$(docId: string) { return new Observable(subscribe => { const next = () => { + const syncing = + !this.status.connectedDocs.has(docId) || + this.status.jobMap.has(docId); + subscribe.next({ - syncing: - !this.status.connectedDocs.has(docId) || - this.status.jobMap.has(docId), + syncing: syncing, + synced: !syncing, retrying: this.status.retrying, errorMessage: this.status.errorMessage, }); @@ -161,22 +181,21 @@ export class DocSyncPeer { private readonly jobs = createJobErrorCatcher({ connect: async (docId: string, signal?: AbortSignal) => { const pushedClock = - (await this.syncMetadata.getPeerPushedClock(this.remote.peer, docId)) + (await this.syncMetadata.getPeerPushedClock(this.peerId, docId)) ?.timestamp ?? null; const clock = await this.local.getDocTimestamp(docId); throwIfAborted(signal); - if (pushedClock === null || pushedClock !== clock?.timestamp) { + if ( + !this.remote.isReadonly && + (pushedClock === null || pushedClock !== clock?.timestamp) + ) { await this.jobs.pullAndPush(docId, signal); } else { // no need to push const pulled = - ( - await this.syncMetadata.getPeerPulledRemoteClock( - this.remote.peer, - docId - ) - )?.timestamp ?? null; + (await this.syncMetadata.getPeerPulledRemoteClock(this.peerId, docId)) + ?.timestamp ?? 
null; if (pulled === null || pulled !== this.status.remoteClocks.get(docId)) { await this.jobs.pull(docId, signal); } @@ -214,7 +233,7 @@ export class DocSyncPeer { }); } throwIfAborted(signal); - await this.syncMetadata.setPeerPushedClock(this.remote.peer, { + await this.syncMetadata.setPeerPushedClock(this.peerId, { docId, timestamp: maxClock, }); @@ -249,7 +268,7 @@ export class DocSyncPeer { this.uniqueId ); throwIfAborted(signal); - await this.syncMetadata.setPeerPulledRemoteClock(this.remote.peer, { + await this.syncMetadata.setPeerPulledRemoteClock(this.peerId, { docId, timestamp: remoteClock, }); @@ -273,7 +292,7 @@ export class DocSyncPeer { }); } throwIfAborted(signal); - await this.syncMetadata.setPeerPushedClock(this.remote.peer, { + await this.syncMetadata.setPeerPushedClock(this.peerId, { docId, timestamp: localClock, }); @@ -294,7 +313,7 @@ export class DocSyncPeer { remoteClock, }); } - await this.syncMetadata.setPeerPushedClock(this.remote.peer, { + await this.syncMetadata.setPeerPushedClock(this.peerId, { docId, timestamp: localDocRecord.timestamp, }); @@ -322,7 +341,7 @@ export class DocSyncPeer { this.uniqueId ); throwIfAborted(signal); - await this.syncMetadata.setPeerPulledRemoteClock(this.remote.peer, { + await this.syncMetadata.setPeerPulledRemoteClock(this.peerId, { docId, timestamp: remoteClock, }); @@ -360,7 +379,7 @@ export class DocSyncPeer { ); throwIfAborted(signal); - await this.syncMetadata.setPeerPulledRemoteClock(this.remote.peer, { + await this.syncMetadata.setPeerPulledRemoteClock(this.peerId, { docId, timestamp: remoteClock, }); @@ -372,7 +391,7 @@ export class DocSyncPeer { updateRemoteClock: async (docId: string, remoteClock: Date) => { const updated = this.status.remoteClocks.setIfBigger(docId, remoteClock); if (updated) { - await this.syncMetadata.setPeerRemoteClock(this.remote.peer, { + await this.syncMetadata.setPeerRemoteClock(this.peerId, { docId, timestamp: remoteClock, }); @@ -455,6 +474,7 @@ export class DocSyncPeer { jobMap: new Map(), remoteClocks: new ClockMap(new Map()), syncing: false, + skipped: false, // tell ui to show retrying status retrying: true, // error message from last retry @@ -482,6 +502,17 @@ export class DocSyncPeer { private async retryLoop(signal?: AbortSignal) { throwIfAborted(signal); + if (this.local.isReadonly) { + // Local is readonly, skip sync + this.status.skipped = true; + this.statusUpdatedSubject$.next(true); + await new Promise((_, reject) => { + signal?.addEventListener('abort', reason => { + reject(reason); + }); + }); + return; + } const abort = new AbortController(); signal?.addEventListener('abort', reason => { @@ -536,8 +567,8 @@ export class DocSyncPeer { if ( origin === this.uniqueId || origin?.startsWith( - `sync:${this.local.peer}:${this.remote.peer}:` - // skip if local and remote is same + `sync:${this.peerId}:` + // skip if peerId is same ) ) { return; @@ -572,7 +603,7 @@ export class DocSyncPeer { // get cached clocks from metadata const cachedClocks = await this.syncMetadata.getPeerRemoteClocks( - this.remote.peer + this.peerId ); throwIfAborted(signal); for (const [id, v] of Object.entries(cachedClocks)) { diff --git a/packages/common/nbstore/src/sync/index.ts b/packages/common/nbstore/src/sync/index.ts index d787f1ff9800a..109e3fdf7b1c5 100644 --- a/packages/common/nbstore/src/sync/index.ts +++ b/packages/common/nbstore/src/sync/index.ts @@ -1,65 +1,63 @@ -import { combineLatest, map, type Observable, of } from 'rxjs'; +import { map, type Observable } from 'rxjs'; -import type { - 
AwarenessStorage,
-  BlobStorage,
-  DocStorage,
-  SpaceStorage,
-} from '../storage';
+import type { SpaceStorage } from '../storage';
 import { AwarenessSyncImpl } from './awareness';
 import { BlobSyncImpl } from './blob';
 import { DocSyncImpl, type DocSyncState } from './doc';
+import type { PeerStorageOptions } from './types';
+
+export type { BlobSyncState } from './blob';
+export type { DocSyncDocState, DocSyncState } from './doc';
 
 export interface SyncState {
   doc?: DocSyncState;
 }
 
 export class Sync {
-  readonly doc: DocSyncImpl | null;
-  readonly blob: BlobSyncImpl | null;
-  readonly awareness: AwarenessSyncImpl | null;
+  readonly doc: DocSyncImpl;
+  readonly blob: BlobSyncImpl;
+  readonly awareness: AwarenessSyncImpl;
 
   readonly state$: Observable<SyncState>;
 
-  constructor(
-    readonly local: SpaceStorage,
-    readonly peers: SpaceStorage[]
-  ) {
-    const doc = local.tryGet('doc');
-    const blob = local.tryGet('blob');
-    const sync = local.tryGet('sync');
-    const awareness = local.tryGet('awareness');
-
-    this.doc =
-      doc && sync
-        ? new DocSyncImpl(
-            doc,
-            sync,
-            peers
-              .map(peer => peer.tryGet('doc'))
-              .filter((v): v is DocStorage => !!v)
-          )
-        : null;
-    this.blob = blob
-      ? new BlobSyncImpl(
-          blob,
-          peers
-            .map(peer => peer.tryGet('blob'))
-            .filter((v): v is BlobStorage => !!v)
-        )
-      : null;
-    this.awareness = awareness
-      ? new AwarenessSyncImpl(
-          awareness,
-          peers
-            .map(peer => peer.tryGet('awareness'))
-            .filter((v): v is AwarenessStorage => !!v)
-        )
-      : null;
+  constructor(readonly storages: PeerStorageOptions<SpaceStorage>) {
+    const doc = storages.local.get('doc');
+    const blob = storages.local.get('blob');
+    const sync = storages.local.get('sync');
+    const awareness = storages.local.get('awareness');
 
-    this.state$ = combineLatest([this.doc?.state$ ?? of(undefined)]).pipe(
-      map(([doc]) => ({ doc }))
+    this.doc = new DocSyncImpl(
+      {
+        local: doc,
+        remotes: Object.fromEntries(
+          Object.entries(storages.remotes).map(([peerId, remote]) => [
+            peerId,
+            remote.get('doc'),
+          ])
+        ),
+      },
+      sync
     );
+    this.blob = new BlobSyncImpl({
+      local: blob,
+      remotes: Object.fromEntries(
+        Object.entries(storages.remotes).map(([peerId, remote]) => [
+          peerId,
+          remote.get('blob'),
+        ])
+      ),
+    });
+    this.awareness = new AwarenessSyncImpl({
+      local: awareness,
+      remotes: Object.fromEntries(
+        Object.entries(storages.remotes).map(([peerId, remote]) => [
+          peerId,
+          remote.get('awareness'),
+        ])
+      ),
+    });
+
+    this.state$ = this.doc.state$.pipe(map(doc => ({ doc })));
   }
 
   start() {
diff --git a/packages/common/nbstore/src/sync/types.ts b/packages/common/nbstore/src/sync/types.ts
new file mode 100644
index 0000000000000..35fe0d38965f0
--- /dev/null
+++ b/packages/common/nbstore/src/sync/types.ts
@@ -0,0 +1,4 @@
+export interface PeerStorageOptions<S> {
+  local: S;
+  remotes: Record<string, S>;
+}
diff --git a/packages/common/nbstore/src/storage/__tests__/__snapshots__/storage.spec.ts.snap b/packages/common/nbstore/src/utils/__tests__/__snapshots__/storage.spec.ts.snap
similarity index 100%
rename from packages/common/nbstore/src/storage/__tests__/__snapshots__/storage.spec.ts.snap
rename to packages/common/nbstore/src/utils/__tests__/__snapshots__/storage.spec.ts.snap
diff --git a/packages/common/nbstore/src/storage/__tests__/storage.spec.ts b/packages/common/nbstore/src/utils/__tests__/storage.spec.ts
similarity index 94%
rename from packages/common/nbstore/src/storage/__tests__/storage.spec.ts
rename to packages/common/nbstore/src/utils/__tests__/storage.spec.ts
index 8787fcaaa51aa..d52e24d8dfb88 100644
--- a/packages/common/nbstore/src/storage/__tests__/storage.spec.ts
+++ b/packages/common/nbstore/src/utils/__tests__/storage.spec.ts
@@ -1,6 +1,6 @@
 import { describe, expect, it } from 'vitest';
 
-import { parseUniversalId, universalId } from '../storage';
+import { parseUniversalId, universalId } from '../universal-id';
 
 describe('parseUniversalId', () => {
   it('should generate universal id', () => {
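PeerStorageOptions<S>, added in sync/types.ts above, is the shape that now threads through Sync, DocSyncImpl, BlobSyncImpl and AwarenessSyncImpl: one local storage plus remotes keyed by a stable peer id, which replaces the old remote.peer field for clock bookkeeping. A toy illustration, with the type mirrored locally and hypothetical peer ids:

// shape mirrored from src/sync/types.ts
interface PeerStorageOptions<S> {
  local: S;
  remotes: Record<string, S>;
}

// the keys ('cloud', 'sqlite') are the peer ids that DocSyncPeer
// stores pushed/pulled clocks under in SyncStorage
const topology: PeerStorageOptions<string> = {
  local: 'indexeddb-doc-storage',
  remotes: {
    cloud: 'affine-cloud-doc-storage',
    sqlite: 'desktop-sqlite-doc-storage',
  },
};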
diff --git a/packages/common/nbstore/src/utils/universal-id.ts b/packages/common/nbstore/src/utils/universal-id.ts
new file mode 100644
index 0000000000000..4decc2d601a94
--- /dev/null
+++ b/packages/common/nbstore/src/utils/universal-id.ts
@@ -0,0 +1,93 @@
+export type SpaceType = 'workspace' | 'userspace';
+
+export function universalId({
+  peer,
+  type,
+  id,
+}: {
+  peer: string;
+  type: SpaceType;
+  id: string;
+}) {
+  return `@peer(${peer});@type(${type});@id(${id});`;
+}
+
+export function isValidSpaceType(type: string): type is SpaceType {
+  return type === 'workspace' || type === 'userspace';
+}
+
+export function isValidUniversalId(opts: Record<string, string>): boolean {
+  const requiredKeys = ['peer', 'type', 'id'] as const;
+
+  for (const key of requiredKeys) {
+    if (!opts[key]) {
+      return false;
+    }
+  }
+
+  return isValidSpaceType(opts.type);
+}
+
+export function parseUniversalId(id: string): {
+  peer: string;
+  type: SpaceType;
+  id: string;
+} {
+  const result: Partial<{
+    peer: string;
+    type: SpaceType;
+    id: string;
+  }> = {};
+  let key = '';
+  let value = '';
+  let isInValue = false;
+
+  let i = -1;
+
+  while (++i < id.length) {
+    const ch = id[i];
+    const nextCh = id[i + 1];
+
+    // when we are in a value string, we only care about ch and the next char being [')', ';'] to end the id part
+    if (isInValue) {
+      if (ch === ')' && nextCh === ';') {
+        // @ts-expect-error we know the key is valid
+        result[key] = value;
+        key = '';
+        value = '';
+        isInValue = false;
+        i++;
+        continue;
+      }
+
+      value += ch;
+      continue;
+    }
+
+    if (ch === '@') {
+      const keyEnd = id.indexOf('(', i);
+      // we found '@' but no '(' in lookahead, or '(' is immediately after '@': invalid id
+      if (keyEnd === -1 || keyEnd === i + 1) {
+        break;
+      }
+
+      key = id.slice(i + 1, keyEnd);
+      i = keyEnd;
+      isInValue = true;
+    } else {
+      break;
+    }
+  }
+
+  if (!isValidUniversalId(result)) {
+    throw new Error(
+      `Invalid universal storage id: ${id}. It should be in format of @peer(\${peer});@type(\${type});@id(\${id});`
+    );
+  }
+
+  return result as {
+    peer: string;
+    type: SpaceType;
+    id: string;
+  };
+}
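A round trip through the relocated helpers (values hypothetical); both functions are re-exported from the package root by the src/index.ts change earlier in this patch:

import { parseUniversalId, universalId } from '@affine/nbstore';

// build: '@peer(cloud);@type(workspace);@id(ws-1);'
const id = universalId({ peer: 'cloud', type: 'workspace', id: 'ws-1' });

// parse back into its parts; parseUniversalId throws on malformed input
const { peer, type, id: spaceId } = parseUniversalId(id);
console.assert(peer === 'cloud' && type === 'workspace' && spaceId === 'ws-1');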
diff --git a/packages/common/nbstore/src/worker/client.ts b/packages/common/nbstore/src/worker/client.ts
index f4f8ef8603d26..bc7be3c6f4e81 100644
--- a/packages/common/nbstore/src/worker/client.ts
+++ b/packages/common/nbstore/src/worker/client.ts
@@ -1,54 +1,58 @@
 import type { OpClient } from '@toeverything/infra/op';
 
 import { DummyConnection } from '../connection';
-import { DocFrontend } from '../frontend/doc';
+import { AwarenessFrontend, BlobFrontend, DocFrontend } from '../frontend';
 import {
   type AwarenessRecord,
-  type AwarenessStorage,
   type BlobRecord,
   type BlobStorage,
   type DocRecord,
   type DocStorage,
   type DocUpdate,
   type ListedBlobRecord,
-  type StorageOptions,
-  universalId,
 } from '../storage';
 import type { AwarenessSync } from '../sync/awareness';
 import type { BlobSync } from '../sync/blob';
 import type { DocSync } from '../sync/doc';
-import type { WorkerOps } from './ops';
+import type { WorkerInitOptions, WorkerOps } from './ops';
+
+export type { WorkerInitOptions } from './ops';
 
 export class WorkerClient {
   constructor(
     private readonly client: OpClient<WorkerOps>,
-    private readonly options: StorageOptions
-  ) {}
-
-  readonly docStorage = new WorkerDocStorage(this.client, this.options);
-  readonly blobStorage = new WorkerBlobStorage(this.client, this.options);
-  readonly awarenessStorage = new WorkerAwarenessStorage(
-    this.client,
-    this.options
-  );
-  readonly docSync = new WorkerDocSync(this.client);
-  readonly blobSync = new WorkerBlobSync(this.client);
-  readonly awarenessSync = new WorkerAwarenessSync(this.client);
-
-  readonly docFrontend = new DocFrontend(this.docStorage, this.docSync);
+    options: WorkerInitOptions
+  ) {
+    client.listen();
+    this.client.call('worker.init', options).catch(err => {
+      console.error('error initializing worker', err);
+    });
+    this.docStorage = new WorkerDocStorage(this.client);
+    this.blobStorage = new WorkerBlobStorage(this.client);
+    this.docSync = new WorkerDocSync(this.client);
+    this.blobSync = new WorkerBlobSync(this.client);
+    this.awarenessSync = new WorkerAwarenessSync(this.client);
+    this.docFrontend = new DocFrontend(this.docStorage, this.docSync);
+    this.blobFrontend = new BlobFrontend(this.blobStorage, this.blobSync);
+    this.awarenessFrontend = new AwarenessFrontend(this.awarenessSync);
+  }
+
+  private readonly docStorage: WorkerDocStorage;
+  private readonly blobStorage: WorkerBlobStorage;
+  private readonly docSync: WorkerDocSync;
+  private readonly blobSync: WorkerBlobSync;
+  private readonly awarenessSync: WorkerAwarenessSync;
+
+  readonly docFrontend: DocFrontend;
+  readonly blobFrontend: BlobFrontend;
+  readonly awarenessFrontend: AwarenessFrontend;
 }
 
 class WorkerDocStorage implements DocStorage {
-  constructor(
-    private readonly client: OpClient<WorkerOps>,
-    private readonly options: StorageOptions
-  ) {}
+  constructor(private readonly client: OpClient<WorkerOps>) {}
 
-  readonly peer = this.options.peer;
-  readonly spaceType = this.options.type;
-  readonly spaceId = this.options.id;
-  readonly universalId = universalId(this.options);
   readonly storageType = 'doc';
+  readonly isReadonly = false;
 
   async getDoc(docId: string) {
     return this.client.call('docStorage.getDoc', docId);
@@ -119,16 +123,9 @@
 }
 
 class WorkerDocConnection extends DummyConnection {
 }
 
 class WorkerBlobStorage implements BlobStorage {
-  constructor(
-    private readonly client: OpClient<WorkerOps>,
-    private readonly options:
StorageOptions - ) {} + constructor(private readonly client: OpClient) {} readonly storageType = 'blob'; - readonly peer = this.options.peer; - readonly spaceType = this.options.type; - readonly spaceId = this.options.id; - readonly universalId = universalId(this.options); get(key: string, _signal?: AbortSignal): Promise { return this.client.call('blobStorage.getBlob', key); @@ -156,63 +153,6 @@ class WorkerBlobStorage implements BlobStorage { connection = new DummyConnection(); } -class WorkerAwarenessStorage implements AwarenessStorage { - constructor( - private readonly client: OpClient, - private readonly options: StorageOptions - ) {} - - readonly storageType = 'awareness'; - readonly peer = this.options.peer; - readonly spaceType = this.options.type; - readonly spaceId = this.options.id; - readonly universalId = universalId(this.options); - - update(record: AwarenessRecord, origin?: string): Promise { - return this.client.call('awarenessStorage.update', { - awareness: record, - origin, - }); - } - subscribeUpdate( - id: string, - onUpdate: (update: AwarenessRecord, origin?: string) => void, - onCollect: () => Promise - ): () => void { - const subscription = this.client - .ob$('awarenessStorage.subscribeUpdate', id) - .subscribe({ - next: update => { - if (update.type === 'awareness-update') { - onUpdate(update.awareness, update.origin); - } - if (update.type === 'awareness-collect') { - onCollect() - .then(record => { - if (record) { - this.client - .call('awarenessStorage.collect', { - awareness: record, - collectId: update.collectId, - }) - .catch(err => { - console.error('error feedback collected awareness', err); - }); - } - }) - .catch(err => { - console.error('error collecting awareness', err); - }); - } - }, - }); - return () => { - subscription.unsubscribe(); - }; - } - connection = new DummyConnection(); -} - class WorkerDocSync implements DocSync { constructor(private readonly client: OpClient) {} @@ -234,6 +174,22 @@ class WorkerDocSync implements DocSync { class WorkerBlobSync implements BlobSync { constructor(private readonly client: OpClient) {} + readonly state$ = this.client.ob$('blobSync.state'); + setMaxBlobSize(size: number): void { + this.client.call('blobSync.setMaxBlobSize', size).catch(err => { + console.error('error setting max blob size', err); + }); + } + onReachedMaxBlobSize(cb: (byteSize: number) => void): () => void { + const subscription = this.client + .ob$('blobSync.onReachedMaxBlobSize') + .subscribe(byteSize => { + cb(byteSize); + }); + return () => { + subscription.unsubscribe(); + }; + } downloadBlob( blobId: string, _signal?: AbortSignal @@ -243,6 +199,27 @@ class WorkerBlobSync implements BlobSync { uploadBlob(blob: BlobRecord, _signal?: AbortSignal): Promise { return this.client.call('blobSync.uploadBlob', blob); } + fullSync(signal?: AbortSignal): Promise { + return new Promise((resolve, reject) => { + const abortListener = () => { + reject(signal?.reason); + subscription.unsubscribe(); + }; + + signal?.addEventListener('abort', abortListener); + + const subscription = this.client.ob$('blobSync.fullSync').subscribe({ + next() { + signal?.removeEventListener('abort', abortListener); + resolve(); + }, + error(err) { + signal?.removeEventListener('abort', abortListener); + reject(err); + }, + }); + }); + } } class WorkerAwarenessSync implements AwarenessSync { diff --git a/packages/common/nbstore/src/worker/consumer.ts b/packages/common/nbstore/src/worker/consumer.ts index f83e94f435304..fd8ba71cd35fb 100644 --- 
a/packages/common/nbstore/src/worker/consumer.ts +++ b/packages/common/nbstore/src/worker/consumer.ts @@ -1,27 +1,29 @@ import type { OpConsumer } from '@toeverything/infra/op'; import { Observable } from 'rxjs'; -import { getAvailableStorageImplementations } from '../impls'; -import { SpaceStorage, type StorageOptions } from '../storage'; +import { type StorageConstructor } from '../impls'; +import { SpaceStorage } from '../storage'; import type { AwarenessRecord } from '../storage/awareness'; import { Sync } from '../sync'; -import type { WorkerOps } from './ops'; +import type { PeerStorageOptions } from '../sync/types'; +import type { WorkerInitOptions, WorkerOps } from './ops'; + +export type { WorkerOps }; export class WorkerConsumer { - private remotes: SpaceStorage[] = []; - private local: SpaceStorage | null = null; + private storages: PeerStorageOptions | null = null; private sync: Sync | null = null; get ensureLocal() { - if (!this.local) { + if (!this.storages) { throw new Error('Not initialized'); } - return this.local; + return this.storages.local; } get ensureSync() { if (!this.sync) { - throw new Error('Not initialized'); + throw new Error('Sync not initialized'); } return this.sync; } @@ -31,11 +33,7 @@ export class WorkerConsumer { } get docSync() { - const docSync = this.ensureSync.doc; - if (!docSync) { - throw new Error('Doc sync not initialized'); - } - return docSync; + return this.ensureSync.doc; } get blobStorage() { @@ -43,11 +41,7 @@ export class WorkerConsumer { } get blobSync() { - const blobSync = this.ensureSync.blob; - if (!blobSync) { - throw new Error('Blob sync not initialized'); - } - return blobSync; + return this.ensureSync.blob; } get syncStorage() { @@ -59,41 +53,58 @@ export class WorkerConsumer { } get awarenessSync() { - const awarenessSync = this.ensureSync.awareness; - if (!awarenessSync) { - throw new Error('Awareness sync not initialized'); - } - return awarenessSync; + return this.ensureSync.awareness; } - constructor(private readonly consumer: OpConsumer) {} - - listen() { + constructor( + private readonly consumer: OpConsumer, + private readonly availableStorageImplementations: StorageConstructor[] + ) { this.registerHandlers(); this.consumer.listen(); } - async init(init: { - local: { name: string; opts: StorageOptions }[]; - remotes: { name: string; opts: StorageOptions }[][]; - }) { - this.local = new SpaceStorage( - init.local.map(opt => { - const Storage = getAvailableStorageImplementations(opt.name); - return new Storage(opt.opts); - }) - ); - this.remotes = init.remotes.map(opts => { - return new SpaceStorage( - opts.map(opt => { - const Storage = getAvailableStorageImplementations(opt.name); - return new Storage(opt.opts); + init(init: WorkerInitOptions) { + this.storages = { + local: new SpaceStorage( + Object.fromEntries( + Object.entries(init.local).map(([type, opt]) => { + const Storage = this.availableStorageImplementations.find( + impl => impl.identifier === opt.name + ); + if (!Storage) { + throw new Error(`Storage implementation ${opt.name} not found`); + } + return [type, new Storage(opt.opts as any)]; + }) + ) + ), + remotes: Object.fromEntries( + Object.entries(init.remotes).map(([peer, opts]) => { + return [ + peer, + new SpaceStorage( + Object.fromEntries( + Object.entries(opts).map(([type, opt]) => { + const Storage = this.availableStorageImplementations.find( + impl => impl.identifier === opt.name + ); + if (!Storage) { + throw new Error( + `Storage implementation ${opt.name} not found` + ); + } + return [type, new 
Storage(opt.opts as any)]; + }) + ) + ), + ]; }) - ); - }); - this.sync = new Sync(this.local, this.remotes); - this.local.connect(); - for (const remote of this.remotes) { + ), + }; + this.sync = new Sync(this.storages); + this.storages.local.connect(); + for (const remote of Object.values(this.storages.remotes)) { remote.connect(); } this.sync.start(); @@ -101,9 +112,9 @@ export class WorkerConsumer { async destroy() { this.sync?.stop(); - this.local?.disconnect(); - await this.local?.destroy(); - for (const remote of this.remotes) { + this.storages?.local.disconnect(); + await this.storages?.local.destroy(); + for (const remote of Object.values(this.storages?.remotes ?? {})) { remote.disconnect(); await remote.destroy(); } @@ -144,7 +155,7 @@ export class WorkerConsumer { subscriber.next(true); subscriber.complete(); }) - .catch(error => { + .catch((error: any) => { subscriber.error(error); }); return () => abortController.abort(); @@ -224,6 +235,29 @@ export class WorkerConsumer { }), 'blobSync.downloadBlob': key => this.blobSync.downloadBlob(key), 'blobSync.uploadBlob': blob => this.blobSync.uploadBlob(blob), + 'blobSync.fullSync': () => + new Observable(subscriber => { + const abortController = new AbortController(); + this.blobSync + .fullSync(abortController.signal) + .then(() => { + subscriber.next(true); + subscriber.complete(); + }) + .catch(error => { + subscriber.error(error); + }); + return () => abortController.abort(); + }), + 'blobSync.state': () => this.blobSync.state$, + 'blobSync.setMaxBlobSize': size => this.blobSync.setMaxBlobSize(size), + 'blobSync.onReachedMaxBlobSize': () => + new Observable(subscriber => { + const undo = this.blobSync.onReachedMaxBlobSize(byteSize => { + subscriber.next(byteSize); + }); + return () => undo(); + }), 'awarenessSync.update': ({ awareness, origin }) => this.awarenessSync.update(awareness, origin), 'awarenessSync.subscribeUpdate': docId => diff --git a/packages/common/nbstore/src/worker/ops.ts b/packages/common/nbstore/src/worker/ops.ts index aabf7c2889a4f..5de0baaa56d58 100644 --- a/packages/common/nbstore/src/worker/ops.ts +++ b/packages/common/nbstore/src/worker/ops.ts @@ -1,3 +1,4 @@ +import type { AvailableStorageImplementations } from '../impls'; import type { BlobRecord, DocClock, @@ -6,20 +7,27 @@ import type { DocRecord, DocUpdate, ListedBlobRecord, - StorageOptions, + StorageType, } from '../storage'; import type { AwarenessRecord } from '../storage/awareness'; +import type { BlobSyncState } from '../sync/blob'; import type { DocSyncDocState, DocSyncState } from '../sync/doc'; +type StorageInitOptions = Values<{ + [key in keyof AvailableStorageImplementations]: { + name: key; + opts: ConstructorParameters[0]; + }; +}>; + +export interface WorkerInitOptions { + local: { [key in StorageType]?: StorageInitOptions }; + remotes: Record; +} + interface GroupedWorkerOps { worker: { - init: [ - { - local: { name: string; opts: StorageOptions }[]; - remotes: { name: string; opts: StorageOptions }[][]; - }, - void, - ]; + init: [WorkerInitOptions, void]; destroy: [void, void]; }; @@ -83,6 +91,10 @@ interface GroupedWorkerOps { blobSync: { downloadBlob: [string, BlobRecord | null]; uploadBlob: [BlobRecord, void]; + fullSync: [void, boolean]; + setMaxBlobSize: [number, void]; + onReachedMaxBlobSize: [void, number]; + state: [void, BlobSyncState]; }; awarenessSync: { diff --git a/packages/common/nbstore/tsconfig.json b/packages/common/nbstore/tsconfig.json index 183d9103b4062..ca7aecca3a307 100644 --- 
a/packages/common/nbstore/tsconfig.json +++ b/packages/common/nbstore/tsconfig.json @@ -10,9 +10,6 @@ { "path": "../../frontend/graphql" }, - { - "path": "../../frontend/electron-api" - }, { "path": "../infra" }, diff --git a/packages/frontend/apps/electron/src/helper/nbstore/blob.ts b/packages/frontend/apps/electron/src/helper/nbstore/blob.ts deleted file mode 100644 index c1e0641db9bf0..0000000000000 --- a/packages/frontend/apps/electron/src/helper/nbstore/blob.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { type BlobRecord, BlobStorageBase, share } from '@affine/nbstore'; - -import { NativeDBConnection } from './db'; - -export class SqliteBlobStorage extends BlobStorageBase { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); - - get db() { - return this.connection.inner; - } - - override async get(key: string) { - return this.db.getBlob(key); - } - - override async set(blob: BlobRecord) { - await this.db.setBlob(blob); - } - - override async delete(key: string, permanently: boolean) { - await this.db.deleteBlob(key, permanently); - } - - override async release() { - await this.db.releaseBlobs(); - } - - override async list() { - return this.db.listBlobs(); - } -} diff --git a/packages/frontend/apps/electron/src/helper/nbstore/db.ts b/packages/frontend/apps/electron/src/helper/nbstore/db.ts deleted file mode 100644 index d5edb4c33326b..0000000000000 --- a/packages/frontend/apps/electron/src/helper/nbstore/db.ts +++ /dev/null @@ -1,46 +0,0 @@ -import path from 'node:path'; - -import { DocStorage as NativeDocStorage } from '@affine/native'; -import { AutoReconnectConnection, type SpaceType } from '@affine/nbstore'; -import fs from 'fs-extra'; - -import { logger } from '../logger'; -import { getSpaceDBPath } from '../workspace/meta'; - -export class NativeDBConnection extends AutoReconnectConnection { - constructor( - private readonly peer: string, - private readonly type: SpaceType, - private readonly id: string - ) { - super(); - } - - async getDBPath() { - return await getSpaceDBPath(this.peer, this.type, this.id); - } - - override get shareId(): string { - return `sqlite:${this.peer}:${this.type}:${this.id}`; - } - - override async doConnect() { - const dbPath = await this.getDBPath(); - await fs.ensureDir(path.dirname(dbPath)); - const conn = new NativeDocStorage(dbPath); - await conn.connect(); - logger.info('[nbstore] connection established', this.shareId); - return conn; - } - - override doDisconnect(conn: NativeDocStorage) { - conn - .close() - .then(() => { - logger.info('[nbstore] connection closed', this.shareId); - }) - .catch(err => { - logger.error('[nbstore] connection close failed', this.shareId, err); - }); - } -} diff --git a/packages/frontend/apps/electron/src/helper/nbstore/doc.ts b/packages/frontend/apps/electron/src/helper/nbstore/doc.ts deleted file mode 100644 index 4078f50513d1d..0000000000000 --- a/packages/frontend/apps/electron/src/helper/nbstore/doc.ts +++ /dev/null @@ -1,83 +0,0 @@ -import { - type DocClocks, - type DocRecord, - DocStorageBase, - type DocUpdate, - share, -} from '@affine/nbstore'; - -import { NativeDBConnection } from './db'; - -export class SqliteDocStorage extends DocStorageBase { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); - - get db() { - return this.connection.inner; - } - - override async pushDocUpdate(update: DocUpdate) { - const timestamp = await this.db.pushUpdate(update.docId, update.bin); - - return { docId: update.docId, 
timestamp }; - } - - override async deleteDoc(docId: string) { - await this.db.deleteDoc(docId); - } - - override async getDocTimestamps(after?: Date) { - const clocks = await this.db.getDocClocks(after); - - return clocks.reduce((ret, cur) => { - ret[cur.docId] = cur.timestamp; - return ret; - }, {} as DocClocks); - } - - override async getDocTimestamp(docId: string) { - return this.db.getDocClock(docId); - } - - protected override async getDocSnapshot(docId: string) { - const snapshot = await this.db.getDocSnapshot(docId); - - if (!snapshot) { - return null; - } - - return { - docId, - bin: snapshot.data, - timestamp: snapshot.timestamp, - }; - } - - protected override async setDocSnapshot( - snapshot: DocRecord - ): Promise<boolean> { - return this.db.setDocSnapshot({ - docId: snapshot.docId, - data: Buffer.from(snapshot.bin), - timestamp: new Date(snapshot.timestamp), - }); - } - - protected override async getDocUpdates(docId: string) { - return this.db.getDocUpdates(docId).then(updates => - updates.map(update => ({ - docId, - bin: update.data, - timestamp: update.createdAt, - })) - ); - } - - protected override markUpdatesMerged(docId: string, updates: DocRecord[]) { - return this.db.markUpdatesMerged( - docId, - updates.map(update => update.timestamp) - ); - } -} diff --git a/packages/frontend/apps/electron/src/helper/nbstore/handlers.ts b/packages/frontend/apps/electron/src/helper/nbstore/handlers.ts index 946cb79cf5041..dcc284c205b7d 100644 --- a/packages/frontend/apps/electron/src/helper/nbstore/handlers.ts +++ b/packages/frontend/apps/electron/src/helper/nbstore/handlers.ts @@ -1,128 +1,43 @@ -import { - type BlobRecord, - type DocClock, - type DocUpdate, -} from '@affine/nbstore'; - -import { ensureStorage, getStorage } from './storage'; - -export const nbstoreHandlers = { - connect: async (id: string) => { - await ensureStorage(id); - }, - - close: async (id: string) => { - const store = getStorage(id); - - if (store) { - store.disconnect(); - // The store may be shared with other tabs, so we don't delete it from cache - // the underlying connection will handle the close correctly - // STORE_CACHE.delete(`${spaceType}:${spaceId}`); - } - }, - - pushDocUpdate: async (id: string, update: DocUpdate) => { - const store = await ensureStorage(id); - return store.get('doc').pushDocUpdate(update); - }, - - getDoc: async (id: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('doc').getDoc(docId); - }, - - deleteDoc: async (id: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('doc').deleteDoc(docId); - }, - - getDocTimestamps: async (id: string, after?: Date) => { - const store = await ensureStorage(id); - return store.get('doc').getDocTimestamps(after); - }, - - getDocTimestamp: async (id: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('doc').getDocTimestamp(docId); - }, - - setBlob: async (id: string, blob: BlobRecord) => { - const store = await ensureStorage(id); - return store.get('blob').set(blob); - }, - - getBlob: async (id: string, key: string) => { - const store = await ensureStorage(id); - return store.get('blob').get(key); - }, - - deleteBlob: async (id: string, key: string, permanently: boolean) => { - const store = await ensureStorage(id); - return store.get('blob').delete(key, permanently); - }, - - listBlobs: async (id: string) => { - const store = await ensureStorage(id); - return store.get('blob').list(); - }, - - releaseBlobs: async (id: string) => { - const 
store = await ensureStorage(id); - return store.get('blob').release(); - }, - - getPeerRemoteClocks: async (id: string, peer: string) => { - const store = await ensureStorage(id); - return store.get('sync').getPeerRemoteClocks(peer); - }, - - getPeerRemoteClock: async (id: string, peer: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('sync').getPeerRemoteClock(peer, docId); - }, - - setPeerRemoteClock: async (id: string, peer: string, clock: DocClock) => { - const store = await ensureStorage(id); - return store.get('sync').setPeerRemoteClock(peer, clock); - }, - - getPeerPulledRemoteClocks: async (id: string, peer: string) => { - const store = await ensureStorage(id); - return store.get('sync').getPeerPulledRemoteClocks(peer); - }, - - getPeerPulledRemoteClock: async (id: string, peer: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('sync').getPeerPulledRemoteClock(peer, docId); - }, - - setPeerPulledRemoteClock: async ( - id: string, - peer: string, - clock: DocClock - ) => { - const store = await ensureStorage(id); - return store.get('sync').setPeerPulledRemoteClock(peer, clock); - }, - - getPeerPushedClocks: async (id: string, peer: string) => { - const store = await ensureStorage(id); - return store.get('sync').getPeerPushedClocks(peer); - }, - - getPeerPushedClock: async (id: string, peer: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('sync').getPeerPushedClock(peer, docId); - }, - - setPeerPushedClock: async (id: string, peer: string, clock: DocClock) => { - const store = await ensureStorage(id); - return store.get('sync').setPeerPushedClock(peer, clock); - }, - - clearClocks: async (id: string) => { - const store = await ensureStorage(id); - return store.get('sync').clearClocks(); - }, +import path from 'node:path'; + +import { DocStoragePool } from '@affine/native'; +import { parseUniversalId } from '@affine/nbstore'; +import type { NativeDBApis } from '@affine/nbstore/sqlite'; +import fs from 'fs-extra'; + +import { getSpaceDBPath } from '../workspace/meta'; + +const POOL = new DocStoragePool(); + +export const nbstoreHandlers: NativeDBApis = { + connect: async (universalId: string) => { + const { peer, type, id } = parseUniversalId(universalId); + const dbPath = await getSpaceDBPath(peer, type, id); + await fs.ensureDir(path.dirname(dbPath)); + await POOL.connect(universalId, dbPath); + }, + disconnect: POOL.disconnect.bind(POOL), + pushUpdate: POOL.pushUpdate.bind(POOL), + getDocSnapshot: POOL.getDocSnapshot.bind(POOL), + setDocSnapshot: POOL.setDocSnapshot.bind(POOL), + getDocUpdates: POOL.getDocUpdates.bind(POOL), + markUpdatesMerged: POOL.markUpdatesMerged.bind(POOL), + deleteDoc: POOL.deleteDoc.bind(POOL), + getDocClocks: POOL.getDocClocks.bind(POOL), + getDocClock: POOL.getDocClock.bind(POOL), + getBlob: POOL.getBlob.bind(POOL), + setBlob: POOL.setBlob.bind(POOL), + deleteBlob: POOL.deleteBlob.bind(POOL), + releaseBlobs: POOL.releaseBlobs.bind(POOL), + listBlobs: POOL.listBlobs.bind(POOL), + getPeerRemoteClocks: POOL.getPeerRemoteClocks.bind(POOL), + getPeerRemoteClock: POOL.getPeerRemoteClock.bind(POOL), + setPeerRemoteClock: POOL.setPeerRemoteClock.bind(POOL), + getPeerPulledRemoteClocks: POOL.getPeerPulledRemoteClocks.bind(POOL), + getPeerPulledRemoteClock: POOL.getPeerPulledRemoteClock.bind(POOL), + setPeerPulledRemoteClock: POOL.setPeerPulledRemoteClock.bind(POOL), + getPeerPushedClocks: POOL.getPeerPushedClocks.bind(POOL), + getPeerPushedClock: 
POOL.getPeerPushedClock.bind(POOL), + setPeerPushedClock: POOL.setPeerPushedClock.bind(POOL), + clearClocks: POOL.clearClocks.bind(POOL), }; diff --git a/packages/frontend/apps/electron/src/helper/nbstore/index.ts b/packages/frontend/apps/electron/src/helper/nbstore/index.ts index 860899542964f..655bc6a9424db 100644 --- a/packages/frontend/apps/electron/src/helper/nbstore/index.ts +++ b/packages/frontend/apps/electron/src/helper/nbstore/index.ts @@ -1,4 +1,3 @@ export { nbstoreHandlers } from './handlers'; -export * from './storage'; export { dbEvents as dbEventsV1, dbHandlers as dbHandlersV1 } from './v1'; export { universalId } from '@affine/nbstore'; diff --git a/packages/frontend/apps/electron/src/helper/nbstore/storage.ts b/packages/frontend/apps/electron/src/helper/nbstore/storage.ts deleted file mode 100644 index 4e4f2b222fbe5..0000000000000 --- a/packages/frontend/apps/electron/src/helper/nbstore/storage.ts +++ /dev/null @@ -1,92 +0,0 @@ -import { parseUniversalId, SpaceStorage } from '@affine/nbstore'; -import { applyUpdate, Doc as YDoc } from 'yjs'; - -import { logger } from '../logger'; -import { SqliteBlobStorage } from './blob'; -import { NativeDBConnection } from './db'; -import { SqliteDocStorage } from './doc'; -import { SqliteSyncStorage } from './sync'; - -export class SqliteSpaceStorage extends SpaceStorage { - get connection() { - const docStore = this.get('doc'); - - if (!docStore) { - throw new Error('doc store not found'); - } - - const connection = docStore.connection; - - if (!(connection instanceof NativeDBConnection)) { - throw new Error('doc store connection is not a Sqlite connection'); - } - - return connection; - } - - async getDBPath() { - return this.connection.getDBPath(); - } - - async getWorkspaceName() { - const docStore = this.tryGet('doc'); - - if (!docStore) { - return null; - } - - const doc = await docStore.getDoc(docStore.spaceId); - if (!doc) { - return null; - } - - const ydoc = new YDoc(); - applyUpdate(ydoc, doc.bin); - return ydoc.getMap('meta').get('name') as string; - } - - async checkpoint() { - await this.connection.inner.checkpoint(); - } -} - -const STORE_CACHE = new Map<string, SqliteSpaceStorage>(); - -process.on('beforeExit', () => { - STORE_CACHE.forEach(store => { - store.destroy().catch(err => { - logger.error('[nbstore] destroy store failed', err); - }); - }); -}); - -export function getStorage(universalId: string) { - return STORE_CACHE.get(universalId); -} - -export async function ensureStorage(universalId: string) { - const { peer, type, id } = parseUniversalId(universalId); - let store = STORE_CACHE.get(universalId); - - if (!store) { - const opts = { - peer, - type, - id, - }; - - store = new SqliteSpaceStorage([ - new SqliteDocStorage(opts), - new SqliteBlobStorage(opts), - new SqliteSyncStorage(opts), - ]); - - store.connect(); - - await store.waitForConnected(); - - STORE_CACHE.set(universalId, store); - } - - return store; -} diff --git a/packages/frontend/apps/electron/src/helper/nbstore/sync.ts b/packages/frontend/apps/electron/src/helper/nbstore/sync.ts deleted file mode 100644 index 2942371b59be2..0000000000000 --- a/packages/frontend/apps/electron/src/helper/nbstore/sync.ts +++ /dev/null @@ -1,70 +0,0 @@ -import { - BasicSyncStorage, - type DocClock, - type DocClocks, - share, -} from '@affine/nbstore'; - -import { NativeDBConnection } from './db'; - -export class SqliteSyncStorage extends BasicSyncStorage { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); - - get db() { - return 
this.connection.inner; - } - - override async getPeerRemoteClocks(peer: string) { - const records = await this.db.getPeerRemoteClocks(peer); - return records.reduce((clocks, { docId, timestamp }) => { - clocks[docId] = timestamp; - return clocks; - }, {} as DocClocks); - } - - override async getPeerRemoteClock(peer: string, docId: string) { - return this.db.getPeerRemoteClock(peer, docId); - } - - override async setPeerRemoteClock(peer: string, clock: DocClock) { - await this.db.setPeerRemoteClock(peer, clock.docId, clock.timestamp); - } - - override async getPeerPulledRemoteClock(peer: string, docId: string) { - return this.db.getPeerPulledRemoteClock(peer, docId); - } - - override async getPeerPulledRemoteClocks(peer: string) { - const records = await this.db.getPeerPulledRemoteClocks(peer); - return records.reduce((clocks, { docId, timestamp }) => { - clocks[docId] = timestamp; - return clocks; - }, {} as DocClocks); - } - - override async setPeerPulledRemoteClock(peer: string, clock: DocClock) { - await this.db.setPeerPulledRemoteClock(peer, clock.docId, clock.timestamp); - } - - override async getPeerPushedClocks(peer: string) { - const records = await this.db.getPeerPushedClocks(peer); - return records.reduce((clocks, { docId, timestamp }) => { - clocks[docId] = timestamp; - return clocks; - }, {} as DocClocks); - } - - override async getPeerPushedClock(peer: string, docId: string) { - return this.db.getPeerPushedClock(peer, docId); - } - - override async setPeerPushedClock(peer: string, clock: DocClock) { - await this.db.setPeerPushedClock(peer, clock.docId, clock.timestamp); - } - - override async clearClocks() { - await this.db.clearClocks(); - } -} diff --git a/packages/frontend/apps/ios/App/App.xcodeproj/project.pbxproj b/packages/frontend/apps/ios/App/App.xcodeproj/project.pbxproj index 51d8e8e925002..32d212cfdd512 100644 --- a/packages/frontend/apps/ios/App/App.xcodeproj/project.pbxproj +++ b/packages/frontend/apps/ios/App/App.xcodeproj/project.pbxproj @@ -13,6 +13,7 @@ 50A285D72D112A5E000D5A6D /* Localizable.xcstrings in Resources */ = {isa = PBXBuildFile; fileRef = 50A285D62D112A5E000D5A6D /* Localizable.xcstrings */; }; 50A285D82D112A5E000D5A6D /* InfoPlist.xcstrings in Resources */ = {isa = PBXBuildFile; fileRef = 50A285D52D112A5E000D5A6D /* InfoPlist.xcstrings */; }; 50A285DC2D112B24000D5A6D /* Intelligents in Frameworks */ = {isa = PBXBuildFile; productRef = 50A285DB2D112B24000D5A6D /* Intelligents */; }; + 9D52FC432D26CDBF00105D0A /* JSValueContainerExt.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9D52FC422D26CDB600105D0A /* JSValueContainerExt.swift */; }; 9D6A85332CCF6DA700DAB35F /* HashcashPlugin.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9D6A85322CCF6DA700DAB35F /* HashcashPlugin.swift */; }; 9D90BE252CCB9876006677DB /* CookieManager.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9D90BE172CCB9876006677DB /* CookieManager.swift */; }; 9D90BE262CCB9876006677DB /* CookiePlugin.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9D90BE182CCB9876006677DB /* CookiePlugin.swift */; }; @@ -23,8 +24,8 @@ 9D90BE2B2CCB9876006677DB /* config.xml in Resources */ = {isa = PBXBuildFile; fileRef = 9D90BE1F2CCB9876006677DB /* config.xml */; }; 9D90BE2D2CCB9876006677DB /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 9D90BE222CCB9876006677DB /* Main.storyboard */; }; 9D90BE2E2CCB9876006677DB /* public in Resources */ = {isa = PBXBuildFile; fileRef = 9D90BE232CCB9876006677DB /* public */; }; + 9DFCD1462D27D1D70028C92B /* 
libaffine_mobile_native.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 9DFCD1452D27D1D70028C92B /* libaffine_mobile_native.a */; }; C4C413792CBE705D00337889 /* Pods_App.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = AF277DCFFFF123FFC6DF26C7 /* Pods_App.framework */; }; - C4C97C6E2D0304D100BC2AD1 /* libaffine_mobile_native.a in Frameworks */ = {isa = PBXBuildFile; fileRef = C4C97C6D2D0304D100BC2AD1 /* libaffine_mobile_native.a */; }; C4C97C7C2D030BE000BC2AD1 /* affine_mobile_native.swift in Sources */ = {isa = PBXBuildFile; fileRef = C4C97C6F2D0307B700BC2AD1 /* affine_mobile_native.swift */; }; C4C97C7D2D030BE000BC2AD1 /* affine_mobile_nativeFFI.h in Sources */ = {isa = PBXBuildFile; fileRef = C4C97C702D0307B700BC2AD1 /* affine_mobile_nativeFFI.h */; }; C4C97C7E2D030BE000BC2AD1 /* affine_mobile_nativeFFI.modulemap in Sources */ = {isa = PBXBuildFile; fileRef = C4C97C712D0307B700BC2AD1 /* affine_mobile_nativeFFI.modulemap */; }; @@ -39,6 +40,7 @@ 50802D5E2D112F7D00694021 /* Intelligents */ = {isa = PBXFileReference; lastKnownFileType = wrapper; path = Intelligents; sourceTree = "<group>"; }; 50A285D52D112A5E000D5A6D /* InfoPlist.xcstrings */ = {isa = PBXFileReference; lastKnownFileType = text.json.xcstrings; path = InfoPlist.xcstrings; sourceTree = "<group>"; }; 50A285D62D112A5E000D5A6D /* Localizable.xcstrings */ = {isa = PBXFileReference; lastKnownFileType = text.json.xcstrings; path = Localizable.xcstrings; sourceTree = "<group>"; }; + 9D52FC422D26CDB600105D0A /* JSValueContainerExt.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = JSValueContainerExt.swift; sourceTree = "<group>"; }; 9D6A85322CCF6DA700DAB35F /* HashcashPlugin.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = HashcashPlugin.swift; sourceTree = "<group>"; }; 9D90BE172CCB9876006677DB /* CookieManager.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CookieManager.swift; sourceTree = "<group>"; }; 9D90BE182CCB9876006677DB /* CookiePlugin.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CookiePlugin.swift; sourceTree = "<group>"; }; @@ -50,10 +52,10 @@ 9D90BE202CCB9876006677DB /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; }; 9D90BE212CCB9876006677DB /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; }; 9D90BE232CCB9876006677DB /* public */ = {isa = PBXFileReference; lastKnownFileType = folder; path = public; sourceTree = "<group>"; }; + 9DFCD1452D27D1D70028C92B /* libaffine_mobile_native.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libaffine_mobile_native.a; sourceTree = "<group>"; }; AF277DCFFFF123FFC6DF26C7 /* Pods_App.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_App.framework; sourceTree = BUILT_PRODUCTS_DIR; }; AF51FD2D460BCFE21FA515B2 /* Pods-App.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-App.release.xcconfig"; path = "Pods/Target Support Files/Pods-App/Pods-App.release.xcconfig"; sourceTree = "<group>"; }; - C4C97C6B2D03027900BC2AD1 /* libaffine_mobile_native.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libaffine_mobile_native.a; path = "../../../../../target/aarch64-apple-ios-sim/release/libaffine_mobile_native.a"; sourceTree = "<group>"; }; - C4C97C6D2D0304D100BC2AD1 /* libaffine_mobile_native.a */ = {isa = 
PBXFileReference; lastKnownFileType = archive.ar; path = libaffine_mobile_native.a; sourceTree = "<group>"; }; + C4C97C6B2D03027900BC2AD1 /* libaffine_mobile_native.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libaffine_mobile_native.a; path = "../../../../../target/aarch64-apple-ios-sim/debug/libaffine_mobile_native.a"; sourceTree = "<group>"; }; C4C97C6F2D0307B700BC2AD1 /* affine_mobile_native.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = affine_mobile_native.swift; sourceTree = "<group>"; }; C4C97C702D0307B700BC2AD1 /* affine_mobile_nativeFFI.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = affine_mobile_nativeFFI.h; sourceTree = "<group>"; }; C4C97C712D0307B700BC2AD1 /* affine_mobile_nativeFFI.modulemap */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.module-map"; path = affine_mobile_nativeFFI.modulemap; sourceTree = "<group>"; }; @@ -70,8 +72,8 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - C4C97C6E2D0304D100BC2AD1 /* libaffine_mobile_native.a in Frameworks */, 50A285DC2D112B24000D5A6D /* Intelligents in Frameworks */, + 9DFCD1462D27D1D70028C92B /* libaffine_mobile_native.a in Frameworks */, 50802D612D112F8700694021 /* Intelligents in Frameworks */, C4C413792CBE705D00337889 /* Pods_App.framework in Frameworks */, ); @@ -84,8 +86,8 @@ isa = PBXGroup; children = ( C4C97C6B2D03027900BC2AD1 /* libaffine_mobile_native.a */, - C4C97C6D2D0304D100BC2AD1 /* libaffine_mobile_native.a */, AF277DCFFFF123FFC6DF26C7 /* Pods_App.framework */, + 9DFCD1452D27D1D70028C92B /* libaffine_mobile_native.a */, ); name = Frameworks; sourceTree = "<group>"; }; @@ -154,6 +156,7 @@ 9D90BE242CCB9876006677DB /* App */ = { isa = PBXGroup; children = ( + 9D52FC422D26CDB600105D0A /* JSValueContainerExt.swift */, 9D90BE1A2CCB9876006677DB /* Plugins */, 9D90BE1C2CCB9876006677DB /* AppDelegate.swift */, 507513692D1924C600AD60C0 /* RootViewController.swift */, @@ -325,6 +328,7 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9D52FC432D26CDBF00105D0A /* JSValueContainerExt.swift in Sources */, 5075136E2D1925BC00AD60C0 /* IntelligentsPlugin.swift in Sources */, 5075136A2D1924C600AD60C0 /* RootViewController.swift in Sources */, C4C97C7C2D030BE000BC2AD1 /* affine_mobile_native.swift in Sources */, diff --git a/packages/frontend/apps/ios/App/App/JSValueContainerExt.swift b/packages/frontend/apps/ios/App/App/JSValueContainerExt.swift new file mode 100644 index 0000000000000..94827635b518e --- /dev/null +++ b/packages/frontend/apps/ios/App/App/JSValueContainerExt.swift @@ -0,0 +1,55 @@ +// +// JSValueContainerExt.swift +// App +// +// Created by EYHN on 2025/1/2. 
+// +import Capacitor + +enum RequestParamError: Error { + case request(key: String) +} + +extension JSValueContainer { + public func getStringEnsure(_ key: String) throws -> String { + guard let str = self.getString(key) else { + throw RequestParamError.request(key: key) + } + return str + } + + public func getIntEnsure(_ key: String) throws -> Int { + guard let int = self.getInt(key) else { + throw RequestParamError.request(key: key) + } + return int + } + + public func getDoubleEnsure(_ key: String) throws -> Double { + guard let doub = self.getDouble(key) else { + throw RequestParamError.request(key: key) + } + return doub + } + + public func getBoolEnsure(_ key: String) throws -> Bool { + guard let bool = self.getBool(key) else { + throw RequestParamError.request(key: key) + } + return bool + } + + public func getArrayEnsure(_ key: String) throws -> JSArray { + guard let arr = self.getArray(key) else { + throw RequestParamError.request(key: key) + } + return arr + } + + public func getArrayEnsure<T>(_ key: String, _ ofType: T.Type) throws -> [T] { + guard let arr = self.getArray(key, ofType) else { + throw RequestParamError.request(key: key) + } + return arr + } +} diff --git a/packages/frontend/apps/ios/App/App/Plugins/NBStore/NBStorePlugin.swift b/packages/frontend/apps/ios/App/App/Plugins/NBStore/NBStorePlugin.swift index 706e1b5c09973..fd7120277ee7e 100644 --- a/packages/frontend/apps/ios/App/App/Plugins/NBStore/NBStorePlugin.swift +++ b/packages/frontend/apps/ios/App/App/Plugins/NBStore/NBStorePlugin.swift @@ -3,400 +3,475 @@ import Foundation @objc(NbStorePlugin) public class NbStorePlugin: CAPPlugin, CAPBridgedPlugin { - private let docStoragePool: DocStoragePool = .init(noPointer: DocStoragePool.NoPointer()) - - public let identifier = "NbStorePlugin" - public let jsName = "NbStoreDocStorage" - public let pluginMethods: [CAPPluginMethod] = [ - CAPPluginMethod(name: "getSpaceDBPath", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "connect", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "close", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "isClosed", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "checkpoint", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "validate", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "setSpaceId", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "pushUpdate", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getDocSnapshot", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "setDocSnapshot", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getDocUpdates", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "markUpdatesMerged", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "deleteDoc", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getDocClocks", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getDocClock", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getBlob", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "setBlob", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "deleteBlob", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "releaseBlobs", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "listBlobs", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getPeerRemoteClocks", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getPeerRemoteClock", returnType: 
CAPPluginReturnPromise), - CAPPluginMethod(name: "setPeerRemoteClock", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getPeerPulledRemoteClocks", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getPeerPulledRemoteClock", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "setPeerPulledRemoteClock", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "getPeerPushedClocks", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "setPeerPushedClock", returnType: CAPPluginReturnPromise), - CAPPluginMethod(name: "clearClocks", returnType: CAPPluginReturnPromise), - ] - - @objc func getSpaceDBPath(_ call: CAPPluginCall) { - let peer = call.getString("peer") ?? "" - let spaceType = call.getString("spaceType") ?? "" - let id = call.getString("id") ?? "" - - do { - let path = try getDbPath(peer: peer, spaceType: spaceType, id: id) - call.resolve(["path": path]) - } catch { - call.reject("Failed to get space DB path", nil, error) + private let docStoragePool: DocStoragePool = newDocStoragePool() + + public let identifier = "NbStorePlugin" + public let jsName = "NbStoreDocStorage" + public let pluginMethods: [CAPPluginMethod] = [ + CAPPluginMethod(name: "connect", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "disconnect", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "setSpaceId", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "pushUpdate", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getDocSnapshot", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "setDocSnapshot", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getDocUpdates", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "markUpdatesMerged", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "deleteDoc", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getDocClocks", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getDocClock", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getBlob", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "setBlob", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "deleteBlob", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "releaseBlobs", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "listBlobs", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getPeerRemoteClocks", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getPeerRemoteClock", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "setPeerRemoteClock", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getPeerPulledRemoteClocks", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getPeerPulledRemoteClock", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "setPeerPulledRemoteClock", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "getPeerPushedClocks", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "setPeerPushedClock", returnType: CAPPluginReturnPromise), + CAPPluginMethod(name: "clearClocks", returnType: CAPPluginReturnPromise), + ] + + @objc func connect(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let spaceId = try call.getStringEnsure("spaceId") + let spaceType = try call.getStringEnsure("spaceType") + let peer = try call.getStringEnsure("peer") + guard let documentDir = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { + 
call.reject("Failed to get document file urls") + return } + let peerDir = documentDir.appending(path: "workspaces") + .appending(path: spaceType) + .appending(path: + peer + .replacing(#/[\/!@#$%^&*()+~`"':;,?<>|]/#, with: "_") + .replacing(/_+/, with: "_") + .replacing(/_+$/, with: "")) + try FileManager.default.createDirectory(atPath: peerDir.path(), withIntermediateDirectories: true) + let db = peerDir.appending(path: spaceId + ".db") + try await docStoragePool.connect(universalId: id, path: db.path()) + call.resolve() + } catch { + call.reject("Failed to connect storage, \(error)", nil, error) + } } - - @objc func connect(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - try? await docStoragePool.connect(universalId: id) - } - - @objc func close(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - try? await docStoragePool.close(universalId: id) - } - - @objc func isClosed(_ call: CAPPluginCall) { - let id = call.getString("id") ?? "" - call.resolve(["isClosed": docStoragePool.isClosed(universalId: id)]) - } - - @objc func checkpoint(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - try? await docStoragePool.checkpoint(universalId: id) + } + + @objc func disconnect(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + try await docStoragePool.disconnect(universalId: id) + call.resolve() + } catch { + call.reject("Failed to disconnect, \(error)", nil, error) + } } - - @objc func validate(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let validate = (try? await docStoragePool.validate(universalId: id)) ?? false - call.resolve(["isValidate": validate]) + } + + @objc func setSpaceId(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let spaceId = try call.getStringEnsure("spaceId") + try await docStoragePool.setSpaceId(universalId: id, spaceId: spaceId) + call.resolve() + } catch { + call.reject("Failed to set space id, \(error)", nil, error) + } } - - @objc func setSpaceId(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let spaceId = call.getString("spaceId") ?? "" - do { - try await docStoragePool.setSpaceId(universalId: id, spaceId: spaceId) - call.resolve() - } catch { - call.reject("Failed to set space id", nil, error) - } + } + + @objc func pushUpdate(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let docId = try call.getStringEnsure("docId") + let data = try call.getStringEnsure("data") + let timestamp = try await docStoragePool.pushUpdate(universalId: id, docId: docId, update: data) + call.resolve(["timestamp": timestamp]) + } catch { + call.reject("Failed to push update, \(error)", nil, error) + } } - - @objc func pushUpdate(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let docId = call.getString("docId") ?? "" - let data = call.getString("data") ?? "" - do { - let timestamp = try await docStoragePool.pushUpdate(universalId: id, docId: docId, update: data) - call.resolve(["timestamp": timestamp.timeIntervalSince1970]) - - } catch { - call.reject("Failed to push update", nil, error) - } - } - - @objc func getDocSnapshot(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let docId = call.getString("docId") ?? 
"" - do { - if let record = try await docStoragePool.getDocSnapshot(universalId: id, docId: docId) { - call.resolve([ - "docId": record.docId, - "data": record.data, - "timestamp": record.timestamp.timeIntervalSince1970, - ]) - } else { - call.resolve() - } - } catch { - call.reject("Failed to get doc snapshot", nil, error) + } + + @objc func getDocSnapshot(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let docId = try call.getStringEnsure("docId") + + if let record = try await docStoragePool.getDocSnapshot(universalId: id, docId: docId) { + call.resolve([ + "docId": record.docId, + "bin": record.bin, + "timestamp": record.timestamp, + ]) + } else { + call.resolve() } + } catch { + call.reject("Failed to get doc snapshot, \(error)", nil, error) + } } - - @objc func setDocSnapshot(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let docId = call.getString("docId") ?? "" - let data = call.getString("data") ?? "" - let timestamp = Date() - do { - let success = try await docStoragePool.setDocSnapshot( - universalId: id, - snapshot: DocRecord(docId: docId, data: data, timestamp: timestamp) - ) - call.resolve(["success": success]) - } catch { - call.reject("Failed to set doc snapshot", nil, error) - } + } + + @objc func setDocSnapshot(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let docId = try call.getStringEnsure("docId") + let bin = try call.getStringEnsure("bin") + let timestamp = try call.getIntEnsure("timestamp") + let success = try await docStoragePool.setDocSnapshot( + universalId: id, + snapshot: DocRecord(docId: docId, bin: bin, timestamp: Int64(timestamp)) + ) + call.resolve(["success": success]) + } catch { + call.reject("Failed to set doc snapshot, \(error)", nil, error) + } } - - @objc func getDocUpdates(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let docId = call.getString("docId") ?? "" - do { - let updates = try await docStoragePool.getDocUpdates(universalId: id, docId: docId) - let mapped = updates.map { [ - "docId": $0.docId, - "createdAt": $0.createdAt.timeIntervalSince1970, - "data": $0.data, - ] } - call.resolve(["updates": mapped]) - } catch { - call.reject("Failed to get doc updates", nil, error) - } + } + + @objc func getDocUpdates(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let docId = try call.getStringEnsure("docId") + let updates = try await docStoragePool.getDocUpdates(universalId: id, docId: docId) + let mapped = updates.map { [ + "docId": $0.docId, + "timestamp": $0.timestamp, + "bin": $0.bin, + ] } + call.resolve(["updates": mapped]) + } catch { + call.reject("Failed to get doc updates, \(error)", nil, error) + } } - - @objc func markUpdatesMerged(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let docId = call.getString("docId") ?? "" - let times = call.getArray("timestamps", Double.self) ?? 
[] - let dateArray = times.map { Date(timeIntervalSince1970: $0) } - do { - let count = try await docStoragePool.markUpdatesMerged(universalId: id, docId: docId, updates: dateArray) - call.resolve(["count": count]) - } catch { - call.reject("Failed to mark updates merged", nil, error) - } + } + + @objc func markUpdatesMerged(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let docId = try call.getStringEnsure("docId") + let times = try call.getArrayEnsure("timestamps", Int64.self) + + let count = try await docStoragePool.markUpdatesMerged(universalId: id, docId: docId, updates: times) + call.resolve(["count": count]) + } catch { + call.reject("Failed to mark updates merged, \(error)", nil, error) + } } - - @objc func deleteDoc(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let docId = call.getString("docId") ?? "" - do { - try await docStoragePool.deleteDoc(universalId: id, docId: docId) - call.resolve() - } catch { - call.reject("Failed to delete doc", nil, error) - } + } + + @objc func deleteDoc(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let docId = try call.getStringEnsure("docId") + + try await docStoragePool.deleteDoc(universalId: id, docId: docId) + call.resolve() + } catch { + call.reject("Failed to delete doc, \(error)", nil, error) + } } - - @objc func getDocClocks(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" + } + + @objc func getDocClocks(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") let after = call.getInt("after") - do { - let docClocks = try await docStoragePool.getDocClocks( - universalId: id, - after: after != nil ? Date(timeIntervalSince1970: TimeInterval(after!)) : nil - ) - let mapped = docClocks.map { [ - "docId": $0.docId, - "timestamp": $0.timestamp.timeIntervalSince1970, - ] } - call.resolve(["clocks": mapped]) - } catch { - call.reject("Failed to get doc clocks", nil, error) - } + + let docClocks = try await docStoragePool.getDocClocks( + universalId: id, + after: after != nil ? Int64(after!) : nil + ) + let mapped = docClocks.map { [ + "docId": $0.docId, + "timestamp": $0.timestamp, + ] } + call.resolve(["clocks": mapped]) + } catch { + call.reject("Failed to get doc clocks, \(error)", nil, error) + } } - - @objc func getDocClock(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let docId = call.getString("docId") ?? "" - do { - if let docClock = try await docStoragePool.getDocClock(universalId: id, docId: docId) { - call.resolve([ - "docId": docClock.docId, - "timestamp": docClock.timestamp.timeIntervalSince1970, - ]) - } else { - call.resolve() - } - } catch { - call.reject("Failed to get doc clock for docId: \(docId)", nil, error) + } + + @objc func getDocClock(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let docId = try call.getStringEnsure("docId") + if let docClock = try await docStoragePool.getDocClock(universalId: id, docId: docId) { + call.resolve([ + "docId": docClock.docId, + "timestamp": docClock.timestamp, + ]) + } else { + call.resolve() } + } catch { + call.reject("Failed to get doc clock, \(error)", nil, error) + } } - - @objc func getBlob(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let key = call.getString("key") ?? "" - if let blob = try? 
await docStoragePool.getBlob(universalId: id, key: key) { - call.resolve(["blob": blob]) + } + + @objc func getBlob(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let key = try call.getStringEnsure("key") + if let blob = try await docStoragePool.getBlob(universalId: id, key: key) { + call.resolve(["blob":[ + "key": blob.key, + "data": blob.data, + "mime": blob.mime, + "size": blob.size, + "createdAt": blob.createdAt + ]]) } else { - call.resolve() + call.resolve() } + } catch { + call.reject("Failed to get blob, \(error)", nil, error) + } } - - @objc func setBlob(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let key = call.getString("key") ?? "" - let data = call.getString("data") ?? "" - let mime = call.getString("mime") ?? "" - try? await docStoragePool.setBlob(universalId: id, blob: SetBlob(key: key, data: data, mime: mime)) + } + + @objc func setBlob(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let key = try call.getStringEnsure("key") + let data = try call.getStringEnsure("data") + let mime = try call.getStringEnsure("mime") + try await docStoragePool.setBlob(universalId: id, blob: SetBlob(key: key, data: data, mime: mime)) + call.resolve() + } catch { + call.reject("Failed to set blob, \(error)", nil, error) + } } - - @objc func deleteBlob(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let key = call.getString("key") ?? "" + } + + @objc func deleteBlob(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let key = try call.getStringEnsure("key") let permanently = call.getBool("permanently") ?? false - try? await docStoragePool.deleteBlob(universalId: id, key: key, permanently: permanently) + try await docStoragePool.deleteBlob(universalId: id, key: key, permanently: permanently) + call.resolve() + } catch { + call.reject("Failed to delete blob, \(error)", nil, error) + } } - - @objc func releaseBlobs(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - try? await docStoragePool.releaseBlobs(universalId: id) + } + + @objc func releaseBlobs(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + try await docStoragePool.releaseBlobs(universalId: id) + call.resolve() + } catch { + call.reject("Failed to release blobs, \(error)", nil, error) + } } - - @objc func listBlobs(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - if let blobs = try? await docStoragePool.listBlobs(universalId: id) { - let mapped = blobs.map { [ - "key": $0.key, - "size": $0.size, - "mime": $0.mime, - "createdAt": $0.createdAt.timeIntervalSince1970, - ] } - call.resolve(["blobs": mapped]) - } else { - call.resolve() - } + } + + @objc func listBlobs(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let blobs = try await docStoragePool.listBlobs(universalId: id) + let mapped = blobs.map { [ + "key": $0.key, + "size": $0.size, + "mime": $0.mime, + "createdAt": $0.createdAt, + ] } + call.resolve(["blobs": mapped]) + } catch { + call.reject("Failed to list blobs, \(error)", nil, error) + } } - - @objc func getPeerRemoteClocks(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let peer = call.getString("peer") ?? 
"" - do { - let clocks = try await docStoragePool.getPeerRemoteClocks(universalId: id, peer: peer) - let mapped = clocks.map { [ - "docId": $0.docId, - "timestamp": $0.timestamp.timeIntervalSince1970, - ] } - call.resolve(["clocks": mapped]) - - } catch { - call.reject("Failed to get peer remote clocks", nil, error) - } + } + + @objc func getPeerRemoteClocks(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let peer = try call.getStringEnsure("peer") + + let clocks = try await docStoragePool.getPeerRemoteClocks(universalId: id, peer: peer) + let mapped = clocks.map { [ + "docId": $0.docId, + "timestamp": $0.timestamp, + ] } + call.resolve(["clocks": mapped]) + } catch { + call.reject("Failed to get peer remote clocks, \(error)", nil, error) + } } - - @objc func getPeerRemoteClock(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let peer = call.getString("peer") ?? "" - let docId = call.getString("docId") ?? "" - do { - let clock = try await docStoragePool.getPeerRemoteClock(universalId: id, peer: peer, docId: docId) - call.resolve([ - "docId": clock.docId, - "timestamp": clock.timestamp.timeIntervalSince1970, - ]) - - } catch { - call.reject("Failed to get peer remote clock", nil, error) - } + } + + @objc func getPeerRemoteClock(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let peer = try call.getStringEnsure("peer") + let docId = try call.getStringEnsure("docId") + + let clock = try await docStoragePool.getPeerRemoteClock(universalId: id, peer: peer, docId: docId) + call.resolve([ + "docId": clock.docId, + "timestamp": clock.timestamp, + ]) + + } catch { + call.reject("Failed to get peer remote clock, \(error)", nil, error) + } } - - @objc func setPeerRemoteClock(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let peer = call.getString("peer") ?? "" - let docId = call.getString("docId") ?? "" - let timestamp = call.getDouble("timestamp") ?? 0 - do { - try await docStoragePool.setPeerRemoteClock( - universalId: id, - peer: peer, - docId: docId, - clock: Date(timeIntervalSince1970: timestamp) - ) - call.resolve() - } catch { - call.reject("Failed to set peer remote clock", nil, error) - } + } + + @objc func setPeerRemoteClock(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let peer = try call.getStringEnsure("peer") + let docId = try call.getStringEnsure("docId") + let timestamp = try call.getIntEnsure("timestamp") + try await docStoragePool.setPeerRemoteClock( + universalId: id, + peer: peer, + docId: docId, + clock: Int64(timestamp) + ) + call.resolve() + } catch { + call.reject("Failed to set peer remote clock, \(error)", nil, error) + } } - - @objc func getPeerPulledRemoteClocks(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let peer = call.getString("peer") ?? 
"" - do { - let clocks = try await docStoragePool.getPeerPulledRemoteClocks(universalId: id, peer: peer) - let mapped = clocks.map { [ - "docId": $0.docId, - "timestamp": $0.timestamp.timeIntervalSince1970, - ] } - call.resolve(["clocks": mapped]) - - } catch { - call.reject("Failed to get peer pulled remote clocks", nil, error) - } + } + + @objc func getPeerPulledRemoteClocks(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let peer = try call.getStringEnsure("peer") + + let clocks = try await docStoragePool.getPeerPulledRemoteClocks(universalId: id, peer: peer) + let mapped = clocks.map { [ + "docId": $0.docId, + "timestamp": $0.timestamp, + ] } + call.resolve(["clocks": mapped]) + } catch { + call.reject("Failed to get peer pulled remote clocks, \(error)", nil, error) + } } - - @objc func getPeerPulledRemoteClock(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let peer = call.getString("peer") ?? "" - let docId = call.getString("docId") ?? "" - do { - let clock = try await docStoragePool.getPeerPulledRemoteClock(universalId: id, peer: peer, docId: docId) - call.resolve([ - "docId": clock.docId, - "timestamp": clock.timestamp.timeIntervalSince1970, - ]) - - } catch { - call.reject("Failed to get peer pulled remote clock", nil, error) - } + } + + @objc func getPeerPulledRemoteClock(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let peer = try call.getStringEnsure("peer") + let docId = try call.getStringEnsure("docId") + + let clock = try await docStoragePool.getPeerPulledRemoteClock(universalId: id, peer: peer, docId: docId) + call.resolve([ + "docId": clock.docId, + "timestamp": clock.timestamp, + ]) + + } catch { + call.reject("Failed to get peer pulled remote clock, \(error)", nil, error) + } } - - @objc func setPeerPulledRemoteClock(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let peer = call.getString("peer") ?? "" - let docId = call.getString("docId") ?? "" - let timestamp = call.getDouble("timestamp") ?? 0 - do { - try await docStoragePool.setPeerPulledRemoteClock( - universalId: id, - peer: peer, - docId: docId, - clock: Date(timeIntervalSince1970: timestamp) - ) - call.resolve() - } catch { - call.reject("Failed to set peer pulled remote clock", nil, error) - } + } + + @objc func setPeerPulledRemoteClock(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let peer = try call.getStringEnsure("peer") + let docId = try call.getStringEnsure("docId") + let timestamp = try call.getIntEnsure("timestamp") + + try await docStoragePool.setPeerPulledRemoteClock( + universalId: id, + peer: peer, + docId: docId, + clock: Int64(timestamp) + ) + call.resolve() + } catch { + call.reject("Failed to set peer pulled remote clock, \(error)", nil, error) + } } - - @objc func getPeerPushedClocks(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let peer = call.getString("peer") ?? 
"" - do { - let clocks = try await docStoragePool.getPeerPushedClocks(universalId: id, peer: peer) - let mapped = clocks.map { [ - "docId": $0.docId, - "timestamp": $0.timestamp.timeIntervalSince1970, - ] } - call.resolve(["clocks": mapped]) - - } catch { - call.reject("Failed to get peer pushed clocks", nil, error) - } + } + + @objc func getPeerPushedClocks(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let peer = try call.getStringEnsure("peer") + let clocks = try await docStoragePool.getPeerPushedClocks(universalId: id, peer: peer) + let mapped = clocks.map { [ + "docId": $0.docId, + "timestamp": $0.timestamp, + ] } + call.resolve(["clocks": mapped]) + + } catch { + call.reject("Failed to get peer pushed clocks, \(error)", nil, error) + } } - - @objc func setPeerPushedClock(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? "" - let peer = call.getString("peer") ?? "" - let docId = call.getString("docId") ?? "" - let timestamp = call.getDouble("timestamp") ?? 0 - do { - try await docStoragePool.setPeerPushedClock( - universalId: id, - peer: peer, - docId: docId, - clock: Date(timeIntervalSince1970: timestamp) - ) - call.resolve() - } catch { - call.reject("Failed to set peer pushed clock", nil, error) - } + } + + @objc func setPeerPushedClock(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + let peer = try call.getStringEnsure("peer") + let docId = try call.getStringEnsure("docId") + let timestamp = try call.getIntEnsure("timestamp") + + try await docStoragePool.setPeerPushedClock( + universalId: id, + peer: peer, + docId: docId, + clock: Int64(timestamp) + ) + call.resolve() + } catch { + call.reject("Failed to set peer pushed clock, \(error)", nil, error) + } } - - @objc func clearClocks(_ call: CAPPluginCall) async { - let id = call.getString("id") ?? 
"" - do { - try await docStoragePool.clearClocks(universalId: id) - call.resolve() - } catch { - call.reject("Failed to clear clocks", nil, error) - } + } + + @objc func clearClocks(_ call: CAPPluginCall) { + Task { + do { + let id = try call.getStringEnsure("id") + try await docStoragePool.clearClocks(universalId: id) + call.resolve() + } catch { + call.reject("Failed to clear clocks, \(error)", nil, error) + } } + } } diff --git a/packages/frontend/apps/ios/App/App/uniffi/affine_mobile_native.swift b/packages/frontend/apps/ios/App/App/uniffi/affine_mobile_native.swift index f318d6d36f748..c96cec1083593 100644 --- a/packages/frontend/apps/ios/App/App/uniffi/affine_mobile_native.swift +++ b/packages/frontend/apps/ios/App/App/uniffi/affine_mobile_native.swift @@ -492,65 +492,25 @@ private struct FfiConverterString: FfiConverter { } } -#if swift(>=5.8) - @_documentation(visibility: private) -#endif -private struct FfiConverterTimestamp: FfiConverterRustBuffer { - typealias SwiftType = Date - - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Date { - let seconds: Int64 = try readInt(&buf) - let nanoseconds: UInt32 = try readInt(&buf) - if seconds >= 0 { - let delta = Double(seconds) + (Double(nanoseconds) / 1.0e9) - return Date(timeIntervalSince1970: delta) - } else { - let delta = Double(seconds) - (Double(nanoseconds) / 1.0e9) - return Date(timeIntervalSince1970: delta) - } - } - - public static func write(_ value: Date, into buf: inout [UInt8]) { - var delta = value.timeIntervalSince1970 - var sign: Int64 = 1 - if delta < 0 { - // The nanoseconds portion of the epoch offset must always be - // positive, to simplify the calculation we will use the absolute - // value of the offset. - sign = -1 - delta = -delta - } - if delta.rounded(.down) > Double(Int64.max) { - fatalError("Timestamp overflow, exceeds max bounds supported by Uniffi") - } - let seconds = Int64(delta) - let nanoseconds = UInt32((delta - Double(seconds)) * 1.0e9) - writeInt(&buf, sign * seconds) - writeInt(&buf, nanoseconds) - } -} - public protocol DocStoragePoolProtocol: AnyObject { - func checkpoint(universalId: String) async throws - func clearClocks(universalId: String) async throws - func close(universalId: String) async throws - /** * Initialize the database and run migrations. */ - func connect(universalId: String) async throws + func connect(universalId: String, path: String) async throws func deleteBlob(universalId: String, key: String, permanently: Bool) async throws func deleteDoc(universalId: String, docId: String) async throws + func disconnect(universalId: String) async throws + func getBlob(universalId: String, key: String) async throws -> Blob? func getDocClock(universalId: String, docId: String) async throws -> DocClock? - func getDocClocks(universalId: String, after: Date?) async throws -> [DocClock] + func getDocClocks(universalId: String, after: Int64?) async throws -> [DocClock] func getDocSnapshot(universalId: String, docId: String) async throws -> DocRecord? 
@@ -566,13 +526,11 @@ public protocol DocStoragePoolProtocol: AnyObject { func getPeerRemoteClocks(universalId: String, peer: String) async throws -> [DocClock] - func isClosed(universalId: String) -> Bool - func listBlobs(universalId: String) async throws -> [ListedBlob] - func markUpdatesMerged(universalId: String, docId: String, updates: [Date]) async throws -> UInt32 + func markUpdatesMerged(universalId: String, docId: String, updates: [Int64]) async throws -> UInt32 - func pushUpdate(universalId: String, docId: String, update: String) async throws -> Date + func pushUpdate(universalId: String, docId: String, update: String) async throws -> Int64 func releaseBlobs(universalId: String) async throws @@ -580,15 +538,13 @@ public protocol DocStoragePoolProtocol: AnyObject { func setDocSnapshot(universalId: String, snapshot: DocRecord) async throws -> Bool - func setPeerPulledRemoteClock(universalId: String, peer: String, docId: String, clock: Date) async throws + func setPeerPulledRemoteClock(universalId: String, peer: String, docId: String, clock: Int64) async throws - func setPeerPushedClock(universalId: String, peer: String, docId: String, clock: Date) async throws + func setPeerPushedClock(universalId: String, peer: String, docId: String, clock: Int64) async throws - func setPeerRemoteClock(universalId: String, peer: String, docId: String, clock: Date) async throws + func setPeerRemoteClock(universalId: String, peer: String, docId: String, clock: Int64) async throws func setSpaceId(universalId: String, spaceId: String) async throws - - func validate(universalId: String) async throws -> Bool } open class DocStoragePool: @@ -640,23 +596,6 @@ open class DocStoragePool: try! rustCall { uniffi_affine_mobile_native_fn_free_docstoragepool(pointer, $0) } } - open func checkpoint(universalId: String) async throws { - return - try await uniffiRustCallAsync( - rustFutureFunc: { - uniffi_affine_mobile_native_fn_method_docstoragepool_checkpoint( - self.uniffiClonePointer(), - FfiConverterString.lower(universalId) - ) - }, - pollFunc: ffi_affine_mobile_native_rust_future_poll_void, - completeFunc: ffi_affine_mobile_native_rust_future_complete_void, - freeFunc: ffi_affine_mobile_native_rust_future_free_void, - liftFunc: { $0 }, - errorHandler: FfiConverterTypeUniffiError.lift - ) - } - open func clearClocks(universalId: String) async throws { return try await uniffiRustCallAsync( @@ -674,13 +613,16 @@ open class DocStoragePool: ) } - open func close(universalId: String) async throws { + /** + * Initialize the database and run migrations. + */ + open func connect(universalId: String, path: String) async throws { return try await uniffiRustCallAsync( rustFutureFunc: { - uniffi_affine_mobile_native_fn_method_docstoragepool_close( + uniffi_affine_mobile_native_fn_method_docstoragepool_connect( self.uniffiClonePointer(), - FfiConverterString.lower(universalId) + FfiConverterString.lower(universalId), FfiConverterString.lower(path) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_void, @@ -691,16 +633,13 @@ open class DocStoragePool: ) } - /** - * Initialize the database and run migrations. 
- */ - open func connect(universalId: String) async throws { + open func deleteBlob(universalId: String, key: String, permanently: Bool) async throws { return try await uniffiRustCallAsync( rustFutureFunc: { - uniffi_affine_mobile_native_fn_method_docstoragepool_connect( + uniffi_affine_mobile_native_fn_method_docstoragepool_delete_blob( self.uniffiClonePointer(), - FfiConverterString.lower(universalId) + FfiConverterString.lower(universalId), FfiConverterString.lower(key), FfiConverterBool.lower(permanently) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_void, @@ -711,13 +650,13 @@ open class DocStoragePool: ) } - open func deleteBlob(universalId: String, key: String, permanently: Bool) async throws { + open func deleteDoc(universalId: String, docId: String) async throws { return try await uniffiRustCallAsync( rustFutureFunc: { - uniffi_affine_mobile_native_fn_method_docstoragepool_delete_blob( + uniffi_affine_mobile_native_fn_method_docstoragepool_delete_doc( self.uniffiClonePointer(), - FfiConverterString.lower(universalId), FfiConverterString.lower(key), FfiConverterBool.lower(permanently) + FfiConverterString.lower(universalId), FfiConverterString.lower(docId) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_void, @@ -728,13 +667,13 @@ open class DocStoragePool: ) } - open func deleteDoc(universalId: String, docId: String) async throws { + open func disconnect(universalId: String) async throws { return try await uniffiRustCallAsync( rustFutureFunc: { - uniffi_affine_mobile_native_fn_method_docstoragepool_delete_doc( + uniffi_affine_mobile_native_fn_method_docstoragepool_disconnect( self.uniffiClonePointer(), - FfiConverterString.lower(universalId), FfiConverterString.lower(docId) + FfiConverterString.lower(universalId) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_void, @@ -779,13 +718,13 @@ open class DocStoragePool: ) } - open func getDocClocks(universalId: String, after: Date?) async throws -> [DocClock] { + open func getDocClocks(universalId: String, after: Int64?) async throws -> [DocClock] { return try await uniffiRustCallAsync( rustFutureFunc: { uniffi_affine_mobile_native_fn_method_docstoragepool_get_doc_clocks( self.uniffiClonePointer(), - FfiConverterString.lower(universalId), FfiConverterOptionTimestamp.lower(after) + FfiConverterString.lower(universalId), FfiConverterOptionInt64.lower(after) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_rust_buffer, @@ -915,13 +854,6 @@ open class DocStoragePool: ) } - open func isClosed(universalId: String) -> Bool { - return try! FfiConverterBool.lift(try! 
rustCall { - uniffi_affine_mobile_native_fn_method_docstoragepool_is_closed(self.uniffiClonePointer(), - FfiConverterString.lower(universalId), $0) - }) - } - open func listBlobs(universalId: String) async throws -> [ListedBlob] { return try await uniffiRustCallAsync( @@ -939,13 +871,13 @@ open class DocStoragePool: ) } - open func markUpdatesMerged(universalId: String, docId: String, updates: [Date]) async throws -> UInt32 { + open func markUpdatesMerged(universalId: String, docId: String, updates: [Int64]) async throws -> UInt32 { return try await uniffiRustCallAsync( rustFutureFunc: { uniffi_affine_mobile_native_fn_method_docstoragepool_mark_updates_merged( self.uniffiClonePointer(), - FfiConverterString.lower(universalId), FfiConverterString.lower(docId), FfiConverterSequenceTimestamp.lower(updates) + FfiConverterString.lower(universalId), FfiConverterString.lower(docId), FfiConverterSequenceInt64.lower(updates) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_u32, @@ -956,7 +888,7 @@ open class DocStoragePool: ) } - open func pushUpdate(universalId: String, docId: String, update: String) async throws -> Date { + open func pushUpdate(universalId: String, docId: String, update: String) async throws -> Int64 { return try await uniffiRustCallAsync( rustFutureFunc: { @@ -965,10 +897,10 @@ open class DocStoragePool: FfiConverterString.lower(universalId), FfiConverterString.lower(docId), FfiConverterString.lower(update) ) }, - pollFunc: ffi_affine_mobile_native_rust_future_poll_rust_buffer, - completeFunc: ffi_affine_mobile_native_rust_future_complete_rust_buffer, - freeFunc: ffi_affine_mobile_native_rust_future_free_rust_buffer, - liftFunc: FfiConverterTimestamp.lift, + pollFunc: ffi_affine_mobile_native_rust_future_poll_i64, + completeFunc: ffi_affine_mobile_native_rust_future_complete_i64, + freeFunc: ffi_affine_mobile_native_rust_future_free_i64, + liftFunc: FfiConverterInt64.lift, errorHandler: FfiConverterTypeUniffiError.lift ) } @@ -1024,13 +956,13 @@ open class DocStoragePool: ) } - open func setPeerPulledRemoteClock(universalId: String, peer: String, docId: String, clock: Date) async throws { + open func setPeerPulledRemoteClock(universalId: String, peer: String, docId: String, clock: Int64) async throws { return try await uniffiRustCallAsync( rustFutureFunc: { uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_pulled_remote_clock( self.uniffiClonePointer(), - FfiConverterString.lower(universalId), FfiConverterString.lower(peer), FfiConverterString.lower(docId), FfiConverterTimestamp.lower(clock) + FfiConverterString.lower(universalId), FfiConverterString.lower(peer), FfiConverterString.lower(docId), FfiConverterInt64.lower(clock) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_void, @@ -1041,13 +973,13 @@ open class DocStoragePool: ) } - open func setPeerPushedClock(universalId: String, peer: String, docId: String, clock: Date) async throws { + open func setPeerPushedClock(universalId: String, peer: String, docId: String, clock: Int64) async throws { return try await uniffiRustCallAsync( rustFutureFunc: { uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_pushed_clock( self.uniffiClonePointer(), - FfiConverterString.lower(universalId), FfiConverterString.lower(peer), FfiConverterString.lower(docId), FfiConverterTimestamp.lower(clock) + FfiConverterString.lower(universalId), FfiConverterString.lower(peer), FfiConverterString.lower(docId), FfiConverterInt64.lower(clock) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_void, @@ -1058,13 
+990,13 @@ open class DocStoragePool: ) } - open func setPeerRemoteClock(universalId: String, peer: String, docId: String, clock: Date) async throws { + open func setPeerRemoteClock(universalId: String, peer: String, docId: String, clock: Int64) async throws { return try await uniffiRustCallAsync( rustFutureFunc: { uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_remote_clock( self.uniffiClonePointer(), - FfiConverterString.lower(universalId), FfiConverterString.lower(peer), FfiConverterString.lower(docId), FfiConverterTimestamp.lower(clock) + FfiConverterString.lower(universalId), FfiConverterString.lower(peer), FfiConverterString.lower(docId), FfiConverterInt64.lower(clock) ) }, pollFunc: ffi_affine_mobile_native_rust_future_poll_void, @@ -1091,23 +1023,6 @@ open class DocStoragePool: errorHandler: FfiConverterTypeUniffiError.lift ) } - - open func validate(universalId: String) async throws -> Bool { - return - try await uniffiRustCallAsync( - rustFutureFunc: { - uniffi_affine_mobile_native_fn_method_docstoragepool_validate( - self.uniffiClonePointer(), - FfiConverterString.lower(universalId) - ) - }, - pollFunc: ffi_affine_mobile_native_rust_future_poll_i8, - completeFunc: ffi_affine_mobile_native_rust_future_complete_i8, - freeFunc: ffi_affine_mobile_native_rust_future_free_i8, - liftFunc: FfiConverterBool.lift, - errorHandler: FfiConverterTypeUniffiError.lift - ) - } } #if swift(>=5.8) @@ -1162,11 +1077,11 @@ public struct Blob { public var data: String public var mime: String public var size: Int64 - public var createdAt: Date + public var createdAt: Int64 // Default memberwise initializers are never public by default, so we // declare one manually. - public init(key: String, data: String, mime: String, size: Int64, createdAt: Date) { + public init(key: String, data: String, mime: String, size: Int64, createdAt: Int64) { self.key = key self.data = data self.mime = mime @@ -1215,7 +1130,7 @@ public struct FfiConverterTypeBlob: FfiConverterRustBuffer { data: FfiConverterString.read(from: &buf), mime: FfiConverterString.read(from: &buf), size: FfiConverterInt64.read(from: &buf), - createdAt: FfiConverterTimestamp.read(from: &buf) + createdAt: FfiConverterInt64.read(from: &buf) ) } @@ -1224,7 +1139,7 @@ public struct FfiConverterTypeBlob: FfiConverterRustBuffer { FfiConverterString.write(value.data, into: &buf) FfiConverterString.write(value.mime, into: &buf) FfiConverterInt64.write(value.size, into: &buf) - FfiConverterTimestamp.write(value.createdAt, into: &buf) + FfiConverterInt64.write(value.createdAt, into: &buf) } } @@ -1244,11 +1159,11 @@ public func FfiConverterTypeBlob_lower(_ value: Blob) -> RustBuffer { public struct DocClock { public var docId: String - public var timestamp: Date + public var timestamp: Int64 // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init(docId: String, timestamp: Date) { + public init(docId: String, timestamp: Int64) { self.docId = docId self.timestamp = timestamp } @@ -1279,13 +1194,13 @@ public struct FfiConverterTypeDocClock: FfiConverterRustBuffer { return try DocClock( docId: FfiConverterString.read(from: &buf), - timestamp: FfiConverterTimestamp.read(from: &buf) + timestamp: FfiConverterInt64.read(from: &buf) ) } public static func write(_ value: DocClock, into buf: inout [UInt8]) { FfiConverterString.write(value.docId, into: &buf) - FfiConverterTimestamp.write(value.timestamp, into: &buf) + FfiConverterInt64.write(value.timestamp, into: &buf) } } @@ -1305,14 +1220,14 @@ public func FfiConverterTypeDocClock_lower(_ value: DocClock) -> RustBuffer { public struct DocRecord { public var docId: String - public var data: String - public var timestamp: Date + public var bin: String + public var timestamp: Int64 // Default memberwise initializers are never public by default, so we // declare one manually. - public init(docId: String, data: String, timestamp: Date) { + public init(docId: String, bin: String, timestamp: Int64) { self.docId = docId - self.data = data + self.bin = bin self.timestamp = timestamp } } @@ -1322,7 +1237,7 @@ extension DocRecord: Equatable, Hashable { if lhs.docId != rhs.docId { return false } - if lhs.data != rhs.data { + if lhs.bin != rhs.bin { return false } if lhs.timestamp != rhs.timestamp { @@ -1333,7 +1248,7 @@ extension DocRecord: Equatable, Hashable { public func hash(into hasher: inout Hasher) { hasher.combine(docId) - hasher.combine(data) + hasher.combine(bin) hasher.combine(timestamp) } } @@ -1346,15 +1261,15 @@ public struct FfiConverterTypeDocRecord: FfiConverterRustBuffer { return try DocRecord( docId: FfiConverterString.read(from: &buf), - data: FfiConverterString.read(from: &buf), - timestamp: FfiConverterTimestamp.read(from: &buf) + bin: FfiConverterString.read(from: &buf), + timestamp: FfiConverterInt64.read(from: &buf) ) } public static func write(_ value: DocRecord, into buf: inout [UInt8]) { FfiConverterString.write(value.docId, into: &buf) - FfiConverterString.write(value.data, into: &buf) - FfiConverterTimestamp.write(value.timestamp, into: &buf) + FfiConverterString.write(value.bin, into: &buf) + FfiConverterInt64.write(value.timestamp, into: &buf) } } @@ -1374,15 +1289,15 @@ public func FfiConverterTypeDocRecord_lower(_ value: DocRecord) -> RustBuffer { public struct DocUpdate { public var docId: String - public var createdAt: Date - public var data: String + public var timestamp: Int64 + public var bin: String // Default memberwise initializers are never public by default, so we // declare one manually. 
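Note: alongside the timestamp change, the record structs rename their payload field from `data` to `bin` (still a base64 string at this boundary) and `createdAt` to `timestamp`. A TypeScript sketch of the resulting wire shape — the interface below is illustrative; the canonical types live in `@affine/nbstore`:

```ts
// Illustrative wire shape of a doc record after the rename; not the
// canonical type, which is defined in @affine/nbstore.
interface WireDocRecord {
  docId: string;
  bin: string; // base64-encoded payload (was `data`)
  timestamp: number; // epoch milliseconds (was a Date/Timestamp)
}

const record: WireDocRecord = {
  docId: 'doc-1',
  bin: Buffer.from([0x01, 0x02]).toString('base64'),
  timestamp: Date.now(),
};
```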
- public init(docId: String, createdAt: Date, data: String) { + public init(docId: String, timestamp: Int64, bin: String) { self.docId = docId - self.createdAt = createdAt - self.data = data + self.timestamp = timestamp + self.bin = bin } } @@ -1391,10 +1306,10 @@ extension DocUpdate: Equatable, Hashable { if lhs.docId != rhs.docId { return false } - if lhs.createdAt != rhs.createdAt { + if lhs.timestamp != rhs.timestamp { return false } - if lhs.data != rhs.data { + if lhs.bin != rhs.bin { return false } return true @@ -1402,8 +1317,8 @@ extension DocUpdate: Equatable, Hashable { public func hash(into hasher: inout Hasher) { hasher.combine(docId) - hasher.combine(createdAt) - hasher.combine(data) + hasher.combine(timestamp) + hasher.combine(bin) } } @@ -1415,15 +1330,15 @@ public struct FfiConverterTypeDocUpdate: FfiConverterRustBuffer { return try DocUpdate( docId: FfiConverterString.read(from: &buf), - createdAt: FfiConverterTimestamp.read(from: &buf), - data: FfiConverterString.read(from: &buf) + timestamp: FfiConverterInt64.read(from: &buf), + bin: FfiConverterString.read(from: &buf) ) } public static func write(_ value: DocUpdate, into buf: inout [UInt8]) { FfiConverterString.write(value.docId, into: &buf) - FfiConverterTimestamp.write(value.createdAt, into: &buf) - FfiConverterString.write(value.data, into: &buf) + FfiConverterInt64.write(value.timestamp, into: &buf) + FfiConverterString.write(value.bin, into: &buf) } } @@ -1445,11 +1360,11 @@ public struct ListedBlob { public var key: String public var size: Int64 public var mime: String - public var createdAt: Date + public var createdAt: Int64 // Default memberwise initializers are never public by default, so we // declare one manually. - public init(key: String, size: Int64, mime: String, createdAt: Date) { + public init(key: String, size: Int64, mime: String, createdAt: Int64) { self.key = key self.size = size self.mime = mime @@ -1492,7 +1407,7 @@ public struct FfiConverterTypeListedBlob: FfiConverterRustBuffer { key: FfiConverterString.read(from: &buf), size: FfiConverterInt64.read(from: &buf), mime: FfiConverterString.read(from: &buf), - createdAt: FfiConverterTimestamp.read(from: &buf) + createdAt: FfiConverterInt64.read(from: &buf) ) } @@ -1500,7 +1415,7 @@ public struct FfiConverterTypeListedBlob: FfiConverterRustBuffer { FfiConverterString.write(value.key, into: &buf) FfiConverterInt64.write(value.size, into: &buf) FfiConverterString.write(value.mime, into: &buf) - FfiConverterTimestamp.write(value.createdAt, into: &buf) + FfiConverterInt64.write(value.createdAt, into: &buf) } } @@ -1588,21 +1503,11 @@ public func FfiConverterTypeSetBlob_lower(_ value: SetBlob) -> RustBuffer { } public enum UniffiError { - case GetUserDocumentDirectoryFailed - case CreateAffineDirFailed(String - ) - case EmptyDocStoragePath - case EmptySpaceId - case SqlxError(String + case Err(String ) case Base64DecodingError(String ) - case InvalidUniversalId(String - ) - case InvalidSpaceType(String - ) - case ConcatSpaceDirFailed(String - ) + case TimestampDecodingError } #if swift(>=5.8) @@ -1614,65 +1519,29 @@ public struct FfiConverterTypeUniffiError: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> UniffiError { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .GetUserDocumentDirectoryFailed - case 2: return try .CreateAffineDirFailed( - FfiConverterString.read(from: &buf) - ) - case 3: return .EmptyDocStoragePath - case 4: return .EmptySpaceId - case 5: return 
try .SqlxError( - FfiConverterString.read(from: &buf) - ) - case 6: return try .Base64DecodingError( - FfiConverterString.read(from: &buf) - ) - case 7: return try .InvalidUniversalId( + case 1: return try .Err( FfiConverterString.read(from: &buf) ) - case 8: return try .InvalidSpaceType( - FfiConverterString.read(from: &buf) - ) - case 9: return try .ConcatSpaceDirFailed( + case 2: return try .Base64DecodingError( FfiConverterString.read(from: &buf) ) + case 3: return .TimestampDecodingError default: throw UniffiInternalError.unexpectedEnumCase } } public static func write(_ value: UniffiError, into buf: inout [UInt8]) { switch value { - case .GetUserDocumentDirectoryFailed: + case let .Err(v1): writeInt(&buf, Int32(1)) - - case let .CreateAffineDirFailed(v1): - writeInt(&buf, Int32(2)) - FfiConverterString.write(v1, into: &buf) - - case .EmptyDocStoragePath: - writeInt(&buf, Int32(3)) - - case .EmptySpaceId: - writeInt(&buf, Int32(4)) - - case let .SqlxError(v1): - writeInt(&buf, Int32(5)) FfiConverterString.write(v1, into: &buf) case let .Base64DecodingError(v1): - writeInt(&buf, Int32(6)) - FfiConverterString.write(v1, into: &buf) - - case let .InvalidUniversalId(v1): - writeInt(&buf, Int32(7)) - FfiConverterString.write(v1, into: &buf) - - case let .InvalidSpaceType(v1): - writeInt(&buf, Int32(8)) + writeInt(&buf, Int32(2)) FfiConverterString.write(v1, into: &buf) - case let .ConcatSpaceDirFailed(v1): - writeInt(&buf, Int32(9)) - FfiConverterString.write(v1, into: &buf) + case .TimestampDecodingError: + writeInt(&buf, Int32(3)) } } } @@ -1688,8 +1557,8 @@ extension UniffiError: Foundation.LocalizedError { #if swift(>=5.8) @_documentation(visibility: private) #endif -private struct FfiConverterOptionTimestamp: FfiConverterRustBuffer { - typealias SwiftType = Date? +private struct FfiConverterOptionInt64: FfiConverterRustBuffer { + typealias SwiftType = Int64? 
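Note: the error enum collapses nine specific variants into three (`Err(String)`, `Base64DecodingError(String)`, `TimestampDecodingError`), so JS callers mostly see a message string on a rejected promise. A hypothetical defensive wrapper — not part of this diff — that tags failures with the operation name:

```ts
// Hypothetical helper: UniffiError variants surface in JS as plain promise
// rejections, so the message string is the only portable payload.
async function withOpContext<T>(
  op: string,
  call: () => Promise<T>
): Promise<T> {
  try {
    return await call();
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);
    throw new Error(`[nbstore] ${op} failed: ${message}`);
  }
}
```

Usage would look like `withOpContext('pushUpdate', () => NbStoreNativeDBApis.pushUpdate(id, docId, update))`.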
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -1697,13 +1566,13 @@ private struct FfiConverterOptionTimestamp: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTimestamp.write(value, into: &buf) + FfiConverterInt64.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTimestamp.read(from: &buf) + case 1: return try FfiConverterInt64.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } @@ -1784,23 +1653,23 @@ private struct FfiConverterOptionTypeDocRecord: FfiConverterRustBuffer { #if swift(>=5.8) @_documentation(visibility: private) #endif -private struct FfiConverterSequenceTimestamp: FfiConverterRustBuffer { - typealias SwiftType = [Date] +private struct FfiConverterSequenceInt64: FfiConverterRustBuffer { + typealias SwiftType = [Int64] - public static func write(_ value: [Date], into buf: inout [UInt8]) { + public static func write(_ value: [Int64], into buf: inout [UInt8]) { let len = Int32(value.count) writeInt(&buf, len) for item in value { - FfiConverterTimestamp.write(item, into: &buf) + FfiConverterInt64.write(item, into: &buf) } } - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [Date] { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [Int64] { let len: Int32 = try readInt(&buf) - var seq = [Date]() + var seq = [Int64]() seq.reserveCapacity(Int(len)) for _ in 0 ..< len { - try seq.append(FfiConverterTimestamp.read(from: &buf)) + try seq.append(FfiConverterInt64.read(from: &buf)) } return seq } @@ -1928,16 +1797,6 @@ private func uniffiFutureContinuationCallback(handle: UInt64, pollResult: Int8) } } -public func getDbPath(peer: String, spaceType: String, id: String) throws -> String { - return try FfiConverterString.lift(rustCallWithError(FfiConverterTypeUniffiError.lift) { - uniffi_affine_mobile_native_fn_func_get_db_path( - FfiConverterString.lower(peer), - FfiConverterString.lower(spaceType), - FfiConverterString.lower(id), $0 - ) - }) -} - public func hashcashMint(resource: String, bits: UInt32) -> String { return try! FfiConverterString.lift(try! rustCall { uniffi_affine_mobile_native_fn_func_hashcash_mint( @@ -1947,6 +1806,13 @@ public func hashcashMint(resource: String, bits: UInt32) -> String { }) } +public func newDocStoragePool() -> DocStoragePool { + return try! FfiConverterTypeDocStoragePool.lift(try! 
rustCall { + uniffi_affine_mobile_native_fn_func_new_doc_storage_pool($0 + ) + }) +} + private enum InitializationResult { case ok case contractVersionMismatch @@ -1963,22 +1829,16 @@ private var initializationResult: InitializationResult = { if bindings_contract_version != scaffolding_contract_version { return InitializationResult.contractVersionMismatch } - if uniffi_affine_mobile_native_checksum_func_get_db_path() != 65350 { - return InitializationResult.apiChecksumMismatch - } if uniffi_affine_mobile_native_checksum_func_hashcash_mint() != 23633 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_checkpoint() != 36299 { + if uniffi_affine_mobile_native_checksum_func_new_doc_storage_pool() != 32882 { return InitializationResult.apiChecksumMismatch } if uniffi_affine_mobile_native_checksum_method_docstoragepool_clear_clocks() != 51151 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_close() != 46846 { - return InitializationResult.apiChecksumMismatch - } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_connect() != 57961 { + if uniffi_affine_mobile_native_checksum_method_docstoragepool_connect() != 19047 { return InitializationResult.apiChecksumMismatch } if uniffi_affine_mobile_native_checksum_method_docstoragepool_delete_blob() != 53695 { @@ -1987,13 +1847,16 @@ private var initializationResult: InitializationResult = { if uniffi_affine_mobile_native_checksum_method_docstoragepool_delete_doc() != 4005 { return InitializationResult.apiChecksumMismatch } + if uniffi_affine_mobile_native_checksum_method_docstoragepool_disconnect() != 20410 { + return InitializationResult.apiChecksumMismatch + } if uniffi_affine_mobile_native_checksum_method_docstoragepool_get_blob() != 56927 { return InitializationResult.apiChecksumMismatch } if uniffi_affine_mobile_native_checksum_method_docstoragepool_get_doc_clock() != 48394 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_get_doc_clocks() != 23822 { + if uniffi_affine_mobile_native_checksum_method_docstoragepool_get_doc_clocks() != 46082 { return InitializationResult.apiChecksumMismatch } if uniffi_affine_mobile_native_checksum_method_docstoragepool_get_doc_snapshot() != 31220 { @@ -2017,16 +1880,13 @@ private var initializationResult: InitializationResult = { if uniffi_affine_mobile_native_checksum_method_docstoragepool_get_peer_remote_clocks() != 14523 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_is_closed() != 40091 { - return InitializationResult.apiChecksumMismatch - } if uniffi_affine_mobile_native_checksum_method_docstoragepool_list_blobs() != 6777 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_mark_updates_merged() != 26982 { + if uniffi_affine_mobile_native_checksum_method_docstoragepool_mark_updates_merged() != 42713 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_push_update() != 54572 { + if uniffi_affine_mobile_native_checksum_method_docstoragepool_push_update() != 20688 { return InitializationResult.apiChecksumMismatch } if uniffi_affine_mobile_native_checksum_method_docstoragepool_release_blobs() != 2203 { @@ -2038,21 +1898,18 @@ private var initializationResult: InitializationResult = { if 
uniffi_affine_mobile_native_checksum_method_docstoragepool_set_doc_snapshot() != 5287 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_set_peer_pulled_remote_clock() != 40733 { + if uniffi_affine_mobile_native_checksum_method_docstoragepool_set_peer_pulled_remote_clock() != 33923 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_set_peer_pushed_clock() != 15697 { + if uniffi_affine_mobile_native_checksum_method_docstoragepool_set_peer_pushed_clock() != 16565 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_set_peer_remote_clock() != 57108 { + if uniffi_affine_mobile_native_checksum_method_docstoragepool_set_peer_remote_clock() != 46506 { return InitializationResult.apiChecksumMismatch } if uniffi_affine_mobile_native_checksum_method_docstoragepool_set_space_id() != 21955 { return InitializationResult.apiChecksumMismatch } - if uniffi_affine_mobile_native_checksum_method_docstoragepool_validate() != 17232 { - return InitializationResult.apiChecksumMismatch - } return InitializationResult.ok }() diff --git a/packages/frontend/apps/ios/App/App/uniffi/affine_mobile_nativeFFI.h b/packages/frontend/apps/ios/App/App/uniffi/affine_mobile_nativeFFI.h index 05519cd029fd5..53e77103572c0 100644 --- a/packages/frontend/apps/ios/App/App/uniffi/affine_mobile_nativeFFI.h +++ b/packages/frontend/apps/ios/App/App/uniffi/affine_mobile_nativeFFI.h @@ -261,24 +261,14 @@ void*_Nonnull uniffi_affine_mobile_native_fn_clone_docstoragepool(void*_Nonnull void uniffi_affine_mobile_native_fn_free_docstoragepool(void*_Nonnull ptr, RustCallStatus *_Nonnull out_status ); #endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_CHECKPOINT -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_CHECKPOINT -uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_checkpoint(void*_Nonnull ptr, RustBuffer universal_id -); -#endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_CLEAR_CLOCKS #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_CLEAR_CLOCKS uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_clear_clocks(void*_Nonnull ptr, RustBuffer universal_id ); #endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_CLOSE -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_CLOSE -uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_close(void*_Nonnull ptr, RustBuffer universal_id -); -#endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_CONNECT #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_CONNECT -uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_connect(void*_Nonnull ptr, RustBuffer universal_id +uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_connect(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer path ); #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_DELETE_BLOB @@ -291,6 +281,11 @@ uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_delete_blob(void*_ uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_delete_doc(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer doc_id ); #endif +#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_DISCONNECT +#define 
UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_DISCONNECT +uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_disconnect(void*_Nonnull ptr, RustBuffer universal_id +); +#endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_GET_BLOB #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_GET_BLOB uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_get_blob(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer key @@ -341,11 +336,6 @@ uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_get_peer_remote_cl uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_get_peer_remote_clocks(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer ); #endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_IS_CLOSED -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_IS_CLOSED -int8_t uniffi_affine_mobile_native_fn_method_docstoragepool_is_closed(void*_Nonnull ptr, RustBuffer universal_id, RustCallStatus *_Nonnull out_status -); -#endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_LIST_BLOBS #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_LIST_BLOBS uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_list_blobs(void*_Nonnull ptr, RustBuffer universal_id @@ -378,17 +368,17 @@ uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_doc_snapshot(v #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_PEER_PULLED_REMOTE_CLOCK #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_PEER_PULLED_REMOTE_CLOCK -uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_pulled_remote_clock(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer, RustBuffer doc_id, RustBuffer clock +uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_pulled_remote_clock(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer, RustBuffer doc_id, int64_t clock ); #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_PEER_PUSHED_CLOCK #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_PEER_PUSHED_CLOCK -uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_pushed_clock(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer, RustBuffer doc_id, RustBuffer clock +uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_pushed_clock(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer, RustBuffer doc_id, int64_t clock ); #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_PEER_REMOTE_CLOCK #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_PEER_REMOTE_CLOCK -uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_remote_clock(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer, RustBuffer doc_id, RustBuffer clock +uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_remote_clock(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer peer, RustBuffer doc_id, int64_t clock ); #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_SET_SPACE_ID @@ -396,19 +386,15 @@ uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_peer_remote_cl uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_set_space_id(void*_Nonnull ptr, RustBuffer universal_id, RustBuffer space_id ); 
#endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_VALIDATE -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_METHOD_DOCSTORAGEPOOL_VALIDATE -uint64_t uniffi_affine_mobile_native_fn_method_docstoragepool_validate(void*_Nonnull ptr, RustBuffer universal_id -); -#endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_FUNC_GET_DB_PATH -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_FUNC_GET_DB_PATH -RustBuffer uniffi_affine_mobile_native_fn_func_get_db_path(RustBuffer peer, RustBuffer space_type, RustBuffer id, RustCallStatus *_Nonnull out_status -); -#endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_FUNC_HASHCASH_MINT #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_FUNC_HASHCASH_MINT RustBuffer uniffi_affine_mobile_native_fn_func_hashcash_mint(RustBuffer resource, uint32_t bits, RustCallStatus *_Nonnull out_status +); +#endif +#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_FUNC_NEW_DOC_STORAGE_POOL +#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_FN_FUNC_NEW_DOC_STORAGE_POOL +void*_Nonnull uniffi_affine_mobile_native_fn_func_new_doc_storage_pool(RustCallStatus *_Nonnull out_status + ); #endif #ifndef UNIFFI_FFIDEF_FFI_AFFINE_MOBILE_NATIVE_RUSTBUFFER_ALLOC @@ -689,12 +675,6 @@ void ffi_affine_mobile_native_rust_future_free_void(uint64_t handle #ifndef UNIFFI_FFIDEF_FFI_AFFINE_MOBILE_NATIVE_RUST_FUTURE_COMPLETE_VOID #define UNIFFI_FFIDEF_FFI_AFFINE_MOBILE_NATIVE_RUST_FUTURE_COMPLETE_VOID void ffi_affine_mobile_native_rust_future_complete_void(uint64_t handle, RustCallStatus *_Nonnull out_status -); -#endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_FUNC_GET_DB_PATH -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_FUNC_GET_DB_PATH -uint16_t uniffi_affine_mobile_native_checksum_func_get_db_path(void - ); #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_FUNC_HASHCASH_MINT @@ -703,9 +683,9 @@ uint16_t uniffi_affine_mobile_native_checksum_func_hashcash_mint(void ); #endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_CHECKPOINT -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_CHECKPOINT -uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_checkpoint(void +#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_FUNC_NEW_DOC_STORAGE_POOL +#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_FUNC_NEW_DOC_STORAGE_POOL +uint16_t uniffi_affine_mobile_native_checksum_func_new_doc_storage_pool(void ); #endif @@ -713,12 +693,6 @@ uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_checkpoint(v #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_CLEAR_CLOCKS uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_clear_clocks(void -); -#endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_CLOSE -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_CLOSE -uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_close(void - ); #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_CONNECT @@ -737,6 +711,12 @@ uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_delete_blob( #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_DELETE_DOC uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_delete_doc(void +); +#endif +#ifndef 
UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_DISCONNECT +#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_DISCONNECT +uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_disconnect(void + ); #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_GET_BLOB @@ -797,12 +777,6 @@ uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_get_peer_rem #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_GET_PEER_REMOTE_CLOCKS uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_get_peer_remote_clocks(void -); -#endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_IS_CLOSED -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_IS_CLOSED -uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_is_closed(void - ); #endif #ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_LIST_BLOBS @@ -863,12 +837,6 @@ uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_set_peer_rem #define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_SET_SPACE_ID uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_set_space_id(void -); -#endif -#ifndef UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_VALIDATE -#define UNIFFI_FFIDEF_UNIFFI_AFFINE_MOBILE_NATIVE_CHECKSUM_METHOD_DOCSTORAGEPOOL_VALIDATE -uint16_t uniffi_affine_mobile_native_checksum_method_docstoragepool_validate(void - ); #endif #ifndef UNIFFI_FFIDEF_FFI_AFFINE_MOBILE_NATIVE_UNIFFI_CONTRACT_VERSION diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/blob.ts b/packages/frontend/apps/ios/src/plugins/nbstore/blob.ts deleted file mode 100644 index c1e0641db9bf0..0000000000000 --- a/packages/frontend/apps/ios/src/plugins/nbstore/blob.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { type BlobRecord, BlobStorageBase, share } from '@affine/nbstore'; - -import { NativeDBConnection } from './db'; - -export class SqliteBlobStorage extends BlobStorageBase { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); - - get db() { - return this.connection.inner; - } - - override async get(key: string) { - return this.db.getBlob(key); - } - - override async set(blob: BlobRecord) { - await this.db.setBlob(blob); - } - - override async delete(key: string, permanently: boolean) { - await this.db.deleteBlob(key, permanently); - } - - override async release() { - await this.db.releaseBlobs(); - } - - override async list() { - return this.db.listBlobs(); - } -} diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/db.ts b/packages/frontend/apps/ios/src/plugins/nbstore/db.ts deleted file mode 100644 index 81121571db7d4..0000000000000 --- a/packages/frontend/apps/ios/src/plugins/nbstore/db.ts +++ /dev/null @@ -1,60 +0,0 @@ -import type { DocStorage } from '@affine/native'; -import { - AutoReconnectConnection, - isValidSpaceType, - type SpaceType, - universalId, -} from '@affine/nbstore'; - -import { NativeDocStorage, NbStoreDocStorage } from './plugin'; - -export class NativeDBConnection extends AutoReconnectConnection { - private readonly universalId: string; - - constructor( - private readonly peer: string, - private readonly type: SpaceType, - private readonly id: string - ) { - super(); - if (!isValidSpaceType(type)) { - throw new TypeError(`Invalid space type: ${type}`); - } - 
this.universalId = universalId({ - peer: peer, - type: type, - id: id, - }); - } - - async getDBPath() { - const { path } = await NbStoreDocStorage.getSpaceDBPath({ - peer: this.peer, - spaceType: this.type, - id: this.id, - }); - return path; - } - - override get shareId(): string { - return `sqlite:${this.peer}:${this.type}:${this.id}`; - } - - override async doConnect() { - const conn = new NativeDocStorage(this.universalId); - await conn.connect(); - console.info('[nbstore] connection established', this.shareId); - return conn; - } - - override doDisconnect(conn: NativeDocStorage) { - conn - .close() - .then(() => { - console.info('[nbstore] connection closed', this.shareId); - }) - .catch(err => { - console.error('[nbstore] connection close failed', this.shareId, err); - }); - } -} diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/definitions.ts b/packages/frontend/apps/ios/src/plugins/nbstore/definitions.ts index cac91534631c4..916606c6c753e 100644 --- a/packages/frontend/apps/ios/src/plugins/nbstore/definitions.ts +++ b/packages/frontend/apps/ios/src/plugins/nbstore/definitions.ts @@ -27,17 +27,13 @@ export interface DocClock { } export interface NbStorePlugin { - getSpaceDBPath: (options: { - peer: string; - spaceType: string; + connect: (options: { id: string; - }) => Promise<{ path: string }>; - create: (options: { id: string; path: string }) => Promise; - connect: (options: { id: string }) => Promise; - close: (options: { id: string }) => Promise; - isClosed: (options: { id: string }) => Promise<{ isClosed: boolean }>; - checkpoint: (options: { id: string }) => Promise; - validate: (options: { id: string }) => Promise<{ isValidate: boolean }>; + spaceId: string; + spaceType: string; + peer: string; + }) => Promise; + disconnect: (options: { id: string }) => Promise; setSpaceId: (options: { id: string; spaceId: string }) => Promise; pushUpdate: (options: { @@ -49,7 +45,7 @@ export interface NbStorePlugin { | { docId: string; // base64 encoded data - data: string; + bin: string; timestamp: number; } | undefined @@ -57,23 +53,24 @@ export interface NbStorePlugin { setDocSnapshot: (options: { id: string; docId: string; - data: string; + bin: string; + timestamp: number; }) => Promise<{ success: boolean }>; - getDocUpdates: (options: { id: string; docId: string }) => Promise< - { + getDocUpdates: (options: { id: string; docId: string }) => Promise<{ + updates: { docId: string; - createdAt: number; + timestamp: number; // base64 encoded data - data: string; - }[] - >; + bin: string; + }[]; + }>; markUpdatesMerged: (options: { id: string; docId: string; timestamps: number[]; }) => Promise<{ count: number }>; deleteDoc: (options: { id: string; docId: string }) => Promise; - getDocClocks: (options: { id: string; after: number }) => Promise< + getDocClocks: (options: { id: string; after?: number | null }) => Promise< { docId: string; timestamp: number; diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/doc.ts b/packages/frontend/apps/ios/src/plugins/nbstore/doc.ts deleted file mode 100644 index 4078f50513d1d..0000000000000 --- a/packages/frontend/apps/ios/src/plugins/nbstore/doc.ts +++ /dev/null @@ -1,83 +0,0 @@ -import { - type DocClocks, - type DocRecord, - DocStorageBase, - type DocUpdate, - share, -} from '@affine/nbstore'; - -import { NativeDBConnection } from './db'; - -export class SqliteDocStorage extends DocStorageBase { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); - - get db() { - return 
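Note: `connect` now takes the decomposed space coordinates instead of a native-side path lookup (`getSpaceDBPath`/`create` are gone), and every call is keyed by the universal id. A sketch of composing and splitting that id with the helpers this diff already uses; the concrete values are illustrative:

```ts
import { parseUniversalId, universalId } from '@affine/nbstore';

// Compose the id the plugin keys every call by (values illustrative)...
const id = universalId({ peer: 'cloud', type: 'workspace', id: 'ws1' });
// ...and split it back into the fields the new `connect` forwards to native.
const { peer, type, id: spaceId } = parseUniversalId(id);
console.log(peer, type, spaceId); // -> 'cloud', 'workspace', 'ws1'
```

Lifecycle then reduces to one `connect`/`disconnect` pair per universal id, replacing the removed `connect`/`close`/`isClosed`/`checkpoint`/`validate` surface.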
this.connection.inner; - } - - override async pushDocUpdate(update: DocUpdate) { - const timestamp = await this.db.pushUpdate(update.docId, update.bin); - - return { docId: update.docId, timestamp }; - } - - override async deleteDoc(docId: string) { - await this.db.deleteDoc(docId); - } - - override async getDocTimestamps(after?: Date) { - const clocks = await this.db.getDocClocks(after); - - return clocks.reduce((ret, cur) => { - ret[cur.docId] = cur.timestamp; - return ret; - }, {} as DocClocks); - } - - override async getDocTimestamp(docId: string) { - return this.db.getDocClock(docId); - } - - protected override async getDocSnapshot(docId: string) { - const snapshot = await this.db.getDocSnapshot(docId); - - if (!snapshot) { - return null; - } - - return { - docId, - bin: snapshot.data, - timestamp: snapshot.timestamp, - }; - } - - protected override async setDocSnapshot( - snapshot: DocRecord - ): Promise { - return this.db.setDocSnapshot({ - docId: snapshot.docId, - data: Buffer.from(snapshot.bin), - timestamp: new Date(snapshot.timestamp), - }); - } - - protected override async getDocUpdates(docId: string) { - return this.db.getDocUpdates(docId).then(updates => - updates.map(update => ({ - docId, - bin: update.data, - timestamp: update.createdAt, - })) - ); - } - - protected override markUpdatesMerged(docId: string, updates: DocRecord[]) { - return this.db.markUpdatesMerged( - docId, - updates.map(update => update.timestamp) - ); - } -} diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/handlers.ts b/packages/frontend/apps/ios/src/plugins/nbstore/handlers.ts deleted file mode 100644 index 946cb79cf5041..0000000000000 --- a/packages/frontend/apps/ios/src/plugins/nbstore/handlers.ts +++ /dev/null @@ -1,128 +0,0 @@ -import { - type BlobRecord, - type DocClock, - type DocUpdate, -} from '@affine/nbstore'; - -import { ensureStorage, getStorage } from './storage'; - -export const nbstoreHandlers = { - connect: async (id: string) => { - await ensureStorage(id); - }, - - close: async (id: string) => { - const store = getStorage(id); - - if (store) { - store.disconnect(); - // The store may be shared with other tabs, so we don't delete it from cache - // the underlying connection will handle the close correctly - // STORE_CACHE.delete(`${spaceType}:${spaceId}`); - } - }, - - pushDocUpdate: async (id: string, update: DocUpdate) => { - const store = await ensureStorage(id); - return store.get('doc').pushDocUpdate(update); - }, - - getDoc: async (id: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('doc').getDoc(docId); - }, - - deleteDoc: async (id: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('doc').deleteDoc(docId); - }, - - getDocTimestamps: async (id: string, after?: Date) => { - const store = await ensureStorage(id); - return store.get('doc').getDocTimestamps(after); - }, - - getDocTimestamp: async (id: string, docId: string) => { - const store = await ensureStorage(id); - return store.get('doc').getDocTimestamp(docId); - }, - - setBlob: async (id: string, blob: BlobRecord) => { - const store = await ensureStorage(id); - return store.get('blob').set(blob); - }, - - getBlob: async (id: string, key: string) => { - const store = await ensureStorage(id); - return store.get('blob').get(key); - }, - - deleteBlob: async (id: string, key: string, permanently: boolean) => { - const store = await ensureStorage(id); - return store.get('blob').delete(key, permanently); - }, - - listBlobs: async (id: string) => 
{
-    const store = await ensureStorage(id);
-    return store.get('blob').list();
-  },
-
-  releaseBlobs: async (id: string) => {
-    const store = await ensureStorage(id);
-    return store.get('blob').release();
-  },
-
-  getPeerRemoteClocks: async (id: string, peer: string) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').getPeerRemoteClocks(peer);
-  },
-
-  getPeerRemoteClock: async (id: string, peer: string, docId: string) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').getPeerRemoteClock(peer, docId);
-  },
-
-  setPeerRemoteClock: async (id: string, peer: string, clock: DocClock) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').setPeerRemoteClock(peer, clock);
-  },
-
-  getPeerPulledRemoteClocks: async (id: string, peer: string) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').getPeerPulledRemoteClocks(peer);
-  },
-
-  getPeerPulledRemoteClock: async (id: string, peer: string, docId: string) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').getPeerPulledRemoteClock(peer, docId);
-  },
-
-  setPeerPulledRemoteClock: async (
-    id: string,
-    peer: string,
-    clock: DocClock
-  ) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').setPeerPulledRemoteClock(peer, clock);
-  },
-
-  getPeerPushedClocks: async (id: string, peer: string) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').getPeerPushedClocks(peer);
-  },
-
-  getPeerPushedClock: async (id: string, peer: string, docId: string) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').getPeerPushedClock(peer, docId);
-  },
-
-  setPeerPushedClock: async (id: string, peer: string, clock: DocClock) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').setPeerPushedClock(peer, clock);
-  },
-
-  clearClocks: async (id: string) => {
-    const store = await ensureStorage(id);
-    return store.get('sync').clearClocks();
-  },
-};
diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/index.ts b/packages/frontend/apps/ios/src/plugins/nbstore/index.ts
index 6d17cae7d3acc..2895ab4d27c8a 100644
--- a/packages/frontend/apps/ios/src/plugins/nbstore/index.ts
+++ b/packages/frontend/apps/ios/src/plugins/nbstore/index.ts
@@ -1,5 +1,304 @@
+import {
+  base64ToUint8Array,
+  uint8ArrayToBase64,
+} from '@affine/core/modules/workspace-engine';
+import {
+  type BlobRecord,
+  type DocClock,
+  type DocRecord,
+  type ListedBlobRecord,
+  parseUniversalId,
+} from '@affine/nbstore';
+import { type NativeDBApis } from '@affine/nbstore/sqlite';
+import { registerPlugin } from '@capacitor/core';
+
+import type { NbStorePlugin } from './definitions';
+
 export * from './definitions';
-export { nbstoreHandlers } from './handlers';
-export { NbStoreDocStorage } from './plugin';
-export * from './storage';
-export { universalId } from '@affine/nbstore';
+
+export const NbStore = registerPlugin<NbStorePlugin>('NbStoreDocStorage');
+
+export const NbStoreNativeDBApis: NativeDBApis = {
+  connect: async function (id: string): Promise<void> {
+    const { peer, type, id: spaceId } = parseUniversalId(id);
+    return await NbStore.connect({ id, spaceId, spaceType: type, peer });
+  },
+  disconnect: function (id: string): Promise<void> {
+    return NbStore.disconnect({ id });
+  },
+  pushUpdate: async function (
+    id: string,
+    docId: string,
+    update: Uint8Array
+  ): Promise<Date> {
+    const { timestamp } = await NbStore.pushUpdate({
+      id,
+      docId,
+      data: await uint8ArrayToBase64(update),
+    });
+    return new Date(timestamp);
+  },
+  
getDocSnapshot: async function ( + id: string, + docId: string + ): Promise { + const snapshot = await NbStore.getDocSnapshot({ id, docId }); + return snapshot + ? { + bin: base64ToUint8Array(snapshot.bin), + docId: snapshot.docId, + timestamp: new Date(snapshot.timestamp), + } + : null; + }, + setDocSnapshot: async function ( + id: string, + snapshot: DocRecord + ): Promise { + const { success } = await NbStore.setDocSnapshot({ + id, + docId: snapshot.docId, + bin: await uint8ArrayToBase64(snapshot.bin), + timestamp: snapshot.timestamp.getTime(), + }); + return success; + }, + getDocUpdates: async function ( + id: string, + docId: string + ): Promise { + const { updates } = await NbStore.getDocUpdates({ id, docId }); + return updates.map(update => ({ + bin: base64ToUint8Array(update.bin), + docId: update.docId, + timestamp: new Date(update.timestamp), + })); + }, + markUpdatesMerged: async function ( + id: string, + docId: string, + updates: Date[] + ): Promise { + const { count } = await NbStore.markUpdatesMerged({ + id, + docId, + timestamps: updates.map(t => t.getTime()), + }); + return count; + }, + deleteDoc: async function (id: string, docId: string): Promise { + await NbStore.deleteDoc({ + id, + docId, + }); + }, + getDocClocks: async function ( + id: string, + after?: Date | undefined | null + ): Promise { + const clocks = await NbStore.getDocClocks({ + id, + after: after?.getTime(), + }); + return clocks.map(c => ({ + docId: c.docId, + timestamp: new Date(c.timestamp), + })); + }, + getDocClock: async function ( + id: string, + docId: string + ): Promise { + const clock = await NbStore.getDocClock({ + id, + docId, + }); + return clock + ? { + timestamp: new Date(clock.timestamp), + docId: clock.docId, + } + : null; + }, + getBlob: async function ( + id: string, + key: string + ): Promise { + const record = await NbStore.getBlob({ + id, + key, + }); + return record + ? 
{ + data: base64ToUint8Array(record.data), + key: record.key, + mime: record.mime, + createdAt: new Date(record.createdAt), + } + : null; + }, + setBlob: async function (id: string, blob: BlobRecord): Promise { + await NbStore.setBlob({ + id, + data: await uint8ArrayToBase64(blob.data), + key: blob.key, + mime: blob.mime, + }); + }, + deleteBlob: async function ( + id: string, + key: string, + permanently: boolean + ): Promise { + await NbStore.deleteBlob({ + id, + key, + permanently, + }); + }, + releaseBlobs: async function (id: string): Promise { + await NbStore.releaseBlobs({ + id, + }); + }, + listBlobs: async function (id: string): Promise { + const listed = await NbStore.listBlobs({ + id, + }); + return listed.map(b => ({ + key: b.key, + mime: b.mime, + size: b.size, + createdAt: new Date(b.createdAt), + })); + }, + getPeerRemoteClocks: async function ( + id: string, + peer: string + ): Promise { + const clocks = await NbStore.getPeerRemoteClocks({ + id, + peer, + }); + + return clocks.map(c => ({ + docId: c.docId, + timestamp: new Date(c.timestamp), + })); + }, + getPeerRemoteClock: async function ( + id: string, + peer: string, + docId: string + ): Promise { + const clock = await NbStore.getPeerRemoteClock({ + id, + peer, + docId, + }); + return { + docId: clock.docId, + timestamp: new Date(clock.timestamp), + }; + }, + setPeerRemoteClock: async function ( + id: string, + peer: string, + docId: string, + clock: Date + ): Promise { + await NbStore.setPeerRemoteClock({ + id, + peer, + docId, + clock: clock.getTime(), + }); + }, + getPeerPulledRemoteClocks: async function ( + id: string, + peer: string + ): Promise { + const clocks = await NbStore.getPeerPulledRemoteClocks({ + id, + peer, + }); + return clocks.map(c => ({ + docId: c.docId, + timestamp: new Date(c.timestamp), + })); + }, + getPeerPulledRemoteClock: async function ( + id: string, + peer: string, + docId: string + ): Promise { + const clock = await NbStore.getPeerPulledRemoteClock({ + id, + peer, + docId, + }); + return { + docId: clock.docId, + timestamp: new Date(clock.timestamp), + }; + }, + setPeerPulledRemoteClock: async function ( + id: string, + peer: string, + docId: string, + clock: Date + ): Promise { + await NbStore.setPeerPulledRemoteClock({ + id, + peer, + docId, + clock: clock.getTime(), + }); + }, + getPeerPushedClocks: async function ( + id: string, + peer: string + ): Promise { + const clocks = await NbStore.getPeerPushedClocks({ + id, + peer, + }); + return clocks.map(c => ({ + docId: c.docId, + timestamp: new Date(c.timestamp), + })); + }, + getPeerPushedClock: async function ( + id: string, + peer: string, + docId: string + ): Promise { + const clock = await NbStore.getPeerPushedClock({ + id, + peer, + docId, + }); + return { + docId: clock.docId, + timestamp: new Date(clock.timestamp), + }; + }, + setPeerPushedClock: async function ( + id: string, + peer: string, + docId: string, + clock: Date + ): Promise { + await NbStore.setPeerPushedClock({ + id, + peer, + docId, + clock: clock.getTime(), + }); + }, + clearClocks: async function (id: string): Promise { + await NbStore.clearClocks({ + id, + }); + }, +}; diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/plugin.ts b/packages/frontend/apps/ios/src/plugins/nbstore/plugin.ts deleted file mode 100644 index ae9ef3e8387a7..0000000000000 --- a/packages/frontend/apps/ios/src/plugins/nbstore/plugin.ts +++ /dev/null @@ -1,312 +0,0 @@ -import { - base64ToUint8Array, - uint8ArrayToBase64, -} from '@affine/core/modules/workspace-engine'; -import { - 
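Note: the wrapper above converts every payload at the bridge — binary data travels as base64 strings and clocks as epoch milliseconds, with `Uint8Array`/`Date` reconstructed on the JS side. A self-contained round-trip sketch, substituting Node's `Buffer` for the app's `uint8ArrayToBase64`/`base64ToUint8Array` helpers:

```ts
// Stand-ins for the app's base64 helpers, to keep this sketch self-contained.
const toBase64 = (bin: Uint8Array): string =>
  Buffer.from(bin).toString('base64');
const fromBase64 = (b64: string): Uint8Array =>
  new Uint8Array(Buffer.from(b64, 'base64'));

const update = new Uint8Array([1, 2, 3]);
// What crosses the Capacitor bridge...
const wire = { bin: toBase64(update), timestamp: Date.now() };
// ...and what the JS caller gets back.
const restored = {
  bin: fromBase64(wire.bin),
  timestamp: new Date(wire.timestamp),
};
console.assert(restored.bin.length === update.length);
```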
type Blob, - type DocClock, - type DocRecord, - type DocStorage, - type DocUpdate, - type ListedBlob, -} from '@affine/native'; -import { registerPlugin } from '@capacitor/core'; - -import type { NbStorePlugin } from './definitions'; - -export const NbStoreDocStorage = - registerPlugin('NbStoreDocStorage'); - -export interface SetBlob { - key: string; - data: Uint8Array; - mime: string; -} - -export class NativeDocStorage implements DocStorage { - constructor(private readonly universalId: string) {} - - /** Initialize the database and run migrations. */ - connect(): Promise { - return NbStoreDocStorage.connect({ - id: this.universalId, - }); - } - - close(): Promise { - return NbStoreDocStorage.close({ - id: this.universalId, - }); - } - - get isClosed(): Promise { - return NbStoreDocStorage.isClosed({ - id: this.universalId, - }).then(result => result.isClosed); - } - /** - * Flush the WAL file to the database file. - * See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B - */ - checkpoint(): Promise { - return NbStoreDocStorage.checkpoint({ - id: this.universalId, - }); - } - - validate(): Promise { - return NbStoreDocStorage.validate({ - id: this.universalId, - }).then(result => result.isValidate); - } - - setSpaceId(spaceId: string): Promise { - return NbStoreDocStorage.setSpaceId({ - id: this.universalId, - spaceId, - }); - } - - async pushUpdate(docId: string, update: Uint8Array): Promise { - return NbStoreDocStorage.pushUpdate({ - id: this.universalId, - docId, - data: await uint8ArrayToBase64(update), - }).then(result => new Date(result.timestamp)); - } - - getDocSnapshot(docId: string): Promise { - return NbStoreDocStorage.getDocSnapshot({ - id: this.universalId, - docId, - }).then(result => { - if (result) { - return { - ...result, - data: base64ToUint8Array(result.data), - timestamp: new Date(result.timestamp), - }; - } - return null; - }); - } - - async setDocSnapshot(snapshot: DocRecord): Promise { - return NbStoreDocStorage.setDocSnapshot({ - id: this.universalId, - docId: snapshot.docId, - data: await uint8ArrayToBase64(snapshot.data), - }).then(result => result.success); - } - - getDocUpdates(docId: string): Promise> { - return NbStoreDocStorage.getDocUpdates({ - id: this.universalId, - docId, - }).then(result => - result.map(update => ({ - ...update, - data: base64ToUint8Array(update.data), - createdAt: new Date(update.createdAt), - })) - ); - } - - markUpdatesMerged(docId: string, updates: Array): Promise { - return NbStoreDocStorage.markUpdatesMerged({ - id: this.universalId, - docId, - timestamps: updates.map(date => date.getTime()), - }).then(result => result.count); - } - - deleteDoc(docId: string): Promise { - return NbStoreDocStorage.deleteDoc({ - id: this.universalId, - docId, - }); - } - - getDocClocks(after: Date): Promise> { - return NbStoreDocStorage.getDocClocks({ - id: this.universalId, - after: after.getTime(), - }).then(result => - result.map(clock => ({ - ...clock, - timestamp: new Date(clock.timestamp), - })) - ); - } - - getDocClock(docId: string): Promise { - return NbStoreDocStorage.getDocClock({ - id: this.universalId, - docId, - }).then(result => { - if (result) { - return { - ...result, - timestamp: new Date(result.timestamp), - }; - } - return null; - }); - } - - getBlob(key: string): Promise { - return NbStoreDocStorage.getBlob({ - id: this.universalId, - key, - }).then(result => { - if (result) { - return { - ...result, - data: base64ToUint8Array(result.data), - createdAt: new 
Date(result.createdAt), - }; - } - return null; - }); - } - - async setBlob(blob: SetBlob): Promise { - return NbStoreDocStorage.setBlob({ - id: this.universalId, - key: blob.key, - data: await uint8ArrayToBase64(blob.data), - mime: blob.mime, - }); - } - - deleteBlob(key: string, permanently: boolean): Promise { - return NbStoreDocStorage.deleteBlob({ - id: this.universalId, - key, - permanently, - }); - } - - releaseBlobs(): Promise { - return NbStoreDocStorage.releaseBlobs({ - id: this.universalId, - }); - } - - async listBlobs(): Promise> { - return ( - await NbStoreDocStorage.listBlobs({ - id: this.universalId, - }) - ).map(blob => ({ - ...blob, - createdAt: new Date(blob.createdAt), - })); - } - - getPeerRemoteClocks(peer: string): Promise> { - return NbStoreDocStorage.getPeerRemoteClocks({ - id: this.universalId, - peer, - }).then(result => - result.map(clock => ({ - ...clock, - timestamp: new Date(clock.timestamp), - })) - ); - } - - getPeerRemoteClock(peer: string, docId: string): Promise { - return NbStoreDocStorage.getPeerRemoteClock({ - id: this.universalId, - peer, - docId, - }).then(result => ({ - ...result, - timestamp: new Date(result.timestamp), - })); - } - - setPeerRemoteClock(peer: string, docId: string, clock: Date): Promise { - return NbStoreDocStorage.setPeerRemoteClock({ - id: this.universalId, - peer, - docId, - clock: clock.getTime(), - }); - } - - getPeerPulledRemoteClocks(peer: string): Promise> { - return NbStoreDocStorage.getPeerPulledRemoteClocks({ - id: this.universalId, - peer, - }).then(result => - result.map(clock => ({ - ...clock, - timestamp: new Date(clock.timestamp), - })) - ); - } - - getPeerPulledRemoteClock(peer: string, docId: string): Promise { - return NbStoreDocStorage.getPeerPulledRemoteClock({ - id: this.universalId, - peer, - docId, - }).then(result => ({ - ...result, - timestamp: new Date(result.timestamp), - })); - } - - setPeerPulledRemoteClock( - peer: string, - docId: string, - clock: Date - ): Promise { - return NbStoreDocStorage.setPeerPulledRemoteClock({ - id: this.universalId, - peer, - docId, - clock: clock.getTime(), - }); - } - - getPeerPushedClocks(peer: string): Promise> { - return NbStoreDocStorage.getPeerPushedClocks({ - id: this.universalId, - peer, - }).then(result => - result.map(clock => ({ - ...clock, - timestamp: new Date(clock.timestamp), - })) - ); - } - - getPeerPushedClock(peer: string, docId: string): Promise { - return NbStoreDocStorage.getPeerPushedClock({ - id: this.universalId, - peer, - docId, - }).then(result => ({ - ...result, - timestamp: new Date(result.timestamp), - })); - } - - setPeerPushedClock(peer: string, docId: string, clock: Date): Promise { - return NbStoreDocStorage.setPeerPushedClock({ - id: this.universalId, - peer, - docId, - clock: clock.getTime(), - }); - } - - clearClocks(): Promise { - return NbStoreDocStorage.clearClocks({ - id: this.universalId, - }); - } -} diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/storage.ts b/packages/frontend/apps/ios/src/plugins/nbstore/storage.ts deleted file mode 100644 index 5685743160f61..0000000000000 --- a/packages/frontend/apps/ios/src/plugins/nbstore/storage.ts +++ /dev/null @@ -1,83 +0,0 @@ -import { parseUniversalId, SpaceStorage } from '@affine/nbstore'; -import { applyUpdate, Doc as YDoc } from 'yjs'; - -import { SqliteBlobStorage } from './blob'; -import { NativeDBConnection } from './db'; -import { SqliteDocStorage } from './doc'; -import { SqliteSyncStorage } from './sync'; - -export class SqliteSpaceStorage extends SpaceStorage { - 
get connection() { - const docStore = this.get('doc'); - - if (!docStore) { - throw new Error('doc store not found'); - } - - const connection = docStore.connection; - - if (!(connection instanceof NativeDBConnection)) { - throw new Error('doc store connection is not a Sqlite connection'); - } - - return connection; - } - - async getDBPath() { - return this.connection.getDBPath(); - } - - async getWorkspaceName() { - const docStore = this.tryGet('doc'); - - if (!docStore) { - return null; - } - - const doc = await docStore.getDoc(docStore.spaceId); - if (!doc) { - return null; - } - - const ydoc = new YDoc(); - applyUpdate(ydoc, doc.bin); - return ydoc.getMap('meta').get('name') as string; - } - - async checkpoint() { - await this.connection.inner.checkpoint(); - } -} - -const STORE_CACHE = new Map(); - -export function getStorage(universalId: string) { - return STORE_CACHE.get(universalId); -} - -export async function ensureStorage(universalId: string) { - const { peer, type, id } = parseUniversalId(universalId); - let store = STORE_CACHE.get(universalId); - - if (!store) { - const opts = { - peer, - type, - id, - }; - - store = new SqliteSpaceStorage([ - new SqliteDocStorage(opts), - new SqliteBlobStorage(opts), - new SqliteSyncStorage(opts), - ]); - - store.connect(); - - await store.waitForConnected(); - - STORE_CACHE.set(universalId, store); - } - - return store; -} diff --git a/packages/frontend/apps/ios/src/plugins/nbstore/sync.ts b/packages/frontend/apps/ios/src/plugins/nbstore/sync.ts deleted file mode 100644 index 2942371b59be2..0000000000000 --- a/packages/frontend/apps/ios/src/plugins/nbstore/sync.ts +++ /dev/null @@ -1,70 +0,0 @@ -import { - BasicSyncStorage, - type DocClock, - type DocClocks, - share, -} from '@affine/nbstore'; - -import { NativeDBConnection } from './db'; - -export class SqliteSyncStorage extends BasicSyncStorage { - override connection = share( - new NativeDBConnection(this.peer, this.spaceType, this.spaceId) - ); - - get db() { - return this.connection.inner; - } - - override async getPeerRemoteClocks(peer: string) { - const records = await this.db.getPeerRemoteClocks(peer); - return records.reduce((clocks, { docId, timestamp }) => { - clocks[docId] = timestamp; - return clocks; - }, {} as DocClocks); - } - - override async getPeerRemoteClock(peer: string, docId: string) { - return this.db.getPeerRemoteClock(peer, docId); - } - - override async setPeerRemoteClock(peer: string, clock: DocClock) { - await this.db.setPeerRemoteClock(peer, clock.docId, clock.timestamp); - } - - override async getPeerPulledRemoteClock(peer: string, docId: string) { - return this.db.getPeerPulledRemoteClock(peer, docId); - } - - override async getPeerPulledRemoteClocks(peer: string) { - const records = await this.db.getPeerPulledRemoteClocks(peer); - return records.reduce((clocks, { docId, timestamp }) => { - clocks[docId] = timestamp; - return clocks; - }, {} as DocClocks); - } - - override async setPeerPulledRemoteClock(peer: string, clock: DocClock) { - await this.db.setPeerPulledRemoteClock(peer, clock.docId, clock.timestamp); - } - - override async getPeerPushedClocks(peer: string) { - const records = await this.db.getPeerPushedClocks(peer); - return records.reduce((clocks, { docId, timestamp }) => { - clocks[docId] = timestamp; - return clocks; - }, {} as DocClocks); - } - - override async getPeerPushedClock(peer: string, docId: string) { - return this.db.getPeerPushedClock(peer, docId); - } - - override async setPeerPushedClock(peer: string, clock: DocClock) { - await 
this.db.setPeerPushedClock(peer, clock.docId, clock.timestamp); - } - - override async clearClocks() { - await this.db.clearClocks(); - } -} diff --git a/packages/frontend/mobile-native/Cargo.toml b/packages/frontend/mobile-native/Cargo.toml index 18ea605cd5c3f..eeed92cb1b334 100644 --- a/packages/frontend/mobile-native/Cargo.toml +++ b/packages/frontend/mobile-native/Cargo.toml @@ -20,7 +20,7 @@ chrono = { workspace = true } dashmap = { workspace = true } sqlx = { workspace = true } thiserror = { workspace = true } -uniffi = { workspace = true, features = ["cli"] } +uniffi = { workspace = true, features = ["cli", "tokio"] } [target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies] objc2 = { workspace = true } diff --git a/packages/frontend/mobile-native/src/error.rs b/packages/frontend/mobile-native/src/error.rs deleted file mode 100644 index 2974955754663..0000000000000 --- a/packages/frontend/mobile-native/src/error.rs +++ /dev/null @@ -1,29 +0,0 @@ -use thiserror::Error; - -#[derive(uniffi::Error, Error, Debug)] -pub enum UniffiError { - #[error("Get user document directory failed")] - GetUserDocumentDirectoryFailed, - #[error("Create affine dir failed: {0}")] - CreateAffineDirFailed(String), - #[error("Empty doc storage path")] - EmptyDocStoragePath, - #[error("Empty space id")] - EmptySpaceId, - #[error("Sqlx error: {0}")] - SqlxError(String), - #[error("Base64 decoding error: {0}")] - Base64DecodingError(String), - #[error("Invalid universal storage id: {0}. It should be in format of @peer($peer);@type($type);@id($id);")] - InvalidUniversalId(String), - #[error("Invalid space type: {0}")] - InvalidSpaceType(String), - #[error("Concat space dir failed: {0}")] - ConcatSpaceDirFailed(String), -} - -impl From for UniffiError { - fn from(err: sqlx::Error) -> Self { - UniffiError::SqlxError(err.to_string()) - } -} diff --git a/packages/frontend/mobile-native/src/lib.rs b/packages/frontend/mobile-native/src/lib.rs index 1f4b77bde08ae..59163f5b63460 100644 --- a/packages/frontend/mobile-native/src/lib.rs +++ b/packages/frontend/mobile-native/src/lib.rs @@ -1,15 +1,23 @@ -use std::fmt::Display; -use std::str::FromStr; -use std::time::SystemTime; - use affine_common::hashcash::Stamp; -use affine_nbstore::storage; -use dashmap::{mapref::one::RefMut, DashMap, Entry}; +use affine_nbstore::pool::SqliteDocStoragePool; + +#[derive(uniffi::Error, thiserror::Error, Debug)] +pub enum UniffiError { + #[error("Error: {0}")] + Err(String), + #[error("Base64 decoding error: {0}")] + Base64DecodingError(String), + #[error("Timestamp decoding error")] + TimestampDecodingError, +} -use crate::error::UniffiError; +impl From for UniffiError { + fn from(err: affine_nbstore::error::Error) -> Self { + Self::Err(err.to_string()) + } +} -mod error; -mod utils; +type Result = std::result::Result; uniffi::setup_scaffolding!("affine_mobile_native"); @@ -22,16 +30,16 @@ pub fn hashcash_mint(resource: String, bits: u32) -> String { pub struct DocRecord { pub doc_id: String, // base64 encoded data - pub data: String, - pub timestamp: SystemTime, + pub bin: String, + pub timestamp: i64, } impl From for DocRecord { fn from(record: affine_nbstore::DocRecord) -> Self { Self { doc_id: record.doc_id, - data: base64_simd::STANDARD.encode_to_string(&record.data), - timestamp: record.timestamp.and_utc().into(), + bin: base64_simd::STANDARD.encode_to_string(&record.bin), + timestamp: record.timestamp.and_utc().timestamp_millis(), } } } @@ -39,13 +47,15 @@ impl From for DocRecord { impl TryFrom for 
affine_nbstore::DocRecord { type Error = UniffiError; - fn try_from(record: DocRecord) -> Result { + fn try_from(record: DocRecord) -> Result { Ok(Self { doc_id: record.doc_id, - data: base64_simd::STANDARD - .decode_to_vec(record.data) + bin: base64_simd::STANDARD + .decode_to_vec(record.bin) .map_err(|e| UniffiError::Base64DecodingError(e.to_string()))?, - timestamp: chrono::DateTime::::from(record.timestamp).naive_utc(), + timestamp: chrono::DateTime::::from_timestamp_millis(record.timestamp) + .ok_or(UniffiError::TimestampDecodingError)? + .naive_utc(), }) } } @@ -53,52 +63,60 @@ impl TryFrom for affine_nbstore::DocRecord { #[derive(uniffi::Record)] pub struct DocUpdate { pub doc_id: String, - pub created_at: SystemTime, + pub timestamp: i64, // base64 encoded data - pub data: String, + pub bin: String, } impl From for DocUpdate { fn from(update: affine_nbstore::DocUpdate) -> Self { Self { doc_id: update.doc_id, - created_at: update.created_at.and_utc().into(), - data: base64_simd::STANDARD.encode_to_string(&update.data), + timestamp: update.timestamp.and_utc().timestamp_millis(), + bin: base64_simd::STANDARD.encode_to_string(&update.bin), } } } -impl From for affine_nbstore::DocUpdate { - fn from(update: DocUpdate) -> Self { - Self { +impl TryFrom for affine_nbstore::DocUpdate { + type Error = UniffiError; + + fn try_from(update: DocUpdate) -> Result { + Ok(Self { doc_id: update.doc_id, - created_at: chrono::DateTime::::from(update.created_at).naive_utc(), - data: update.data.into(), - } + timestamp: chrono::DateTime::::from_timestamp_millis(update.timestamp) + .ok_or(UniffiError::TimestampDecodingError)? + .naive_utc(), + bin: update.bin.into(), + }) } } #[derive(uniffi::Record)] pub struct DocClock { pub doc_id: String, - pub timestamp: SystemTime, + pub timestamp: i64, } impl From for DocClock { fn from(clock: affine_nbstore::DocClock) -> Self { Self { doc_id: clock.doc_id, - timestamp: clock.timestamp.and_utc().into(), + timestamp: clock.timestamp.and_utc().timestamp_millis(), } } } -impl From for affine_nbstore::DocClock { - fn from(clock: DocClock) -> Self { - Self { +impl TryFrom for affine_nbstore::DocClock { + type Error = UniffiError; + + fn try_from(clock: DocClock) -> Result { + Ok(Self { doc_id: clock.doc_id, - timestamp: chrono::DateTime::::from(clock.timestamp).naive_utc(), - } + timestamp: chrono::DateTime::::from_timestamp_millis(clock.timestamp) + .ok_or(UniffiError::TimestampDecodingError)? 
+ .naive_utc(), + }) } } @@ -109,7 +127,7 @@ pub struct Blob { pub data: String, pub mime: String, pub size: i64, - pub created_at: SystemTime, + pub created_at: i64, } impl From for Blob { @@ -119,7 +137,7 @@ impl From for Blob { data: base64_simd::STANDARD.encode_to_string(&blob.data), mime: blob.mime, size: blob.size, - created_at: blob.created_at.and_utc().into(), + created_at: blob.created_at.and_utc().timestamp_millis(), } } } @@ -135,7 +153,7 @@ pub struct SetBlob { impl TryFrom for affine_nbstore::SetBlob { type Error = UniffiError; - fn try_from(blob: SetBlob) -> Result { + fn try_from(blob: SetBlob) -> Result { Ok(Self { key: blob.key, data: base64_simd::STANDARD @@ -151,7 +169,7 @@ pub struct ListedBlob { pub key: String, pub size: i64, pub mime: String, - pub created_at: SystemTime, + pub created_at: i64, } impl From for ListedBlob { @@ -160,76 +178,43 @@ impl From for ListedBlob { key: blob.key, size: blob.size, mime: blob.mime, - created_at: blob.created_at.and_utc().into(), + created_at: blob.created_at.and_utc().timestamp_millis(), } } } #[derive(uniffi::Object)] pub struct DocStoragePool { - inner: DashMap, + inner: SqliteDocStoragePool, } -impl DocStoragePool { - fn ensure_storage<'a>( - &'a self, - universal_id: &str, - ) -> Result, UniffiError> { - let entry = self.inner.entry(universal_id.to_string()); - - if let Entry::Occupied(storage) = entry { - return Ok(storage.into_ref()); - } - let options = parse_universal_id(entry.key())?; - let db_path = utils::get_db_path(&options)?; - if db_path.is_empty() { - return Err(UniffiError::EmptyDocStoragePath); - } - let storage = storage::SqliteDocStorage::new(db_path); - Ok(entry.or_insert(storage)) +#[uniffi::export] +pub fn new_doc_storage_pool() -> DocStoragePool { + DocStoragePool { + inner: Default::default(), } } -#[uniffi::export] +#[uniffi::export(async_runtime = "tokio")] impl DocStoragePool { /// Initialize the database and run migrations. - pub async fn connect(&self, universal_id: String) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.connect().await?) + pub async fn connect(&self, universal_id: String, path: String) -> Result<()> { + Ok(self.inner.connect(universal_id, path).await?) } - pub async fn close(&self, universal_id: String) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - storage.close().await; - self.inner.remove(&universal_id); + pub async fn disconnect(&self, universal_id: String) -> Result<()> { + self.inner.disconnect(universal_id).await?; Ok(()) } - pub fn is_closed(&self, universal_id: String) -> bool { - let storage = self.ensure_storage(&universal_id).unwrap(); - storage.is_closed() - } - - pub async fn checkpoint(&self, universal_id: String) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.checkpoint().await?) - } - - pub async fn validate(&self, universal_id: String) -> Result { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.validate().await?) - } - - pub async fn set_space_id( - &self, - universal_id: String, - space_id: String, - ) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - if space_id.is_empty() { - return Err(UniffiError::EmptySpaceId); - } - Ok(storage.set_space_id(space_id).await?) + pub async fn set_space_id(&self, universal_id: String, space_id: String) -> Result<()> { + Ok( + self + .inner + .ensure_storage(universal_id)? 
+ .set_space_id(space_id) + .await?, + ) } pub async fn push_update( @@ -237,10 +222,11 @@ impl DocStoragePool { universal_id: String, doc_id: String, update: String, - ) -> Result { - let storage = self.ensure_storage(&universal_id)?; + ) -> Result { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .push_update( doc_id, base64_simd::STANDARD @@ -249,7 +235,7 @@ impl DocStoragePool { ) .await? .and_utc() - .into(), + .timestamp_millis(), ) } @@ -257,28 +243,36 @@ impl DocStoragePool { &self, universal_id: String, doc_id: String, - ) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.get_doc_snapshot(doc_id).await?.map(Into::into)) + ) -> Result> { + Ok( + self + .inner + .ensure_storage(universal_id)? + .get_doc_snapshot(doc_id) + .await? + .map(Into::into), + ) } - pub async fn set_doc_snapshot( - &self, - universal_id: String, - snapshot: DocRecord, - ) -> Result { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.set_doc_snapshot(snapshot.try_into()?).await?) + pub async fn set_doc_snapshot(&self, universal_id: String, snapshot: DocRecord) -> Result { + Ok( + self + .inner + .ensure_storage(universal_id)? + .set_doc_snapshot(snapshot.try_into()?) + .await?, + ) } pub async fn get_doc_updates( &self, universal_id: String, doc_id: String, - ) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + ) -> Result> { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .get_doc_updates(doc_id) .await? .into_iter() @@ -291,36 +285,55 @@ impl DocStoragePool { &self, universal_id: String, doc_id: String, - updates: Vec, - ) -> Result { - let storage = self.ensure_storage(&universal_id)?; + updates: Vec, + ) -> Result { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .mark_updates_merged( doc_id, updates .into_iter() - .map(|t| chrono::DateTime::::from(t).naive_utc()) - .collect(), + .map(|t| { + chrono::DateTime::::from_timestamp_millis(t) + .ok_or(UniffiError::TimestampDecodingError) + .map(|t| t.naive_utc()) + }) + .collect::>>()?, ) .await?, ) } - pub async fn delete_doc(&self, universal_id: String, doc_id: String) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.delete_doc(doc_id).await?) + pub async fn delete_doc(&self, universal_id: String, doc_id: String) -> Result<()> { + Ok( + self + .inner + .ensure_storage(universal_id)? + .delete_doc(doc_id) + .await?, + ) } pub async fn get_doc_clocks( &self, universal_id: String, - after: Option, - ) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + after: Option, + ) -> Result> { Ok( - storage - .get_doc_clocks(after.map(|t| chrono::DateTime::::from(t).naive_utc())) + self + .inner + .ensure_storage(universal_id)? + .get_doc_clocks( + after + .map(|t| { + chrono::DateTime::::from_timestamp_millis(t) + .ok_or(UniffiError::TimestampDecodingError) + .map(|t| t.naive_utc()) + }) + .transpose()?, + ) .await? .into_iter() .map(Into::into) @@ -332,23 +345,36 @@ impl DocStoragePool { &self, universal_id: String, doc_id: String, - ) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.get_doc_clock(doc_id).await?.map(Into::into)) + ) -> Result> { + Ok( + self + .inner + .ensure_storage(universal_id)? + .get_doc_clock(doc_id) + .await? 
+ .map(Into::into), + ) } - pub async fn get_blob( - &self, - universal_id: String, - key: String, - ) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.get_blob(key).await?.map(Into::into)) + pub async fn get_blob(&self, universal_id: String, key: String) -> Result> { + Ok( + self + .inner + .ensure_storage(universal_id)? + .get_blob(key) + .await? + .map(Into::into), + ) } - pub async fn set_blob(&self, universal_id: String, blob: SetBlob) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.set_blob(blob.try_into()?).await?) + pub async fn set_blob(&self, universal_id: String, blob: SetBlob) -> Result<()> { + Ok( + self + .inner + .ensure_storage(universal_id)? + .set_blob(blob.try_into()?) + .await?, + ) } pub async fn delete_blob( @@ -356,20 +382,31 @@ impl DocStoragePool { universal_id: String, key: String, permanently: bool, - ) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.delete_blob(key, permanently).await?) + ) -> Result<()> { + Ok( + self + .inner + .ensure_storage(universal_id)? + .delete_blob(key, permanently) + .await?, + ) } - pub async fn release_blobs(&self, universal_id: String) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.release_blobs().await?) + pub async fn release_blobs(&self, universal_id: String) -> Result<()> { + Ok( + self + .inner + .ensure_storage(universal_id)? + .release_blobs() + .await?, + ) } - pub async fn list_blobs(&self, universal_id: String) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + pub async fn list_blobs(&self, universal_id: String) -> Result> { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .list_blobs() .await? .into_iter() @@ -382,10 +419,11 @@ impl DocStoragePool { &self, universal_id: String, peer: String, - ) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + ) -> Result> { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .get_peer_remote_clocks(peer) .await? .into_iter() @@ -399,9 +437,15 @@ impl DocStoragePool { universal_id: String, peer: String, doc_id: String, - ) -> Result { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.get_peer_remote_clock(peer, doc_id).await?.into()) + ) -> Result { + Ok( + self + .inner + .ensure_storage(universal_id)? + .get_peer_remote_clock(peer, doc_id) + .await? + .into(), + ) } pub async fn set_peer_remote_clock( @@ -409,15 +453,18 @@ impl DocStoragePool { universal_id: String, peer: String, doc_id: String, - clock: SystemTime, - ) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + clock: i64, + ) -> Result<()> { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .set_peer_remote_clock( peer, doc_id, - chrono::DateTime::::from(clock).naive_utc(), + chrono::DateTime::::from_timestamp_millis(clock) + .ok_or(UniffiError::TimestampDecodingError)? + .naive_utc(), ) .await?, ) @@ -427,10 +474,11 @@ impl DocStoragePool { &self, universal_id: String, peer: String, - ) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + ) -> Result> { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .get_peer_pulled_remote_clocks(peer) .await? 
.into_iter() @@ -444,10 +492,11 @@ impl DocStoragePool { universal_id: String, peer: String, doc_id: String, - ) -> Result { - let storage = self.ensure_storage(&universal_id)?; + ) -> Result { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .get_peer_pulled_remote_clock(peer, doc_id) .await? .into(), @@ -459,15 +508,18 @@ impl DocStoragePool { universal_id: String, peer: String, doc_id: String, - clock: SystemTime, - ) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + clock: i64, + ) -> Result<()> { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .set_peer_pulled_remote_clock( peer, doc_id, - chrono::DateTime::::from(clock).naive_utc(), + chrono::DateTime::::from_timestamp_millis(clock) + .ok_or(UniffiError::TimestampDecodingError)? + .naive_utc(), ) .await?, ) @@ -477,10 +529,11 @@ impl DocStoragePool { &self, universal_id: String, peer: String, - ) -> Result, UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + ) -> Result> { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .get_peer_pushed_clocks(peer) .await? .into_iter() @@ -494,203 +547,30 @@ impl DocStoragePool { universal_id: String, peer: String, doc_id: String, - clock: SystemTime, - ) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; + clock: i64, + ) -> Result<()> { Ok( - storage + self + .inner + .ensure_storage(universal_id)? .set_peer_pushed_clock( peer, doc_id, - chrono::DateTime::::from(clock).naive_utc(), + chrono::DateTime::::from_timestamp_millis(clock) + .ok_or(UniffiError::TimestampDecodingError)? + .naive_utc(), ) .await?, ) } - pub async fn clear_clocks(&self, universal_id: String) -> Result<(), UniffiError> { - let storage = self.ensure_storage(&universal_id)?; - Ok(storage.clear_clocks().await?) 
- } -} - -#[uniffi::export] -pub fn get_db_path(peer: String, space_type: String, id: String) -> Result { - let options = StorageOptions { - peer, - space_type: SpaceType::from_str(&space_type)?, - id, - }; - utils::get_db_path(&options) -} - -#[derive(Debug, PartialEq, Eq, Clone, Copy, Default)] -pub enum SpaceType { - #[default] - Userspace, - Workspace, -} - -impl Display for SpaceType { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - SpaceType::Userspace => write!(f, "userspace"), - SpaceType::Workspace => write!(f, "workspace"), - } - } -} - -impl FromStr for SpaceType { - type Err = UniffiError; - - fn from_str(s: &str) -> Result { - Ok(match s { - "userspace" => Self::Userspace, - "workspace" => Self::Workspace, - _ => return Err(UniffiError::InvalidSpaceType(s.to_string())), - }) - } -} - -pub struct StorageOptions { - pub peer: String, - pub space_type: SpaceType, - pub id: String, -} - -pub fn parse_universal_id(id: &str) -> Result { - let mut result = StorageOptions { - peer: String::new(), - space_type: SpaceType::default(), - id: String::new(), - }; - - let mut key = String::new(); - let mut value = String::new(); - let mut is_in_value = false; - let mut chars = id.chars().peekable(); - - while let Some(ch) = chars.next() { - if is_in_value { - if ch == ')' && chars.peek() == Some(&';') { - // Store the collected value in the appropriate field - match key.as_str() { - "peer" => result.peer = value.clone(), - "type" => result.space_type = SpaceType::from_str(&value)?, - "id" => result.id = value.clone(), - _ => return Err(UniffiError::InvalidUniversalId(id.to_string())), - } - key.clear(); - value.clear(); - is_in_value = false; - chars.next(); // Skip the semicolon - continue; - } - value.push(ch); - continue; - } - - if ch == '@' { - // Find the position of next '(' - let mut temp_chars = chars.clone(); - let mut found_paren = false; - let mut key_chars = Vec::new(); - - while let Some(next_ch) = temp_chars.next() { - if next_ch == '(' { - found_paren = true; - break; - } - key_chars.push(next_ch); - } - - // Invalid format if no '(' found or it's immediately after '@' - if !found_paren || key_chars.is_empty() { - return Err(UniffiError::InvalidUniversalId(id.to_string())); - } - - key = key_chars.into_iter().collect(); - // Advance the original iterator to the position after the key - for _ in 0..key.len() + 1 { - chars.next(); - } - is_in_value = true; - } else { - return Err(UniffiError::InvalidUniversalId(id.to_string())); - } - } - - // Validate the parsed results - if result.peer.is_empty() || result.id.is_empty() { - return Err(UniffiError::InvalidUniversalId(id.to_string())); - } - - Ok(result) -} - -#[cfg(test)] -mod tests { - use super::*; - - // ... existing test functions ... 
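The grammar this deleted parser enforced still matters after the move: both the NAPI `DocStoragePool` and the UniFFI pool key their storages by these universal ids. Below is a minimal sketch of the `@key(value);` format — illustrative only; the surviving parser presumably lives in `affine_nbstore` now, and this simplified version merely mirrors the `);`-terminator rule the deleted tests exercise:

```rust
/// Sketch of the `@peer($peer);@type($type);@id($id);` convention.
/// A value runs until the first `);`, which is what lets values contain
/// stray parentheses, as in the `@peer(@peer(name);...` test case above.
fn parse_universal_id(id: &str) -> Option<(String, String, String)> {
    let (mut peer, mut ty, mut space_id) = (None, None, None);
    let mut rest = id;
    while !rest.is_empty() {
        // Every segment must start with `@key(`.
        rest = rest.strip_prefix('@')?;
        let open = rest.find('(')?;
        let key = &rest[..open];
        let after = &rest[open + 1..];
        // The value ends at the first `);`.
        let end = after.find(");")?;
        let value = after[..end].to_string();
        match key {
            "peer" => peer = Some(value),
            "type" => ty = Some(value),
            "id" => space_id = Some(value),
            _ => return None, // unknown or empty key
        }
        rest = &after[end + 2..];
    }
    // The deleted parser defaulted the space type when absent;
    // this sketch requires all three segments.
    Some((peer?, ty?, space_id?))
}
```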
- - #[test] - fn test_universal_id() { - let options = StorageOptions { - peer: "123".to_string(), - space_type: SpaceType::Workspace, - id: "456".to_string(), - }; - - let id = format!( - "@peer({});@type({});@id({});", - options.peer, options.space_type, options.id - ); - let result = parse_universal_id(&id).unwrap(); - - assert_eq!(result.peer, "123"); - assert_eq!(result.space_type, SpaceType::Workspace); - assert_eq!(result.id, "456"); - } - - #[test] - fn test_parse_universal_id_valid_cases() { - let testcases = vec![ - "@peer(123);@type(userspace);@id(456);", - "@peer(123);@type(workspace);@id(456);", - "@peer(https://app.affine.pro);@type(userspace);@id(hello:world);", - "@peer(@name);@type(userspace);@id(@id);", - "@peer(@peer(name);@type(userspace);@id(@id);", - ]; - - for id in testcases { - let result = parse_universal_id(id); - assert!(result.is_ok(), "Failed to parse: {}", id); - - let parsed = result.unwrap(); - assert!(!parsed.peer.is_empty()); - assert!(!parsed.id.is_empty()); - } - } - - #[test] - fn test_parse_universal_id_invalid_cases() { - let testcases = vec![ - // invalid space type - "@peer(123);@type(anyspace);@id(456);", - // invalid peer - "@peer(@peer(name););@type(userspace);@id(@id);", - ]; - - for id in testcases { - let result = parse_universal_id(id); - assert!(result.is_err(), "Should have failed to parse: {}", id); - - match result { - Err(UniffiError::InvalidUniversalId(_)) => (), - Err(UniffiError::InvalidSpaceType(_)) => (), - _ => panic!("Expected InvalidUniversalId error for: {}", id), - } - } + pub async fn clear_clocks(&self, universal_id: String) -> Result<()> { + Ok( + self + .inner + .ensure_storage(universal_id)? + .clear_clocks() + .await?, + ) } } diff --git a/packages/frontend/mobile-native/src/utils.rs b/packages/frontend/mobile-native/src/utils.rs deleted file mode 100644 index bfe9078139972..0000000000000 --- a/packages/frontend/mobile-native/src/utils.rs +++ /dev/null @@ -1,141 +0,0 @@ -use std::fs; - -#[cfg(not(any(target_os = "ios", target_os = "macos")))] -use homedir::my_home; -#[cfg(any(target_os = "ios", target_os = "macos"))] -use objc2::rc::autoreleasepool; -#[cfg(any(target_os = "ios", target_os = "macos"))] -use objc2_foundation::{NSFileManager, NSSearchPathDirectory, NSSearchPathDomainMask, NSString}; - -use crate::{error::UniffiError, SpaceType, StorageOptions}; - -const DB_FILE_NAME: &str = "storage.db"; - -#[cfg(any(target_os = "ios", target_os = "macos"))] -pub(crate) fn get_db_path(options: &StorageOptions) -> Result { - let file_manager = unsafe { NSFileManager::defaultManager() }; - // equivalent to Swift: - // ```swift - // guard let documentsPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { - // return nil - // } - // ``` - let urls = unsafe { - file_manager.URLsForDirectory_inDomains( - NSSearchPathDirectory::NSDocumentDirectory, - NSSearchPathDomainMask::NSUserDomainMask, - ) - }; - let document_directory = urls - .first() - .ok_or(UniffiError::GetUserDocumentDirectoryFailed)?; - - let affine_dir = unsafe { - let spaces_dir = match options.space_type { - SpaceType::Userspace => "userspaces", - SpaceType::Workspace => "workspaces", - }; - let escaped_peer = escape_filename(&options.peer); - document_directory - .URLByAppendingPathComponent(&NSString::from_str(".affine")) - .and_then(|url| url.URLByAppendingPathComponent(&NSString::from_str(spaces_dir))) - .and_then(|url| url.URLByAppendingPathComponent(&NSString::from_str(&escaped_peer))) - .and_then(|url| 
url.URLByAppendingPathComponent(&NSString::from_str(&options.id))) - } - .ok_or(UniffiError::ConcatSpaceDirFailed(format!( - "{}:{}:{}", - options.peer, options.space_type, options.id - )))?; - let affine_dir_str = autoreleasepool(|pool| { - Ok::( - unsafe { affine_dir.path() } - .ok_or(UniffiError::GetUserDocumentDirectoryFailed)? - .as_str(pool) - .to_string(), - ) - })?; - - // Replicate Swift's appending ".affine" subdir, creating it if necessary - fs::create_dir_all(&affine_dir_str) - .map_err(|_| UniffiError::CreateAffineDirFailed(affine_dir_str.clone()))?; - - let db_path = autoreleasepool(|pool| { - let db_path = - unsafe { affine_dir.URLByAppendingPathComponent(&NSString::from_str(DB_FILE_NAME)) }.ok_or( - UniffiError::ConcatSpaceDirFailed(format!( - "{}:{}:{}/{DB_FILE_NAME}", - options.peer, options.space_type, options.id - )), - )?; - Ok::( - unsafe { db_path.path() } - .ok_or(UniffiError::GetUserDocumentDirectoryFailed)? - .as_str(pool) - .to_string(), - ) - })?; - - Ok(db_path) -} - -#[cfg(not(any(target_os = "ios", target_os = "macos")))] -pub(crate) fn get_db_path(options: &StorageOptions) -> Result { - let home_dir = my_home() - .map_err(|_| UniffiError::GetUserDocumentDirectoryFailed)? - .ok_or(UniffiError::GetUserDocumentDirectoryFailed)?; - let spaces_dir = match options.space_type { - SpaceType::Userspace => "userspaces", - SpaceType::Workspace => "workspaces", - }; - let escaped_peer = escape_filename(&options.peer); - let db_path = home_dir - .join(".affine") - .join(spaces_dir) - .join(&escaped_peer) - .join(&options.id); - fs::create_dir_all(&db_path) - .map_err(|_| UniffiError::CreateAffineDirFailed(db_path.to_string_lossy().to_string()))?; - db_path - .join(DB_FILE_NAME) - .to_str() - .map(|p| p.to_owned()) - .ok_or(UniffiError::GetUserDocumentDirectoryFailed) -} - -fn escape_filename(name: &str) -> String { - // First replace special chars with '_' - let with_underscores = name.replace(|c: char| "\\/!@#$%^&*()+~`\"':;,?<>|".contains(c), "_"); - - // Then collapse multiple '_' into single '_' - let mut result = String::with_capacity(with_underscores.len()); - let mut last_was_underscore = false; - - for c in with_underscores.chars() { - if c == '_' { - if !last_was_underscore { - result.push(c); - } - last_was_underscore = true; - } else { - result.push(c); - last_was_underscore = false; - } - } - - // Remove trailing underscore - result.trim_end_matches('_').to_string() -} - -#[cfg(all(test, any(target_os = "ios", target_os = "macos")))] -mod tests { - use super::*; - - #[test] - fn test_escape_filename() { - assert_eq!(escape_filename("hello@world"), "hello_world"); - assert_eq!(escape_filename("test!!file"), "test_file"); - assert_eq!(escape_filename("_test_"), "_test"); // Leading underscore preserved - assert_eq!(escape_filename("multi___under"), "multi_under"); - assert_eq!(escape_filename("path/to\\file"), "path_to_file"); - } -} diff --git a/packages/frontend/native/index.d.ts b/packages/frontend/native/index.d.ts index dc657ae023552..1b3b3904c1f99 100644 --- a/packages/frontend/native/index.d.ts +++ b/packages/frontend/native/index.d.ts @@ -1,41 +1,34 @@ /* auto-generated by NAPI-RS */ /* eslint-disable */ -export declare class DocStorage { - constructor(path: string) +export declare class DocStoragePool { + constructor() /** Initialize the database and run migrations. */ - connect(): Promise - close(): Promise - get isClosed(): Promise - /** - * Flush the WAL file to the database file. 
- * See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B - */ - checkpoint(): Promise - validate(): Promise - setSpaceId(spaceId: string): Promise - pushUpdate(docId: string, update: Uint8Array): Promise - getDocSnapshot(docId: string): Promise - setDocSnapshot(snapshot: DocRecord): Promise - getDocUpdates(docId: string): Promise> - markUpdatesMerged(docId: string, updates: Array): Promise - deleteDoc(docId: string): Promise - getDocClocks(after?: Date | undefined | null): Promise> - getDocClock(docId: string): Promise - getBlob(key: string): Promise - setBlob(blob: SetBlob): Promise - deleteBlob(key: string, permanently: boolean): Promise - releaseBlobs(): Promise - listBlobs(): Promise> - getPeerRemoteClocks(peer: string): Promise> - getPeerRemoteClock(peer: string, docId: string): Promise - setPeerRemoteClock(peer: string, docId: string, clock: Date): Promise - getPeerPulledRemoteClocks(peer: string): Promise> - getPeerPulledRemoteClock(peer: string, docId: string): Promise - setPeerPulledRemoteClock(peer: string, docId: string, clock: Date): Promise - getPeerPushedClocks(peer: string): Promise> - getPeerPushedClock(peer: string, docId: string): Promise - setPeerPushedClock(peer: string, docId: string, clock: Date): Promise - clearClocks(): Promise + connect(universalId: string, path: string): Promise + disconnect(universalId: string): Promise + setSpaceId(universalId: string, spaceId: string): Promise + pushUpdate(universalId: string, docId: string, update: Uint8Array): Promise + getDocSnapshot(universalId: string, docId: string): Promise + setDocSnapshot(universalId: string, snapshot: DocRecord): Promise + getDocUpdates(universalId: string, docId: string): Promise> + markUpdatesMerged(universalId: string, docId: string, updates: Array): Promise + deleteDoc(universalId: string, docId: string): Promise + getDocClocks(universalId: string, after?: Date | undefined | null): Promise> + getDocClock(universalId: string, docId: string): Promise + getBlob(universalId: string, key: string): Promise + setBlob(universalId: string, blob: SetBlob): Promise + deleteBlob(universalId: string, key: string, permanently: boolean): Promise + releaseBlobs(universalId: string): Promise + listBlobs(universalId: string): Promise> + getPeerRemoteClocks(universalId: string, peer: string): Promise> + getPeerRemoteClock(universalId: string, peer: string, docId: string): Promise + setPeerRemoteClock(universalId: string, peer: string, docId: string, clock: Date): Promise + getPeerPulledRemoteClocks(universalId: string, peer: string): Promise> + getPeerPulledRemoteClock(universalId: string, peer: string, docId: string): Promise + setPeerPulledRemoteClock(universalId: string, peer: string, docId: string, clock: Date): Promise + getPeerPushedClocks(universalId: string, peer: string): Promise> + getPeerPushedClock(universalId: string, peer: string, docId: string): Promise + setPeerPushedClock(universalId: string, peer: string, docId: string, clock: Date): Promise + clearClocks(universalId: string): Promise } export declare class SqliteConnection { @@ -96,14 +89,14 @@ export interface DocClock { export interface DocRecord { docId: string - data: Uint8Array + bin: Uint8Array timestamp: Date } export interface DocUpdate { docId: string - createdAt: Date - data: Uint8Array + timestamp: Date + bin: Uint8Array } export interface InsertRow { diff --git a/packages/frontend/native/index.js b/packages/frontend/native/index.js index 378709a9eeb23..90cedd559727c 100644 --- 
a/packages/frontend/native/index.js +++ b/packages/frontend/native/index.js @@ -364,7 +364,7 @@ if (!nativeBinding) { throw new Error(`Failed to load native binding`) } -module.exports.DocStorage = nativeBinding.DocStorage +module.exports.DocStoragePool = nativeBinding.DocStoragePool module.exports.SqliteConnection = nativeBinding.SqliteConnection module.exports.mintChallengeResponse = nativeBinding.mintChallengeResponse module.exports.ValidationResult = nativeBinding.ValidationResult diff --git a/packages/frontend/native/nbstore/Cargo.toml b/packages/frontend/native/nbstore/Cargo.toml index 757dcb3267f51..a3bc0e814693f 100644 --- a/packages/frontend/native/nbstore/Cargo.toml +++ b/packages/frontend/native/nbstore/Cargo.toml @@ -13,8 +13,10 @@ use-as-lib = ["napi-derive/noop", "napi/noop"] affine_schema = { path = "../schema" } anyhow = { workspace = true } chrono = { workspace = true } +dashmap = { workspace = true } napi = { workspace = true } napi-derive = { workspace = true } +thiserror = { workspace = true } sqlx = { workspace = true, default-features = false, features = ["chrono", "macros", "migrate", "runtime-tokio", "sqlite", "tls-rustls"] } tokio = { workspace = true, features = ["full"] } diff --git a/packages/frontend/native/nbstore/src/blob.rs b/packages/frontend/native/nbstore/src/blob.rs index 17996168ae96d..095d2ada3821f 100644 --- a/packages/frontend/native/nbstore/src/blob.rs +++ b/packages/frontend/native/nbstore/src/blob.rs @@ -1,18 +1,18 @@ use std::ops::Deref; -use super::{storage::SqliteDocStorage, Blob, ListedBlob, SetBlob}; - -type Result = std::result::Result; +use super::{error::Result, storage::SqliteDocStorage, Blob, ListedBlob, SetBlob}; impl SqliteDocStorage { pub async fn get_blob(&self, key: String) -> Result> { - sqlx::query_as!( + let result = sqlx::query_as!( Blob, "SELECT key, data, size, mime, created_at FROM blobs WHERE key = ? 
AND deleted_at IS NULL", key ) .fetch_optional(&self.pool) - .await + .await?; + + Ok(result) } pub async fn set_blob(&self, blob: SetBlob) -> Result<()> { @@ -58,12 +58,14 @@ impl SqliteDocStorage { } pub async fn list_blobs(&self) -> Result> { - sqlx::query_as!( + let result = sqlx::query_as!( ListedBlob, "SELECT key, size, mime, created_at FROM blobs WHERE deleted_at IS NULL ORDER BY created_at DESC;" ) .fetch_all(&self.pool) - .await + .await?; + + Ok(result) } } diff --git a/packages/frontend/native/nbstore/src/doc.rs b/packages/frontend/native/nbstore/src/doc.rs index 00706f1de1e8a..d9bfade80f027 100644 --- a/packages/frontend/native/nbstore/src/doc.rs +++ b/packages/frontend/native/nbstore/src/doc.rs @@ -3,8 +3,7 @@ use std::ops::Deref; use chrono::{DateTime, NaiveDateTime}; use sqlx::{QueryBuilder, Row}; -use super::storage::{Result, SqliteDocStorage}; -use super::{DocClock, DocRecord, DocUpdate}; +use super::{error::Result, storage::SqliteDocStorage, DocClock, DocRecord, DocUpdate}; struct Meta { space_id: String, @@ -81,7 +80,7 @@ impl SqliteDocStorage { Ok(()) => break, Err(e) => { if tried > 10 { - return Err(e); + return Err(e.into()); } // Increment timestamp by 1ms and retry @@ -126,13 +125,15 @@ impl SqliteDocStorage { } pub async fn get_doc_snapshot(&self, doc_id: String) -> Result> { - sqlx::query_as!( + let result = sqlx::query_as!( DocRecord, - "SELECT doc_id, data, updated_at as timestamp FROM snapshots WHERE doc_id = ?", + "SELECT doc_id, data as bin, updated_at as timestamp FROM snapshots WHERE doc_id = ?", doc_id ) .fetch_optional(&self.pool) - .await + .await?; + + Ok(result) } pub async fn set_doc_snapshot(&self, snapshot: DocRecord) -> Result { @@ -145,7 +146,7 @@ impl SqliteDocStorage { WHERE updated_at <= $3;"#, ) .bind(snapshot.doc_id) - .bind(snapshot.data.deref()) + .bind(snapshot.bin.deref()) .bind(snapshot.timestamp) .execute(&self.pool) .await?; @@ -154,13 +155,15 @@ impl SqliteDocStorage { } pub async fn get_doc_updates(&self, doc_id: String) -> Result> { - sqlx::query_as!( + let result = sqlx::query_as!( DocUpdate, - "SELECT doc_id, created_at, data FROM updates WHERE doc_id = ?", + "SELECT doc_id, created_at as timestamp, data as bin FROM updates WHERE doc_id = ?", doc_id ) .fetch_all(&self.pool) - .await + .await?; + + Ok(result) } pub async fn mark_updates_merged( @@ -204,7 +207,9 @@ impl SqliteDocStorage { .execute(&mut *tx) .await?; - tx.commit().await + tx.commit().await?; + + Ok(()) } pub async fn get_doc_clocks(&self, after: Option) -> Result> { @@ -228,13 +233,15 @@ impl SqliteDocStorage { } pub async fn get_doc_clock(&self, doc_id: String) -> Result> { - sqlx::query_as!( + let result = sqlx::query_as!( DocClock, "SELECT doc_id, timestamp FROM clocks WHERE doc_id = ?", doc_id ) .fetch_optional(&self.pool) - .await + .await?; + + Ok(result) } } @@ -286,7 +293,7 @@ mod tests { storage .set_doc_snapshot(DocRecord { doc_id: "test".to_string(), - data: vec![0, 0].into(), + bin: vec![0, 0].into(), timestamp: Utc::now().naive_utc(), }) .await @@ -350,7 +357,7 @@ mod tests { assert_eq!(result.len(), 4); assert_eq!( - result.iter().map(|u| u.data.as_ref()).collect::>(), + result.iter().map(|u| u.bin.as_ref()).collect::>(), updates ); } @@ -365,7 +372,7 @@ mod tests { let snapshot = DocRecord { doc_id: "test".to_string(), - data: vec![0, 0].into(), + bin: vec![0, 0].into(), timestamp: Utc::now().naive_utc(), }; @@ -374,7 +381,7 @@ mod tests { let result = storage.get_doc_snapshot("test".to_string()).await.unwrap(); assert!(result.is_some()); - 
assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]); + assert_eq!(result.unwrap().bin.as_ref(), vec![0, 0]); } #[tokio::test] @@ -383,7 +390,7 @@ mod tests { let snapshot = DocRecord { doc_id: "test".to_string(), - data: vec![0, 0].into(), + bin: vec![0, 0].into(), timestamp: Utc::now().naive_utc(), }; @@ -392,11 +399,11 @@ mod tests { let result = storage.get_doc_snapshot("test".to_string()).await.unwrap(); assert!(result.is_some()); - assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]); + assert_eq!(result.unwrap().bin.as_ref(), vec![0, 0]); let snapshot = DocRecord { doc_id: "test".to_string(), - data: vec![0, 1].into(), + bin: vec![0, 1].into(), timestamp: DateTime::from_timestamp_millis(Utc::now().timestamp_millis() - 1000) .unwrap() .naive_utc(), @@ -408,7 +415,7 @@ mod tests { let result = storage.get_doc_snapshot("test".to_string()).await.unwrap(); assert!(result.is_some()); - assert_eq!(result.unwrap().data.as_ref(), vec![0, 0]); + assert_eq!(result.unwrap().bin.as_ref(), vec![0, 0]); } #[tokio::test] @@ -468,7 +475,7 @@ mod tests { updates .iter() .skip(1) - .map(|u| u.created_at) + .map(|u| u.timestamp) .collect::>(), ) .await diff --git a/packages/frontend/native/nbstore/src/error.rs b/packages/frontend/native/nbstore/src/error.rs new file mode 100644 index 0000000000000..2e20a99da7f77 --- /dev/null +++ b/packages/frontend/native/nbstore/src/error.rs @@ -0,0 +1,11 @@ +pub type Result = std::result::Result; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Sqlite Error: {0}")] + SqlxError(#[from] sqlx::Error), + #[error("Migrate Error: {0}")] + MigrateError(#[from] sqlx::migrate::MigrateError), + #[error("Invalid operation")] + InvalidOperation, +} diff --git a/packages/frontend/native/nbstore/src/lib.rs b/packages/frontend/native/nbstore/src/lib.rs index 7925363ff5a0e..b9d6b0df19636 100644 --- a/packages/frontend/native/nbstore/src/lib.rs +++ b/packages/frontend/native/nbstore/src/lib.rs @@ -1,11 +1,14 @@ pub mod blob; pub mod doc; +pub mod error; +pub mod pool; pub mod storage; pub mod sync; use chrono::NaiveDateTime; use napi::bindgen_prelude::*; use napi_derive::napi; +use pool::SqliteDocStoragePool; #[cfg(feature = "use-as-lib")] type Result = anyhow::Result; @@ -14,13 +17,10 @@ type Result = anyhow::Result; type Result = napi::Result; #[cfg(not(feature = "use-as-lib"))] -fn map_err(err: sqlx::Error) -> Error { - Error::from(anyhow::Error::from(err)) -} - -#[cfg(feature = "use-as-lib")] -fn map_err(err: sqlx::Error) -> anyhow::Error { - anyhow::Error::from(err) +impl From for napi::Error { + fn from(err: error::Error) -> Self { + napi::Error::new(napi::Status::GenericFailure, err.to_string()) + } } #[cfg(feature = "use-as-lib")] @@ -32,16 +32,16 @@ pub type Data = Uint8Array; #[napi(object)] pub struct DocUpdate { pub doc_id: String, - pub created_at: NaiveDateTime, + pub timestamp: NaiveDateTime, #[napi(ts_type = "Uint8Array")] - pub data: Data, + pub bin: Data, } #[napi(object)] pub struct DocRecord { pub doc_id: String, #[napi(ts_type = "Uint8Array")] - pub data: Data, + pub bin: Data, pub timestamp: NaiveDateTime, } @@ -79,243 +79,354 @@ pub struct ListedBlob { } #[napi] -pub struct DocStorage { - storage: storage::SqliteDocStorage, +pub struct DocStoragePool { + pool: SqliteDocStoragePool, } #[napi] -impl DocStorage { +impl DocStoragePool { #[napi(constructor, async_runtime)] - pub fn new(path: String) -> Result { + pub fn new() -> Result { Ok(Self { - storage: storage::SqliteDocStorage::new(path), + pool: SqliteDocStoragePool::default(), }) } 
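Since every NAPI method now routes through `ensure_storage`, call order matters: `connect` must register the storage for a given universal id before any other call can succeed. A minimal lifecycle sketch against the new pool API from `nbstore/src/pool.rs` follows; the id, path, and doc id literals are placeholders:

```rust
use affine_nbstore::{error::Result, pool::SqliteDocStoragePool};

async fn demo() -> Result<()> {
    let pool = SqliteDocStoragePool::default();
    let id = "@peer(local);@type(workspace);@id(ws1);".to_string();

    // `connect` creates (or reuses) the entry and runs migrations;
    // calling any other method first yields Error::InvalidOperation.
    pool.connect(id.clone(), "/tmp/ws1.db".to_string()).await?;

    // `ensure_storage` hands back a DashMap guard deref'ing to the
    // underlying SqliteDocStorage; the guard drops at statement end.
    let snapshot = pool
        .ensure_storage(id.clone())?
        .get_doc_snapshot("doc1".to_string())
        .await?;
    println!("snapshot present: {}", snapshot.is_some());

    // `disconnect` closes the SQLite pool and removes the entry.
    pool.disconnect(id).await?;
    Ok(())
}
```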
#[napi] /// Initialize the database and run migrations. - pub async fn connect(&self) -> Result<()> { - self.storage.connect().await.map_err(map_err) - } - - #[napi] - pub async fn close(&self) -> Result<()> { - self.storage.close().await; - + pub async fn connect(&self, universal_id: String, path: String) -> Result<()> { + self.pool.connect(universal_id, path).await?; Ok(()) } - #[napi(getter)] - pub async fn is_closed(&self) -> Result { - Ok(self.storage.is_closed()) - } - - /** - * Flush the WAL file to the database file. - * See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B - */ - #[napi] - pub async fn checkpoint(&self) -> Result<()> { - self.storage.checkpoint().await.map_err(map_err) - } - #[napi] - pub async fn validate(&self) -> Result { - self.storage.validate().await.map_err(map_err) + pub async fn disconnect(&self, universal_id: String) -> Result<()> { + self.pool.disconnect(universal_id).await?; + Ok(()) } #[napi] - pub async fn set_space_id(&self, space_id: String) -> Result<()> { - self.storage.set_space_id(space_id).await.map_err(map_err) + pub async fn set_space_id(&self, universal_id: String, space_id: String) -> Result<()> { + self + .pool + .ensure_storage(universal_id)? + .set_space_id(space_id) + .await?; + Ok(()) } #[napi] - pub async fn push_update(&self, doc_id: String, update: Uint8Array) -> Result { - self - .storage - .push_update(doc_id, update) - .await - .map_err(map_err) + pub async fn push_update( + &self, + universal_id: String, + doc_id: String, + update: Uint8Array, + ) -> Result { + Ok( + self + .pool + .ensure_storage(universal_id)? + .push_update(doc_id, update) + .await?, + ) } #[napi] - pub async fn get_doc_snapshot(&self, doc_id: String) -> Result> { - self.storage.get_doc_snapshot(doc_id).await.map_err(map_err) + pub async fn get_doc_snapshot( + &self, + universal_id: String, + doc_id: String, + ) -> Result> { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_doc_snapshot(doc_id) + .await?, + ) } #[napi] - pub async fn set_doc_snapshot(&self, snapshot: DocRecord) -> Result { - self - .storage - .set_doc_snapshot(snapshot) - .await - .map_err(map_err) + pub async fn set_doc_snapshot(&self, universal_id: String, snapshot: DocRecord) -> Result { + Ok( + self + .pool + .ensure_storage(universal_id)? + .set_doc_snapshot(snapshot) + .await?, + ) } #[napi] - pub async fn get_doc_updates(&self, doc_id: String) -> Result> { - self.storage.get_doc_updates(doc_id).await.map_err(map_err) + pub async fn get_doc_updates( + &self, + universal_id: String, + doc_id: String, + ) -> Result> { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_doc_updates(doc_id) + .await?, + ) } #[napi] pub async fn mark_updates_merged( &self, + universal_id: String, doc_id: String, updates: Vec, ) -> Result { - self - .storage - .mark_updates_merged(doc_id, updates) - .await - .map_err(map_err) + Ok( + self + .pool + .ensure_storage(universal_id)? + .mark_updates_merged(doc_id, updates) + .await?, + ) } #[napi] - pub async fn delete_doc(&self, doc_id: String) -> Result<()> { - self.storage.delete_doc(doc_id).await.map_err(map_err) + pub async fn delete_doc(&self, universal_id: String, doc_id: String) -> Result<()> { + self + .pool + .ensure_storage(universal_id)? 
+ .delete_doc(doc_id) + .await?; + Ok(()) } #[napi] - pub async fn get_doc_clocks(&self, after: Option) -> Result> { - self.storage.get_doc_clocks(after).await.map_err(map_err) + pub async fn get_doc_clocks( + &self, + universal_id: String, + after: Option, + ) -> Result> { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_doc_clocks(after) + .await?, + ) } #[napi] - pub async fn get_doc_clock(&self, doc_id: String) -> Result> { - self.storage.get_doc_clock(doc_id).await.map_err(map_err) + pub async fn get_doc_clock( + &self, + universal_id: String, + doc_id: String, + ) -> Result> { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_doc_clock(doc_id) + .await?, + ) } #[napi] - pub async fn get_blob(&self, key: String) -> Result> { - self.storage.get_blob(key).await.map_err(map_err) + pub async fn get_blob(&self, universal_id: String, key: String) -> Result> { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_blob(key) + .await?, + ) } #[napi] - pub async fn set_blob(&self, blob: SetBlob) -> Result<()> { - self.storage.set_blob(blob).await.map_err(map_err) + pub async fn set_blob(&self, universal_id: String, blob: SetBlob) -> Result<()> { + self + .pool + .ensure_storage(universal_id)? + .set_blob(blob) + .await?; + Ok(()) } #[napi] - pub async fn delete_blob(&self, key: String, permanently: bool) -> Result<()> { + pub async fn delete_blob( + &self, + universal_id: String, + key: String, + permanently: bool, + ) -> Result<()> { self - .storage + .pool + .ensure_storage(universal_id)? .delete_blob(key, permanently) - .await - .map_err(map_err) + .await?; + Ok(()) } #[napi] - pub async fn release_blobs(&self) -> Result<()> { - self.storage.release_blobs().await.map_err(map_err) + pub async fn release_blobs(&self, universal_id: String) -> Result<()> { + self + .pool + .ensure_storage(universal_id)? + .release_blobs() + .await?; + Ok(()) } #[napi] - pub async fn list_blobs(&self) -> Result> { - self.storage.list_blobs().await.map_err(map_err) + pub async fn list_blobs(&self, universal_id: String) -> Result> { + Ok(self.pool.ensure_storage(universal_id)?.list_blobs().await?) } #[napi] - pub async fn get_peer_remote_clocks(&self, peer: String) -> Result> { - self - .storage - .get_peer_remote_clocks(peer) - .await - .map_err(map_err) + pub async fn get_peer_remote_clocks( + &self, + universal_id: String, + peer: String, + ) -> Result> { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_peer_remote_clocks(peer) + .await?, + ) } #[napi] - pub async fn get_peer_remote_clock(&self, peer: String, doc_id: String) -> Result { - self - .storage - .get_peer_remote_clock(peer, doc_id) - .await - .map_err(map_err) + pub async fn get_peer_remote_clock( + &self, + universal_id: String, + peer: String, + doc_id: String, + ) -> Result { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_peer_remote_clock(peer, doc_id) + .await?, + ) } #[napi] pub async fn set_peer_remote_clock( &self, + universal_id: String, peer: String, doc_id: String, clock: NaiveDateTime, ) -> Result<()> { self - .storage + .pool + .ensure_storage(universal_id)? .set_peer_remote_clock(peer, doc_id, clock) - .await - .map_err(map_err) + .await?; + Ok(()) } #[napi] - pub async fn get_peer_pulled_remote_clocks(&self, peer: String) -> Result> { - self - .storage - .get_peer_pulled_remote_clocks(peer) - .await - .map_err(map_err) + pub async fn get_peer_pulled_remote_clocks( + &self, + universal_id: String, + peer: String, + ) -> Result> { + Ok( + self + .pool + .ensure_storage(universal_id)? 
+ .get_peer_pulled_remote_clocks(peer) + .await?, + ) } #[napi] pub async fn get_peer_pulled_remote_clock( &self, + universal_id: String, peer: String, doc_id: String, ) -> Result { - self - .storage - .get_peer_pulled_remote_clock(peer, doc_id) - .await - .map_err(map_err) + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_peer_pulled_remote_clock(peer, doc_id) + .await?, + ) } #[napi] pub async fn set_peer_pulled_remote_clock( &self, + universal_id: String, peer: String, doc_id: String, clock: NaiveDateTime, ) -> Result<()> { self - .storage + .pool + .ensure_storage(universal_id)? .set_peer_pulled_remote_clock(peer, doc_id, clock) - .await - .map_err(map_err) + .await?; + Ok(()) } #[napi] - pub async fn get_peer_pushed_clocks(&self, peer: String) -> Result> { - self - .storage - .get_peer_pushed_clocks(peer) - .await - .map_err(map_err) + pub async fn get_peer_pushed_clocks( + &self, + universal_id: String, + peer: String, + ) -> Result> { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_peer_pushed_clocks(peer) + .await?, + ) } #[napi] - pub async fn get_peer_pushed_clock(&self, peer: String, doc_id: String) -> Result { - self - .storage - .get_peer_pushed_clock(peer, doc_id) - .await - .map_err(map_err) + pub async fn get_peer_pushed_clock( + &self, + universal_id: String, + peer: String, + doc_id: String, + ) -> Result { + Ok( + self + .pool + .ensure_storage(universal_id)? + .get_peer_pushed_clock(peer, doc_id) + .await?, + ) } #[napi] pub async fn set_peer_pushed_clock( &self, + universal_id: String, peer: String, doc_id: String, clock: NaiveDateTime, ) -> Result<()> { self - .storage + .pool + .ensure_storage(universal_id)? .set_peer_pushed_clock(peer, doc_id, clock) - .await - .map_err(map_err) + .await?; + Ok(()) } #[napi] - pub async fn clear_clocks(&self) -> Result<()> { - self.storage.clear_clocks().await.map_err(map_err) + pub async fn clear_clocks(&self, universal_id: String) -> Result<()> { + self + .pool + .ensure_storage(universal_id)? + .clear_clocks() + .await?; + Ok(()) } } diff --git a/packages/frontend/native/nbstore/src/pool.rs b/packages/frontend/native/nbstore/src/pool.rs new file mode 100644 index 0000000000000..fce885384123a --- /dev/null +++ b/packages/frontend/native/nbstore/src/pool.rs @@ -0,0 +1,54 @@ +use dashmap::{mapref::one::RefMut, DashMap, Entry}; + +use super::{ + error::{Error, Result}, + storage::SqliteDocStorage, +}; + +#[derive(Default)] +pub struct SqliteDocStoragePool { + inner: DashMap, +} + +impl SqliteDocStoragePool { + fn get_or_create_storage<'a>( + &'a self, + universal_id: String, + path: &str, + ) -> RefMut<'a, String, SqliteDocStorage> { + let entry = self.inner.entry(universal_id); + if let Entry::Occupied(storage) = entry { + return storage.into_ref(); + } + let storage = SqliteDocStorage::new(path.to_string()); + + entry.or_insert(storage) + } + + pub fn ensure_storage<'a>( + &'a self, + universal_id: String, + ) -> Result> { + let entry = self.inner.entry(universal_id); + + if let Entry::Occupied(storage) = entry { + Ok(storage.into_ref()) + } else { + Err(Error::InvalidOperation) + } + } + + /// Initialize the database and run migrations. 
+ pub async fn connect(&self, universal_id: String, path: String) -> Result<()> { + let storage = self.get_or_create_storage(universal_id.to_owned(), &path); + storage.connect().await?; + Ok(()) + } + + pub async fn disconnect(&self, universal_id: String) -> Result<()> { + let storage = self.ensure_storage(universal_id.to_owned())?; + storage.close().await; + self.inner.remove(&universal_id); + Ok(()) + } +} diff --git a/packages/frontend/native/nbstore/src/storage.rs b/packages/frontend/native/nbstore/src/storage.rs index 0f47075d65acc..056f5ee90c926 100644 --- a/packages/frontend/native/nbstore/src/storage.rs +++ b/packages/frontend/native/nbstore/src/storage.rs @@ -5,7 +5,7 @@ use sqlx::{ Pool, Row, }; -pub type Result = std::result::Result; +use super::error::Result; pub struct SqliteDocStorage { pub pool: Pool, @@ -52,7 +52,7 @@ impl SqliteDocStorage { } pub async fn connect(&self) -> Result<()> { - if !Sqlite::database_exists(&self.path).await.unwrap_or(false) { + if !Sqlite::database_exists(&self.path).await? { Sqlite::create_database(&self.path).await?; }; @@ -79,7 +79,6 @@ impl SqliteDocStorage { /// /// Flush the WAL file to the database file. /// See https://www.sqlite.org/pragma.html#pragma_wal_checkpoint:~:text=PRAGMA%20schema.wal_checkpoint%3B - /// pub async fn checkpoint(&self) -> Result<()> { sqlx::query("PRAGMA wal_checkpoint(FULL);") .execute(&self.pool) diff --git a/packages/frontend/native/nbstore/src/sync.rs b/packages/frontend/native/nbstore/src/sync.rs index 37eabc5abe7bb..42f5f9a623cbd 100644 --- a/packages/frontend/native/nbstore/src/sync.rs +++ b/packages/frontend/native/nbstore/src/sync.rs @@ -1,28 +1,32 @@ use chrono::NaiveDateTime; -use super::storage::{Result, SqliteDocStorage}; use super::DocClock; +use super::{error::Result, storage::SqliteDocStorage}; impl SqliteDocStorage { pub async fn get_peer_remote_clocks(&self, peer: String) -> Result> { - sqlx::query_as!( + let result = sqlx::query_as!( DocClock, "SELECT doc_id, remote_clock as timestamp FROM peer_clocks WHERE peer = ?", peer ) .fetch_all(&self.pool) - .await + .await?; + + Ok(result) } pub async fn get_peer_remote_clock(&self, peer: String, doc_id: String) -> Result { - sqlx::query_as!( + let result = sqlx::query_as!( DocClock, "SELECT doc_id, remote_clock as timestamp FROM peer_clocks WHERE peer = ? AND doc_id = ?", peer, doc_id ) .fetch_one(&self.pool) - .await + .await?; + + Ok(result) } pub async fn set_peer_remote_clock( @@ -48,13 +52,15 @@ impl SqliteDocStorage { } pub async fn get_peer_pulled_remote_clocks(&self, peer: String) -> Result> { - sqlx::query_as!( + let result = sqlx::query_as!( DocClock, "SELECT doc_id, pulled_remote_clock as timestamp FROM peer_clocks WHERE peer = ?", peer ) .fetch_all(&self.pool) - .await + .await?; + + Ok(result) } pub async fn get_peer_pulled_remote_clock( @@ -62,14 +68,16 @@ impl SqliteDocStorage { peer: String, doc_id: String, ) -> Result { - sqlx::query_as!( + let result = sqlx::query_as!( DocClock, "SELECT doc_id, pulled_remote_clock as timestamp FROM peer_clocks WHERE peer = ? 
AND doc_id = ?", peer, doc_id ) .fetch_one(&self.pool) - .await + .await?; + + Ok(result) } pub async fn set_peer_pulled_remote_clock( @@ -95,24 +103,28 @@ impl SqliteDocStorage { } pub async fn get_peer_pushed_clocks(&self, peer: String) -> Result> { - sqlx::query_as!( + let result = sqlx::query_as!( DocClock, "SELECT doc_id, pushed_clock as timestamp FROM peer_clocks WHERE peer = ?", peer ) .fetch_all(&self.pool) - .await + .await?; + + Ok(result) } pub async fn get_peer_pushed_clock(&self, peer: String, doc_id: String) -> Result { - sqlx::query_as!( + let result = sqlx::query_as!( DocClock, "SELECT doc_id, pushed_clock as timestamp FROM peer_clocks WHERE peer = ? AND doc_id = ?", peer, doc_id ) .fetch_one(&self.pool) - .await + .await?; + + Ok(result) } pub async fn set_peer_pushed_clock( diff --git a/yarn.lock b/yarn.lock index d36769391c626..f8e699375da21 100644 --- a/yarn.lock +++ b/yarn.lock @@ -702,7 +702,6 @@ __metadata: version: 0.0.0-use.local resolution: "@affine/nbstore@workspace:packages/common/nbstore" dependencies: - "@affine/electron-api": "workspace:*" "@affine/graphql": "workspace:*" "@datastructures-js/binary-search-tree": "npm:^5.3.2" "@toeverything/infra": "workspace:*" @@ -717,7 +716,6 @@ __metadata: y-protocols: "npm:^1.0.6" yjs: "npm:^13.6.21" peerDependencies: - "@affine/electron-api": "workspace:*" "@affine/graphql": "workspace:*" idb: ^8.0.0 socket.io-client: ^4.7.5
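One convention worth calling out across these changes: every clock that crosses the UniFFI boundary is now an `i64` of epoch milliseconds rather than a `SystemTime`, converted with `from_timestamp_millis` and failing closed with `TimestampDecodingError` on out-of-range input. This matches the deleted iOS TypeScript bridge, which already exchanged `clock.getTime()` / `new Date(timestamp)`. A self-contained round-trip sketch of that convention (assuming a chrono version providing `and_utc` and `from_timestamp_millis`, as the diff itself uses):

```rust
use chrono::{DateTime, NaiveDateTime, Utc};

/// Rust -> FFI: NaiveDateTime (stored as UTC) to epoch milliseconds.
fn to_millis(t: NaiveDateTime) -> i64 {
    t.and_utc().timestamp_millis()
}

/// FFI -> Rust: `from_timestamp_millis` returns None for out-of-range
/// values; the bindings map that to UniffiError::TimestampDecodingError.
fn from_millis(ms: i64) -> Option<NaiveDateTime> {
    DateTime::<Utc>::from_timestamp_millis(ms).map(|t| t.naive_utc())
}

fn main() {
    let now = Utc::now().naive_utc();
    let ms = to_millis(now);
    // Sub-millisecond precision is intentionally dropped at this boundary.
    assert_eq!(to_millis(from_millis(ms).unwrap()), ms);
    assert!(from_millis(i64::MAX).is_none()); // outside chrono's range
}
```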