diff --git a/.github/workflows/bumper.yml b/.github/workflows/bumper.yml
index 5a88ad87bc..375f97e516 100644
--- a/.github/workflows/bumper.yml
+++ b/.github/workflows/bumper.yml
@@ -24,13 +24,14 @@ jobs:
repository: ${{ matrix.target.repo }}
ref: ${{ matrix.target.branch }}
path: nbc
- submodules: true
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- name: Checkout this ref
run: |
- cd nbc/vendor/nim-libp2p
+ cd nbc
+ git submodule update --init vendor/nim-libp2p
+ cd vendor/nim-libp2p
git checkout $GITHUB_SHA
- name: Commit this bump
@@ -38,7 +39,7 @@ jobs:
cd nbc
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name "${{ github.actor }}"
- git commit -a -m "auto-bump nim-libp2p"
+ git commit --allow-empty -a -m "auto-bump nim-libp2p"
git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c5a72b8fd3..eb48f2e428 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -28,7 +28,7 @@ jobs:
cpu: amd64
#- os: windows
#cpu: i386
- branch: [version-1-2, version-1-6]
+ branch: [version-1-6]
include:
- target:
os: linux
diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml
index dce867672d..445d5e77dd 100644
--- a/.github/workflows/doc.yml
+++ b/.github/workflows/doc.yml
@@ -19,7 +19,7 @@ jobs:
- uses: jiro4989/setup-nim-action@v1
with:
- nim-version: 'stable'
+ nim-version: '1.6.x'
- name: Generate doc
run: |
diff --git a/.github/workflows/interop.yml b/.github/workflows/interop.yml
index ef7ca41c11..41054e298f 100644
--- a/.github/workflows/interop.yml
+++ b/.github/workflows/interop.yml
@@ -23,7 +23,7 @@ jobs:
- name: Build image
run: >
- cd multidim-interop/impl/nim/v1.0 &&
+ cd transport-interop/impl/nim/v1.0 &&
make commitSha=$GITHUB_SHA image_name=nim-libp2p-head
- name: Create ping-version.json
@@ -45,10 +45,10 @@ jobs:
]
}
EOF
-
+
) > ${{ github.workspace }}/test_head.json
- - uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master
+ - uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json
diff --git a/.github/workflows/multi_nim.yml b/.github/workflows/multi_nim.yml
index 1334983ec2..61d1d827bc 100644
--- a/.github/workflows/multi_nim.yml
+++ b/.github/workflows/multi_nim.yml
@@ -27,7 +27,7 @@ jobs:
cpu: amd64
#- os: windows
#cpu: i386
- branch: [version-1-2, version-1-6, devel]
+ branch: [version-1-6, version-2-0, devel]
include:
- target:
os: linux
@@ -48,7 +48,7 @@ jobs:
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
- continue-on-error: ${{ matrix.branch == 'devel' }}
+ continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v2
diff --git a/.pinned b/.pinned
index e89bfaea41..3d79f8b63c 100644
--- a/.pinned
+++ b/.pinned
@@ -1,16 +1,17 @@
-bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
-chronicles;https://github.com/status-im/nim-chronicles@#1e6350870855541b381d77d4659688bc0d2c4227
-chronos;https://github.com/status-im/nim-chronos@#ab5a8c2e0f6941fe3debd61dff0293790079d1b0
-dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
-faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
-httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
-json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
-metrics;https://github.com/status-im/nim-metrics@#abf3acc7f06cee9ee2c287d2f31413dc3df4c04e
-nimcrypto;https://github.com/cheatfate/nimcrypto@#4014ef939b51e02053c2e16dd3481d47bc9267dd
-secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
-serialization;https://github.com/status-im/nim-serialization@#5b7cea55efeb074daa8abd8146a03a34adb4521a
-stew;https://github.com/status-im/nim-stew@#003fe9f0c83c2b0b2ccbd37087e6d1ccd30a3234
+bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
+chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
+chronos;https://github.com/status-im/nim-chronos@#ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c
+dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
+faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
+httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
+json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
+metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
+nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
+results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
+secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
+serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
+stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
-unittest2;https://github.com/status-im/nim-unittest2@#883c7a50ad3b82158e64d074c5578fe33ab3c452
-websock;https://github.com/status-im/nim-websock@#fea05cde8b123b38d1a0a8524b77efbc84daa848
-zlib;https://github.com/status-im/nim-zlib@#826e2fc013f55b4478802d4f2e39f187c50d520a
+unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
+websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
+zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b
\ No newline at end of file
diff --git a/README.md b/README.md
index f91a730a2b..5d3b4771f2 100644
--- a/README.md
+++ b/README.md
@@ -105,7 +105,7 @@ The versioning follows [semver](https://semver.org/), with some additions:
- Some libp2p procedures are marked as `.public.`; they will remain compatible during each `MAJOR` version
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)
-We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.2 & 1.6`
+We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
## Development
Clone and Install dependencies:
diff --git a/config.nims b/config.nims
index 5d3e88d2aa..69f9485d45 100644
--- a/config.nims
+++ b/config.nims
@@ -9,11 +9,8 @@ switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--define:chronosStrictException
--styleCheck:usages
-if (NimMajor, NimMinor) < (1, 6):
- --styleCheck:hint
-else:
- switch("warningAsError", "UseBase:on")
- --styleCheck:error
+switch("warningAsError", "UseBase:on")
+--styleCheck:error
# Avoid some rare stack corruption while using exceptions with a SEH-enabled
# toolchain: https://github.com/status-im/nimbus-eth2/issues/3121
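
For context on what the now-unconditional switches above enforce: `--styleCheck:error` rejects any call site whose identifier spelling deviates from its declaration, and `--warningAsError:UseBase` promotes the base-method warning to an error. A minimal sketch of the spelling rule (identifiers are illustrative):

```nim
proc fooBar(x: int): int = x * 2

echo fooBar(21)      # ok: spelling matches the declaration exactly
# echo foo_bar(21)   # rejected under --styleCheck:usages --styleCheck:error
```
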
diff --git a/libp2p.nimble b/libp2p.nimble
index 5f82b3742c..7fd5fb8548 100644
--- a/libp2p.nimble
+++ b/libp2p.nimble
@@ -7,7 +7,7 @@ description = "LibP2P implementation"
license = "MIT"
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
-requires "nim >= 1.2.0",
+requires "nim >= 1.6.0",
"nimcrypto >= 0.4.1",
"dnsclient >= 0.3.0 & < 0.4.0",
"bearssl >= 0.1.4",
@@ -17,14 +17,24 @@ requires "nim >= 1.2.0",
"secp256k1",
"stew#head",
"websock",
- "unittest2 >= 0.0.5 & < 0.1.0"
+ "unittest2 >= 0.0.5 & <= 0.1.0"
+
+let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
+let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
+let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler
+let verbose = getEnv("V", "") notin ["", "0"]
+
+let cfg =
+ " --styleCheck:usages --styleCheck:error" &
+ (if verbose: "" else: " --verbosity:0 --hints:off") &
+ " --skipParentCfg --skipUserCfg -f" &
+ " --threads:on --opt:speed"
+
+import hashes, strutils
-import hashes
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
- var excstr = "nim c --skipParentCfg --opt:speed -d:debug "
- excstr.add(" " & getEnv("NIMFLAGS") & " ")
- excstr.add(" --verbosity:0 --hints:off ")
+ var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
@@ -34,7 +44,7 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false, extraFlags = "") =
- var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off -p:. " & extraFlags
+ var excstr = nimc & " " & lang & " " & cfg & " " & flags & " -p:. " & extraFlags
excstr.add(" examples/" & filename)
exec excstr
if run:
@@ -42,7 +52,7 @@ proc buildSample(filename: string, run = false, extraFlags = "") =
rmFile "examples/" & filename.toExe
proc tutorialToMd(filename: string) =
- let markdown = gorge "cat " & filename & " | nim c -r --verbosity:0 --hints:off tools/markdown_builder.nim "
+ let markdown = gorge "cat " & filename & " | " & nimc & " " & lang & " -r --verbosity:0 --hints:off tools/markdown_builder.nim "
writeFile(filename.replace(".nim", ".md"), markdown)
task testnative, "Runs libp2p native tests":
@@ -104,15 +114,12 @@ task examples_build, "Build the samples":
buildSample("circuitrelay", true)
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
- if (NimMajor, NimMinor) > (1, 2):
- # These tutorials relies on post 1.4 exception tracking
- buildSample("tutorial_3_protobuf", true)
- buildSample("tutorial_4_gossipsub", true)
- buildSample("tutorial_5_discovery", true)
- # Nico doesn't work in 1.2
- exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
- exec "nimble install -y nico"
- buildSample("tutorial_6_game", false, "--styleCheck:off")
+ buildSample("tutorial_3_protobuf", true)
+ buildSample("tutorial_4_gossipsub", true)
+ buildSample("tutorial_5_discovery", true)
+ exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
+ exec "nimble install -y nico"
+ buildSample("tutorial_6_game", false, "--styleCheck:off")
# pin system
# while nimble lockfile
@@ -123,7 +130,7 @@ task pin, "Create a lockfile":
# pinner.nim was originally here
# but you can't read output from
# a command in a nimscript
- exec "nim c -r tools/pinner.nim"
+ exec nimc & " c -r tools/pinner.nim"
import sequtils
import os
diff --git a/libp2p/builders.nim b/libp2p/builders.nim
index a0d91261c8..71320826ac 100644
--- a/libp2p/builders.nim
+++ b/libp2p/builders.nim
@@ -16,10 +16,7 @@ runnableExamples:
# etc
.build()
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import
options, tables, chronos, chronicles, sequtils,
@@ -36,7 +33,7 @@ export
switch, peerid, peerinfo, connection, multiaddress, crypto, errors
type
- TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [Defect].}
+ TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
SecureProtocol* {.pure.} = enum
Noise,
@@ -57,7 +54,7 @@ type
protoVersion: string
agentVersion: string
nameResolver: NameResolver
- peerStoreCapacity: Option[int]
+ peerStoreCapacity: Opt[int]
autonat: bool
circuitRelay: Relay
rdv: RendezVous
@@ -173,7 +170,7 @@ proc withMaxConnsPerPeer*(b: SwitchBuilder, maxConnsPerPeer: int): SwitchBuilder
b
proc withPeerStore*(b: SwitchBuilder, capacity: int): SwitchBuilder {.public.} =
- b.peerStoreCapacity = some(capacity)
+ b.peerStoreCapacity = Opt.some(capacity)
b
proc withProtoVersion*(b: SwitchBuilder, protoVersion: string): SwitchBuilder {.public.} =
@@ -205,7 +202,7 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b
proc build*(b: SwitchBuilder): Switch
- {.raises: [Defect, LPError], public.} =
+ {.raises: [LPError], public.} =
if b.rng == nil: # newRng could fail
raise newException(Defect, "Cannot initialize RNG")
@@ -230,7 +227,7 @@ proc build*(b: SwitchBuilder): Switch
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
- muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)
+ muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
let
transports = block:
@@ -245,9 +242,9 @@ proc build*(b: SwitchBuilder): Switch
if isNil(b.rng):
b.rng = newRng()
- let peerStore =
- if isSome(b.peerStoreCapacity):
- PeerStore.new(identify, b.peerStoreCapacity.get())
+ let peerStore = block:
+ b.peerStoreCapacity.withValue(capacity):
+ PeerStore.new(identify, capacity)
else:
PeerStore.new(identify)
@@ -296,7 +293,7 @@ proc newStandardSwitch*(
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000): Switch
- {.raises: [Defect, LPError], public.} =
+ {.raises: [LPError], public.} =
## Helper for common switch configurations.
{.push warning[Deprecated]:off.}
if SecureProtocol.Secio in secureManagers:
@@ -319,7 +316,7 @@ proc newStandardSwitch*(
.withNameResolver(nameResolver)
.withNoise()
- if privKey.isSome():
- b = b.withPrivateKey(privKey.get())
+ privKey.withValue(pkey):
+ b = b.withPrivateKey(pkey)
b.build()
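
A recurring pattern in this diff is the migration from `std/options` (`some`/`isSome`/`get`) to `Opt` from `stew/results`, accessed through `valueOr` and libp2p's `withValue` template. A minimal sketch of the idioms used above, assuming `stew/results` and `libp2p/utility`:

```nim
import stew/results          # Opt[T] is an alias for Result[T, void]
import libp2p/utility        # provides the withValue template used above

proc capacityOrDefault(cap: Opt[int]): int =
  # valueOr unwraps the value, or evaluates the fallback block
  cap.valueOr: 1000

var stored = Opt.none(int)
stored = Opt.some(42)        # Opt.some / Opt.none replace some() / none()
stored.withValue(c):         # body runs only when a value is present
  echo "capacity: ", c
doAssert capacityOrDefault(stored) == 42
```
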
diff --git a/libp2p/cid.nim b/libp2p/cid.nim
index 846cc71fd0..970988814e 100644
--- a/libp2p/cid.nim
+++ b/libp2p/cid.nim
@@ -9,10 +9,7 @@
## This module implements CID (Content IDentifier).
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
@@ -279,9 +276,6 @@ proc `$`*(cid: Cid): string =
BTCBase58.encode(cid.data.buffer)
elif cid.cidver == CIDv1:
let res = MultiBase.encode("base58btc", cid.data.buffer)
- if res.isOk():
- res.get()
- else:
- ""
+ res.get("")
else:
""
diff --git a/libp2p/connmanager.nim b/libp2p/connmanager.nim
index 48d3ead467..0ab7d8b12b 100644
--- a/libp2p/connmanager.nim
+++ b/libp2p/connmanager.nim
@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import std/[options, tables, sequtils, sets]
+import std/[tables, sequtils, sets]
import pkg/[chronos, chronicles, metrics]
import peerinfo,
peerstore,
@@ -51,7 +48,7 @@ type
ConnEventHandler* =
proc(peerId: PeerId, event: ConnEvent): Future[void]
- {.gcsafe, raises: [Defect].}
+ {.gcsafe, raises: [].}
PeerEventKind* {.pure.} = enum
Left,
@@ -65,7 +62,7 @@ type
discard
PeerEventHandler* =
- proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [Defect].}
+ proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}
ConnManager* = ref object of RootObj
maxConnsPerPeer: int
@@ -285,7 +282,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
proc storeMuxer*(c: ConnManager,
muxer: Muxer)
- {.raises: [Defect, CatchableError].} =
+ {.raises: [CatchableError].} =
## store the connection and muxer
##
@@ -338,7 +335,7 @@ proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)
-proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [Defect, TooManyConnectionsError].} =
+proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [TooManyConnectionsError].} =
if forceDial:
c.outSema.forceAcquire()
elif not c.outSema.tryAcquire():
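
The blanket replacement of `{.push raises: [Defect].}` with `{.push raises: [].}` works because on Nim 1.4+ `Defect` is no longer part of a proc's tracked exception effects (the `when` branches existed only to keep 1.2 compiling); an empty list now reads as "no catchable exception escapes". A minimal sketch of what the effect system enforces, with illustrative procs:

```nim
import std/strutils

{.push raises: [].}          # from here on, nothing catchable may escape

proc double(x: int): int =
  x * 2                      # Defects (e.g. overflow) are not tracked

proc mustParse(s: string): int {.raises: [ValueError].} =
  parseInt(s)                # explicitly widened list for this proc only

{.pop.}
```
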
diff --git a/libp2p/crypto/chacha20poly1305.nim b/libp2p/crypto/chacha20poly1305.nim
index d26f31ce92..bddbed81c9 100644
--- a/libp2p/crypto/chacha20poly1305.nim
+++ b/libp2p/crypto/chacha20poly1305.nim
@@ -15,10 +15,7 @@
# RFC @ https://tools.ietf.org/html/rfc7539
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import bearssl/blockx
from stew/assign2 import assign
diff --git a/libp2p/crypto/crypto.nim b/libp2p/crypto/crypto.nim
index 27df105d42..4f7c198fb5 100644
--- a/libp2p/crypto/crypto.nim
+++ b/libp2p/crypto/crypto.nim
@@ -8,10 +8,7 @@
# those terms.
## This module implements Public Key and Private Key interface for libp2p.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
from strutils import split, strip, cmpIgnoreCase
@@ -68,11 +65,13 @@ when supported(PKScheme.Ed25519):
import ed25519/ed25519
when supported(PKScheme.Secp256k1):
import secp
+when supported(PKScheme.ECDSA):
+ import ecnist
-# We are still importing `ecnist` because, it is used for SECIO handshake,
-# but it will be impossible to create ECNIST keys or import ECNIST keys.
+ # These used to be declared in `crypto` itself
+ export ecnist.ephemeral, ecnist.ECDHEScheme
-import ecnist, bearssl/rand, bearssl/hash as bhash
+import bearssl/rand, bearssl/hash as bhash
import ../protobuf/minprotobuf, ../vbuffer, ../multihash, ../multicodec
import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -89,8 +88,6 @@ type
Sha256,
Sha512
- ECDHEScheme* = EcCurveKind
-
PublicKey* = object
case scheme*: PKScheme
of PKScheme.RSA:
@@ -458,7 +455,8 @@ proc getBytes*(sig: Signature): seq[byte] =
## Return signature ``sig`` in binary form.
result = sig.data
-proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
+template initImpl[T: PrivateKey|PublicKey](
+ key: var T, data: openArray[byte]): bool =
## Initialize private key ``key`` from libp2p's protobuf serialized raw
## binary form.
##
@@ -471,7 +469,7 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
var pb = initProtoBuffer(@data)
let r1 = pb.getField(1, id)
let r2 = pb.getField(2, buffer)
- if not(r1.isOk() and r1.get() and r2.isOk() and r2.get()):
+ if not(r1.get(false) and r2.get(false)):
false
else:
if cast[int8](id) notin SupportedSchemesInt or len(buffer) <= 0:
@@ -520,6 +518,14 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
else:
false
+{.push warning[ProveField]:off.} # https://github.com/nim-lang/Nim/issues/22060
+proc init*(key: var PrivateKey, data: openArray[byte]): bool =
+ initImpl(key, data)
+
+proc init*(key: var PublicKey, data: openArray[byte]): bool =
+ initImpl(key, data)
+{.pop.}
+
proc init*(sig: var Signature, data: openArray[byte]): bool =
## Initialize signature ``sig`` from raw binary form.
##
@@ -873,34 +879,6 @@ proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
offset += secret.ivsize + secret.keysize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.macsize)
-proc ephemeral*(
- scheme: ECDHEScheme,
- rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
- ## Generate ephemeral keys used to perform ECDHE.
- var keypair: EcKeyPair
- if scheme == Secp256r1:
- keypair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
- elif scheme == Secp384r1:
- keypair = ? EcKeyPair.random(Secp384r1, rng).orError(KeyError)
- elif scheme == Secp521r1:
- keypair = ? EcKeyPair.random(Secp521r1, rng).orError(KeyError)
- ok(keypair)
-
-proc ephemeral*(
- scheme: string, rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
- ## Generate ephemeral keys used to perform ECDHE using string encoding.
- ##
- ## Currently supported encoding strings are P-256, P-384, P-521, if encoding
- ## string is not supported P-521 key will be generated.
- if scheme == "P-256":
- ephemeral(Secp256r1, rng)
- elif scheme == "P-384":
- ephemeral(Secp384r1, rng)
- elif scheme == "P-521":
- ephemeral(Secp521r1, rng)
- else:
- ephemeral(Secp521r1, rng)
-
proc getOrder*(remotePubkey, localNonce: openArray[byte],
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
## Compare values and calculate `order` parameter.
@@ -976,9 +954,8 @@ proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
let r4 = pb.getField(4, ciphers)
let r5 = pb.getField(5, hashes)
- r1.isOk() and r1.get() and r2.isOk() and r2.get() and
- r3.isOk() and r3.get() and r4.isOk() and r4.get() and
- r5.isOk() and r5.get()
+ r1.get(false) and r2.get(false) and r3.get(false) and
+ r4.get(false) and r5.get(false)
proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
@@ -998,32 +975,32 @@ proc decodeExchange*(message: seq[byte],
var pb = initProtoBuffer(message)
let r1 = pb.getField(1, pubkey)
let r2 = pb.getField(2, signature)
- r1.isOk() and r1.get() and r2.isOk() and r2.get()
+ r1.get(false) and r2.get(false)
## Serialization/Deserialization helpers
proc write*(vb: var VBuffer, pubkey: PublicKey) {.
- inline, raises: [Defect, ResultError[CryptoError]].} =
+ inline, raises: [ResultError[CryptoError]].} =
## Write PublicKey value ``pubkey`` to buffer ``vb``.
vb.writeSeq(pubkey.getBytes().tryGet())
proc write*(vb: var VBuffer, seckey: PrivateKey) {.
- inline, raises: [Defect, ResultError[CryptoError]].} =
+ inline, raises: [ResultError[CryptoError]].} =
## Write PrivateKey value ``seckey`` to buffer ``vb``.
vb.writeSeq(seckey.getBytes().tryGet())
proc write*(vb: var VBuffer, sig: PrivateKey) {.
- inline, raises: [Defect, ResultError[CryptoError]].} =
+ inline, raises: [ResultError[CryptoError]].} =
## Write Signature value ``sig`` to buffer ``vb``.
vb.writeSeq(sig.getBytes().tryGet())
proc write*[T: PublicKey|PrivateKey](pb: var ProtoBuffer, field: int,
key: T) {.
- inline, raises: [Defect, ResultError[CryptoError]].} =
+ inline, raises: [ResultError[CryptoError]].} =
write(pb, field, key.getBytes().tryGet())
proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.
- inline, raises: [Defect].} =
+ inline, raises: [].} =
write(pb, field, sig.getBytes())
proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,
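
The repeated `rN.isOk() and rN.get()` chains above collapse into `rN.get(false)`: stew's `Result.get(otherwise)` yields the value on success and the supplied default on error. A minimal sketch with a hypothetical `parseFlag` decoder standing in for `ProtoBuffer.getField`:

```nim
import stew/results

proc parseFlag(raw: string): Result[bool, string] =
  # hypothetical decoder: ok on recognized input, err otherwise
  case raw
  of "1": ok(true)
  of "0": ok(false)
  else: err("not a flag")

# get(false) folds the isOk/get pair into a single call
doAssert parseFlag("1").get(false) == true
doAssert parseFlag("oops").get(false) == false
```
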
diff --git a/libp2p/crypto/curve25519.nim b/libp2p/crypto/curve25519.nim
index 94255ac37a..b9d6410884 100644
--- a/libp2p/crypto/curve25519.nim
+++ b/libp2p/crypto/curve25519.nim
@@ -15,10 +15,7 @@
# RFC @ https://tools.ietf.org/html/rfc7748
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import bearssl/[ec, rand]
import stew/results
diff --git a/libp2p/crypto/ecnist.nim b/libp2p/crypto/ecnist.nim
index cbc30e6f95..8471e0e97e 100644
--- a/libp2p/crypto/ecnist.nim
+++ b/libp2p/crypto/ecnist.nim
@@ -14,10 +14,7 @@
## BearSSL library
## Copyright(C) 2018 Thomas Pornin .
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import bearssl/[ec, rand, hash]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -997,3 +994,33 @@ proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
# Clear context with initial value
kv.init(addr hc.vtable)
result = (res == 1)
+
+type ECDHEScheme* = EcCurveKind
+
+proc ephemeral*(
+ scheme: ECDHEScheme,
+ rng: var HmacDrbgContext): EcResult[EcKeyPair] =
+ ## Generate ephemeral keys used to perform ECDHE.
+ var keypair: EcKeyPair
+ if scheme == Secp256r1:
+ keypair = ? EcKeyPair.random(Secp256r1, rng)
+ elif scheme == Secp384r1:
+ keypair = ? EcKeyPair.random(Secp384r1, rng)
+ elif scheme == Secp521r1:
+ keypair = ? EcKeyPair.random(Secp521r1, rng)
+ ok(keypair)
+
+proc ephemeral*(
+ scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
+ ## Generate ephemeral keys used to perform ECDHE using string encoding.
+ ##
+  ## Currently supported encoding strings are P-256, P-384 and P-521; if the
+  ## encoding string is not supported, a P-521 key will be generated.
+ if scheme == "P-256":
+ ephemeral(Secp256r1, rng)
+ elif scheme == "P-384":
+ ephemeral(Secp384r1, rng)
+ elif scheme == "P-521":
+ ephemeral(Secp521r1, rng)
+ else:
+ ephemeral(Secp521r1, rng)
diff --git a/libp2p/crypto/ed25519/ed25519.nim b/libp2p/crypto/ed25519/ed25519.nim
index 0033417051..dc49d47134 100644
--- a/libp2p/crypto/ed25519/ed25519.nim
+++ b/libp2p/crypto/ed25519/ed25519.nim
@@ -11,10 +11,7 @@
## This code is a port of the public domain, "ref10" implementation of ed25519
## from SUPERCOP.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import bearssl/rand
import constants
diff --git a/libp2p/crypto/hkdf.nim b/libp2p/crypto/hkdf.nim
index f29d34b66d..4808120dcb 100644
--- a/libp2p/crypto/hkdf.nim
+++ b/libp2p/crypto/hkdf.nim
@@ -9,10 +9,7 @@
# https://tools.ietf.org/html/rfc5869
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import nimcrypto
import bearssl/[kdf, hash]
diff --git a/libp2p/crypto/minasn1.nim b/libp2p/crypto/minasn1.nim
index 8185a0f734..2c06d3db39 100644
--- a/libp2p/crypto/minasn1.nim
+++ b/libp2p/crypto/minasn1.nim
@@ -9,10 +9,7 @@
## This module implements minimal ASN.1 encoding/decoding primitives.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import stew/[endians2, results, ctops]
export results
diff --git a/libp2p/crypto/rsa.nim b/libp2p/crypto/rsa.nim
index 5c5890a3cd..53f0985b1c 100644
--- a/libp2p/crypto/rsa.nim
+++ b/libp2p/crypto/rsa.nim
@@ -13,10 +13,7 @@
## BearSSL library
## Copyright(C) 2018 Thomas Pornin .
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import bearssl/[rsa, rand, hash]
import minasn1
diff --git a/libp2p/crypto/secp.nim b/libp2p/crypto/secp.nim
index e7be086679..caf17c2eed 100644
--- a/libp2p/crypto/secp.nim
+++ b/libp2p/crypto/secp.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import bearssl/rand
import
diff --git a/libp2p/daemon/daemonapi.nim b/libp2p/daemon/daemonapi.nim
index bced3eb053..27bafa7be8 100644
--- a/libp2p/daemon/daemonapi.nim
+++ b/libp2p/daemon/daemonapi.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
## This module implements the API for `go-libp2p-daemon`.
import std/[os, osproc, strutils, tables, strtabs, sequtils]
@@ -153,10 +150,10 @@ type
key*: PublicKey
P2PStreamCallback* = proc(api: DaemonAPI,
- stream: P2PStream): Future[void] {.gcsafe, raises: [Defect, CatchableError].}
+ stream: P2PStream): Future[void] {.gcsafe, raises: [CatchableError].}
P2PPubSubCallback* = proc(api: DaemonAPI,
ticket: PubsubTicket,
- message: PubSubMessage): Future[bool] {.gcsafe, raises: [Defect, CatchableError].}
+ message: PubSubMessage): Future[bool] {.gcsafe, raises: [CatchableError].}
DaemonError* = object of LPError
DaemonRemoteError* = object of DaemonError
@@ -474,7 +471,7 @@ proc checkResponse(pb: ProtoBuffer): ResponseKind {.inline.} =
else:
result = ResponseKind.Error
-proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [Defect, DaemonLocalError].} =
+proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [DaemonLocalError].} =
var error: seq[byte]
if pb.getRequiredField(ResponseType.ERROR.int, error).isOk():
if initProtoBuffer(error).getRequiredField(1, result).isErr():
@@ -504,7 +501,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
result = buffer
proc newConnection*(api: DaemonAPI): Future[StreamTransport]
- {.raises: [Defect, LPError].} =
+ {.raises: [LPError].} =
result = connect(api.address)
proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
@@ -515,7 +512,7 @@ proc socketExists(address: MultiAddress): Future[bool] {.async.} =
var transp = await connect(address)
await transp.closeWait()
result = true
- except CatchableError, Defect:
+ except CatchableError:
result = false
when defined(windows):
@@ -837,7 +834,7 @@ proc transactMessage(transp: StreamTransport,
result = initProtoBuffer(message)
proc getPeerInfo(pb: ProtoBuffer): PeerInfo
- {.raises: [Defect, DaemonLocalError].} =
+ {.raises: [DaemonLocalError].} =
## Get PeerInfo object from ``pb``.
result.addresses = newSeq[MultiAddress]()
if pb.getRequiredField(1, result.peer).isErr():
@@ -868,7 +865,7 @@ proc connect*(api: DaemonAPI, peer: PeerId,
timeout))
pb.withMessage() do:
discard
- except CatchableError, Defect:
+ except CatchableError:
await api.closeConnection(transp)
proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
@@ -928,7 +925,7 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
asyncSpawn handler(api, stream)
proc addHandler*(api: DaemonAPI, protocols: seq[string],
- handler: P2PStreamCallback) {.async, raises: [Defect, LPError].} =
+ handler: P2PStreamCallback) {.async, raises: [LPError].} =
## Add stream handler ``handler`` for set of protocols ``protocols``.
var transp = await api.newConnection()
let maddress = await getSocket(api.pattern, addr api.ucounter)
@@ -998,7 +995,7 @@ proc cmTrimPeers*(api: DaemonAPI) {.async.} =
await api.closeConnection(transp)
proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
- {.raises: [Defect, DaemonLocalError].} =
+ {.raises: [DaemonLocalError].} =
var res: seq[byte]
if pb.getRequiredField(2, res).isOk():
result = initProtoBuffer(res).getPeerInfo()
@@ -1006,23 +1003,23 @@ proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
raise newException(DaemonLocalError, "Missing required field `peer`!")
proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte]
- {.raises: [Defect, DaemonLocalError].} =
+ {.raises: [DaemonLocalError].} =
result = newSeq[byte]()
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey
- {.raises: [Defect, DaemonLocalError].} =
+ {.raises: [DaemonLocalError].} =
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId
- {.raises: [Defect, DaemonLocalError].} =
+ {.raises: [DaemonLocalError].} =
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
- {.inline, raises: [Defect, DaemonLocalError].} =
+ {.inline, raises: [DaemonLocalError].} =
var dhtResponse: seq[byte]
if pb.getRequiredField(ResponseType.DHT.int, dhtResponse).isOk():
var pbDhtResponse = initProtoBuffer(dhtResponse)
@@ -1041,7 +1038,7 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
raise newException(DaemonLocalError, "Wrong message type!")
proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
- {.inline, raises: [Defect, DaemonLocalError].} =
+ {.inline, raises: [DaemonLocalError].} =
var res: seq[byte]
if pb.getRequiredField(ResponseType.PUBSUB.int, res).isErr():
raise newException(DaemonLocalError, "Wrong message type!")
@@ -1049,7 +1046,7 @@ proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
initProtoBuffer(res)
proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
- {.inline, raises: [Defect, DaemonLocalError].} =
+ {.inline, raises: [DaemonLocalError].} =
var dtype: uint
if pb.getRequiredField(1, dtype).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `type`!")
diff --git a/libp2p/daemon/transpool.nim b/libp2p/daemon/transpool.nim
index ce00617617..ae1090b9cc 100644
--- a/libp2p/daemon/transpool.nim
+++ b/libp2p/daemon/transpool.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
## This module implements Pool of StreamTransport.
import chronos
diff --git a/libp2p/debugutils.nim b/libp2p/debugutils.nim
index ecc4c1d805..1bec5292e6 100644
--- a/libp2p/debugutils.nim
+++ b/libp2p/debugutils.nim
@@ -25,7 +25,7 @@
## 5. LocalAddress: optional bytes
## 6. RemoteAddress: optional bytes
## 7. Message: required bytes
-import os, options
+import os
import nimcrypto/utils, stew/endians2
import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
multiaddress, peerid, varint, muxers/mplex/coder
@@ -33,7 +33,7 @@ import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
from times import getTime, toUnix, fromUnix, nanosecond, format, Time,
NanosecondRange, initTime
from strutils import toHex, repeat
-export peerid, options, multiaddress
+export peerid, multiaddress
type
FlowDirection* = enum
@@ -43,10 +43,10 @@ type
timestamp*: uint64
direction*: FlowDirection
message*: seq[byte]
- seqID*: Option[uint64]
- mtype*: Option[uint64]
- local*: Option[MultiAddress]
- remote*: Option[MultiAddress]
+ seqID*: Opt[uint64]
+ mtype*: Opt[uint64]
+ local*: Opt[MultiAddress]
+ remote*: Opt[MultiAddress]
const
libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
@@ -72,7 +72,8 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
var pb = initProtoBuffer(options = {WithVarintLength})
pb.write(2, getTimestamp())
pb.write(4, uint64(direction))
- pb.write(6, conn.observedAddr)
+ conn.observedAddr.withValue(oaddr):
+ pb.write(6, oaddr)
pb.write(7, data)
pb.finish()
@@ -100,7 +101,7 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
finally:
close(handle)
-proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
+proc decodeDumpMessage*(data: openArray[byte]): Opt[ProtoMessage] =
## Decode protobuf's message ProtoMessage from array of bytes ``data``.
var
pb = initProtoBuffer(data)
@@ -108,13 +109,12 @@ proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
ma1, ma2: MultiAddress
pmsg: ProtoMessage
- let res2 = pb.getField(2, pmsg.timestamp)
- if res2.isErr() or not(res2.get()):
- return none[ProtoMessage]()
-
- let res4 = pb.getField(4, value)
- if res4.isErr() or not(res4.get()):
- return none[ProtoMessage]()
+ let
+ r2 = pb.getField(2, pmsg.timestamp)
+ r4 = pb.getField(4, value)
+ r7 = pb.getField(7, pmsg.message)
+ if not r2.get(false) or not r4.get(false) or not r7.get(false):
+ return Opt.none(ProtoMessage)
# `case` statement could not work here with an error "selector must be of an
# ordinal type, float or string"
@@ -124,30 +124,27 @@ proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
elif value == uint64(Incoming):
Incoming
else:
- return none[ProtoMessage]()
+ return Opt.none(ProtoMessage)
+
+ let r1 = pb.getField(1, value)
+ if r1.get(false):
+ pmsg.seqID = Opt.some(value)
- let res7 = pb.getField(7, pmsg.message)
- if res7.isErr() or not(res7.get()):
- return none[ProtoMessage]()
+ let r3 = pb.getField(3, value)
+ if r3.get(false):
+ pmsg.mtype = Opt.some(value)
- value = 0'u64
- let res1 = pb.getField(1, value)
- if res1.isOk() and res1.get():
- pmsg.seqID = some(value)
- value = 0'u64
- let res3 = pb.getField(3, value)
- if res3.isOk() and res3.get():
- pmsg.mtype = some(value)
- let res5 = pb.getField(5, ma1)
- if res5.isOk() and res5.get():
- pmsg.local = some(ma1)
- let res6 = pb.getField(6, ma2)
- if res6.isOk() and res6.get():
- pmsg.remote = some(ma2)
+ let
+ r5 = pb.getField(5, ma1)
+ r6 = pb.getField(6, ma2)
+ if r5.get(false):
+ pmsg.local = Opt.some(ma1)
+ if r6.get(false):
+ pmsg.remote = Opt.some(ma2)
- some(pmsg)
+ Opt.some(pmsg)
-iterator messages*(data: seq[byte]): Option[ProtoMessage] =
+iterator messages*(data: seq[byte]): Opt[ProtoMessage] =
## Iterate over sequence of bytes and decode all the ``ProtoMessage``
## messages we found.
var value: uint64
@@ -242,27 +239,19 @@ proc toString*(msg: ProtoMessage, dump = true): string =
" >> "
let address =
block:
- let local =
- if msg.local.isSome():
- "[" & $(msg.local.get()) & "]"
- else:
- "[LOCAL]"
- let remote =
- if msg.remote.isSome():
- "[" & $(msg.remote.get()) & "]"
- else:
- "[REMOTE]"
+ let local = block:
+ msg.local.withValue(loc): "[" & $loc & "]"
+ else: "[LOCAL]"
+ let remote = block:
+ msg.remote.withValue(rem): "[" & $rem & "]"
+ else: "[REMOTE]"
local & direction & remote
- let seqid =
- if msg.seqID.isSome():
- "seqID = " & $(msg.seqID.get()) & " "
- else:
- ""
- let mtype =
- if msg.mtype.isSome():
- "type = " & $(msg.mtype.get()) & " "
- else:
- ""
+ let seqid = block:
+      msg.seqID.withValue(seqid): "seqID = " & $seqid & " "
+ else: ""
+ let mtype = block:
+ msg.mtype.withValue(typ): "type = " & $typ & " "
+ else: ""
res.add(" ")
res.add(address)
res.add(" ")
diff --git a/libp2p/dial.nim b/libp2p/dial.nim
index 089ebdb69f..422af7018f 100644
--- a/libp2p/dial.nim
+++ b/libp2p/dial.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos
import stew/results
diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim
index 2ff052c7c4..9fabd3f3d0 100644
--- a/libp2p/dialer.nim
+++ b/libp2p/dialer.nim
@@ -58,16 +58,16 @@ proc dialAndUpgrade(
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
- trace "Dialing address", address, peerId, hostname
+ trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
let dialed =
try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, address, peerId)
except CancelledError as exc:
- debug "Dialing canceled", msg = exc.msg, peerId
+ debug "Dialing canceled", err = exc.msg, peerId = peerId.get(default(PeerId))
raise exc
except CatchableError as exc:
- debug "Dialing failed", msg = exc.msg, peerId
+ debug "Dialing failed", err = exc.msg, peerId = peerId.get(default(PeerId))
libp2p_failed_dials.inc()
return nil # Try the next address
@@ -81,7 +81,7 @@ proc dialAndUpgrade(
# If we failed to establish the connection through one transport,
# we won't succeed through another - no use in trying again
await dialed.close()
- debug "Upgrade failed", msg = exc.msg, peerId
+ debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
if exc isnot CancelledError:
if upgradeDir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()
@@ -131,7 +131,7 @@ proc dialAndUpgrade(
upgradeDir = Direction.Out):
Future[Muxer] {.async.} =
- debug "Dialing peer", peerId
+ debug "Dialing peer", peerId = peerId.get(default(PeerId))
for rawAddress in addrs:
# resolve potential dnsaddr
@@ -150,7 +150,7 @@ proc dialAndUpgrade(
if not isNil(result):
return result
-proc tryReusingConnection(self: Dialer, peerId: PeerId): Future[Opt[Muxer]] {.async.} =
+proc tryReusingConnection(self: Dialer, peerId: PeerId): Opt[Muxer] =
let muxer = self.connManager.selectMuxer(peerId)
if muxer == nil:
return Opt.none(Muxer)
@@ -174,10 +174,10 @@ proc internalConnect(
try:
await lock.acquire()
- if peerId.isSome and reuseConnection:
- let muxOpt = await self.tryReusingConnection(peerId.get())
- if muxOpt.isSome:
- return muxOpt.get()
+ if reuseConnection:
+ peerId.withValue(peerId):
+ self.tryReusingConnection(peerId).withValue(mux):
+ return mux
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
@@ -225,20 +225,20 @@ method connect*(
allowUnknownPeerId = false): Future[PeerId] {.async.} =
## Connects to a peer and retrieve its PeerId
- let fullAddress = parseFullAddress(address)
- if fullAddress.isOk:
+ parseFullAddress(address).toOpt().withValue(fullAddress):
return (await self.internalConnect(
- Opt.some(fullAddress.get()[0]),
- @[fullAddress.get()[1]],
- false)).connection.peerId
- else:
- if allowUnknownPeerId == false:
- raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
- return (await self.internalConnect(
- Opt.none(PeerId),
- @[address],
+ Opt.some(fullAddress[0]),
+ @[fullAddress[1]],
false)).connection.peerId
+ if allowUnknownPeerId == false:
+ raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
+
+ return (await self.internalConnect(
+ Opt.none(PeerId),
+ @[address],
+ false)).connection.peerId
+
proc negotiateStream(
self: Dialer,
conn: Connection,
@@ -324,7 +324,7 @@ method dial*(
await cleanup()
raise exc
except CatchableError as exc:
- debug "Error dialing", conn, msg = exc.msg
+ debug "Error dialing", conn, err = exc.msg
await cleanup()
raise exc
diff --git a/libp2p/discovery/discoverymngr.nim b/libp2p/discovery/discoverymngr.nim
index 55f71820cf..bff1c9bd5c 100644
--- a/libp2p/discovery/discoverymngr.nim
+++ b/libp2p/discovery/discoverymngr.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/sequtils
import chronos, chronicles, stew/results
@@ -18,7 +15,7 @@ import ../errors
type
BaseAttr = ref object of RootObj
- comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [Defect].}
+ comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [].}
Attribute[T] = ref object of BaseAttr
value: T
@@ -60,7 +57,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
return Opt.some(f.to(T))
Opt.none(T)
-proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [Defect, KeyError].} =
+proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
pa{T}.valueOr: raise newException(KeyError, "Attribute not found")
proc match*(pa, candidate: PeerAttributes): bool =
@@ -73,7 +70,7 @@ proc match*(pa, candidate: PeerAttributes): bool =
return true
type
- PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [Defect], gcsafe.}
+ PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [], gcsafe.}
DiscoveryInterface* = ref object of RootObj
onPeerFound*: PeerFoundCallback
@@ -125,20 +122,15 @@ proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
pa.add(value)
return dm.request(pa)
-proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
+proc advertise*[T](dm: DiscoveryManager, value: T) =
for i in dm.interfaces:
- i.toAdvertise = pa
+ i.toAdvertise.add(value)
if i.advertiseLoop.isNil:
i.advertisementUpdated = newAsyncEvent()
i.advertiseLoop = i.advertise()
else:
i.advertisementUpdated.fire()
-proc advertise*[T](dm: DiscoveryManager, value: T) =
- var pa: PeerAttributes
- pa.add(value)
- dm.advertise(pa)
-
template forEach*(query: DiscoveryQuery, code: untyped) =
## Will execute `code` for each discovered peer. The
## peer attributes are available through the variable
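
With this change `advertise` is generic and appends each value to the interfaces' `toAdvertise` lists instead of overwriting them, so repeated calls accumulate attributes. A minimal usage sketch (the `rdv` instance and namespace values are illustrative):

```nim
import libp2p/discovery/[discoverymngr, rendezvousinterface]

let dm = DiscoveryManager()
dm.add(RendezVousInterface.new(rdv))  # rdv: an existing RendezVous instance
# each call appends one attribute; earlier advertisements are kept
dm.advertise(RdvNamespace("my-app"))
dm.advertise(RdvNamespace("my-app/v2"))
```
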
diff --git a/libp2p/discovery/rendezvousinterface.nim b/libp2p/discovery/rendezvousinterface.nim
index c903244a28..e857466cd2 100644
--- a/libp2p/discovery/rendezvousinterface.nim
+++ b/libp2p/discovery/rendezvousinterface.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos
import ./discoverymngr,
@@ -22,6 +19,7 @@ type
rdv*: RendezVous
timeToRequest: Duration
timeToAdvertise: Duration
+ ttl: Duration
RdvNamespace* = distinct string
@@ -65,12 +63,16 @@ method advertise*(self: RendezVousInterface) {.async.} =
self.advertisementUpdated.clear()
for toAdv in toAdvertise:
- await self.rdv.advertise(toAdv, self.timeToAdvertise)
+ try:
+ await self.rdv.advertise(toAdv, self.ttl)
+ except CatchableError as error:
+ debug "RendezVous advertise error: ", msg = error.msg
await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
proc new*(T: typedesc[RendezVousInterface],
rdv: RendezVous,
ttr: Duration = 1.minutes,
- tta: Duration = MinimumDuration): RendezVousInterface =
- T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
+ tta: Duration = 1.minutes,
+ ttl: Duration = MinimumDuration): RendezVousInterface =
+ T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta, ttl: ttl)
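
The new `ttl` field separates how long a registration stays alive at the rendezvous point from the refresh interval `tta`, which previously served both purposes. A minimal construction sketch (the `rdv` instance and durations are illustrative):

```nim
import chronos
import libp2p/discovery/rendezvousinterface

# re-advertise every minute, but ask the rendezvous point to keep
# each registration alive for five minutes
let iface = RendezVousInterface.new(
  rdv,                       # assumed: an existing RendezVous instance
  tta = 1.minutes,
  ttl = 5.minutes)
```
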
diff --git a/libp2p/errors.nim b/libp2p/errors.nim
index 3b6f3ed403..e7da7094da 100644
--- a/libp2p/errors.nim
+++ b/libp2p/errors.nim
@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
# sadly nim needs more love for hygienic templates
# so here goes the macro, its based on the proc/template version
# and uses quote do so it's quite readable
-macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
+# TODO https://github.com/nim-lang/Nim/issues/22936
+macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
let nexclude = exclude.len
case nexclude
of 0:
diff --git a/libp2p/multiaddress.nim b/libp2p/multiaddress.nim
index 2c5d099951..02f5c48037 100644
--- a/libp2p/multiaddress.nim
+++ b/libp2p/multiaddress.nim
@@ -9,10 +9,7 @@
## This module implements MultiAddress.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
{.push public.}
import pkg/chronos, chronicles
@@ -419,6 +416,9 @@ const
MAProtocol(
mcodec: multiCodec("wss"), kind: Marker, size: 0
),
+ MAProtocol(
+ mcodec: multiCodec("tls"), kind: Marker, size: 0
+ ),
MAProtocol(
mcodec: multiCodec("ipfs"), kind: Length, size: 0,
coder: TranscoderP2P
@@ -471,7 +471,7 @@ const
IP* = mapOr(IP4, IP6)
DNS_OR_IP* = mapOr(DNS, IP)
TCP_DNS* = mapAnd(DNS, mapEq("tcp"))
- TCP_IP* =mapAnd(IP, mapEq("tcp"))
+ TCP_IP* = mapAnd(IP, mapEq("tcp"))
TCP* = mapOr(TCP_DNS, TCP_IP)
UDP_DNS* = mapAnd(DNS, mapEq("udp"))
UDP_IP* = mapAnd(IP, mapEq("udp"))
@@ -482,9 +482,10 @@ const
WS_DNS* = mapAnd(TCP_DNS, mapEq("ws"))
WS_IP* = mapAnd(TCP_IP, mapEq("ws"))
WS* = mapAnd(TCP, mapEq("ws"))
- WSS_DNS* = mapAnd(TCP_DNS, mapEq("wss"))
- WSS_IP* = mapAnd(TCP_IP, mapEq("wss"))
- WSS* = mapAnd(TCP, mapEq("wss"))
+ TLS_WS* = mapOr(mapEq("wss"), mapAnd(mapEq("tls"), mapEq("ws")))
+ WSS_DNS* = mapAnd(TCP_DNS, TLS_WS)
+ WSS_IP* = mapAnd(TCP_IP, TLS_WS)
+ WSS* = mapAnd(TCP, TLS_WS)
WebSockets_DNS* = mapOr(WS_DNS, WSS_DNS)
WebSockets_IP* = mapOr(WS_IP, WSS_IP)
WebSockets* = mapOr(WS, WSS)
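
The new `tls` marker codec lets secure websockets be written `/tls/ws` in addition to the legacy `wss`, and the rebuilt `WSS*` pattern accepts both spellings. A minimal matching sketch (addresses are illustrative):

```nim
import stew/results
import libp2p/multiaddress

let legacy = MultiAddress.init("/ip4/127.0.0.1/tcp/443/wss").tryGet()
let modern = MultiAddress.init("/ip4/127.0.0.1/tcp/443/tls/ws").tryGet()

# both spellings satisfy the updated WSS pattern
doAssert WSS.match(legacy)
doAssert WSS.match(modern)
```
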
@@ -775,7 +776,7 @@ proc toString*(value: MultiAddress): MaResult[string] =
res = "/" & parts.join("/")
ok(res)
-proc `$`*(value: MultiAddress): string {.raises: [Defect].} =
+proc `$`*(value: MultiAddress): string =
## Return string representation of MultiAddress ``value``.
let s = value.toString()
if s.isErr: s.error
@@ -1025,7 +1026,7 @@ proc append*(m1: var MultiAddress, m2: MultiAddress): MaResult[void] =
ok()
proc `&`*(m1, m2: MultiAddress): MultiAddress {.
- raises: [Defect, LPError].} =
+ raises: [LPError].} =
## Concatenates two addresses ``m1`` and ``m2``, and returns result.
##
## This procedure performs validation of concatenated result and can raise
@@ -1035,7 +1036,7 @@ proc `&`*(m1, m2: MultiAddress): MultiAddress {.
concat(m1, m2).tryGet()
proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.
- raises: [Defect, LPError].} =
+ raises: [LPError].} =
## Concatenates two addresses ``m1`` and ``m2``.
##
## This procedure performs validation of concatenated result and can raise
@@ -1079,19 +1080,15 @@ proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
proc match*(pat: MaPattern, address: MultiAddress): bool =
## Match full ``address`` using pattern ``pat`` and return ``true`` if
## ``address`` satisfies pattern.
- let protos = address.protocols()
- if protos.isErr():
- return false
- let res = matchPart(pat, protos.get())
+ let protos = address.protocols().valueOr: return false
+ let res = matchPart(pat, protos)
res.flag and (len(res.rem) == 0)
proc matchPartial*(pat: MaPattern, address: MultiAddress): bool =
## Match prefix part of ``address`` using pattern ``pat`` and return
## ``true`` if ``address`` starts with pattern.
- let protos = address.protocols()
- if protos.isErr():
- return false
- let res = matchPart(pat, protos.get())
+ let protos = address.protocols().valueOr: return false
+ let res = matchPart(pat, protos)
res.flag
proc `$`*(pat: MaPattern): string =
@@ -1120,12 +1117,8 @@ proc getField*(pb: ProtoBuffer, field: int,
if not(res):
ok(false)
else:
- let ma = MultiAddress.init(buffer)
- if ma.isOk():
- value = ma.get()
- ok(true)
- else:
- err(ProtoError.IncorrectBlob)
+ value = MultiAddress.init(buffer).valueOr: return err(ProtoError.IncorrectBlob)
+ ok(true)
proc getRepeatedField*(pb: ProtoBuffer, field: int,
value: var seq[MultiAddress]): ProtoResult[bool] {.
@@ -1141,11 +1134,11 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int,
ok(false)
else:
for item in items:
- let ma = MultiAddress.init(item)
- if ma.isOk():
- value.add(ma.get())
- else:
- debug "Not supported MultiAddress in blob", ma = item
+ let ma = MultiAddress.init(item).valueOr:
+ debug "Unsupported MultiAddress in blob", ma = item
+ continue
+
+ value.add(ma)
if value.len == 0:
err(ProtoError.IncorrectBlob)
else:
diff --git a/libp2p/multibase.nim b/libp2p/multibase.nim
index 49bdd5fe96..2fad33be23 100644
--- a/libp2p/multibase.nim
+++ b/libp2p/multibase.nim
@@ -13,10 +13,7 @@
## 1. base32z
##
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import tables
import stew/[base32, base58, base64, results]
@@ -27,17 +24,17 @@ type
MultiBase* = object
- MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
+ MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [].}
MBCodec = object
code: char
name: string
encr: proc(inbytes: openArray[byte],
outbytes: var openArray[char],
- outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
+ outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
decr: proc(inbytes: openArray[char],
outbytes: var openArray[byte],
- outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
+ outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
encl: MBCodeSize
decl: MBCodeSize
diff --git a/libp2p/multicodec.nim b/libp2p/multicodec.nim
index a406763608..184da57121 100644
--- a/libp2p/multicodec.nim
+++ b/libp2p/multicodec.nim
@@ -9,10 +9,7 @@
## This module implements MultiCodec.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import tables, hashes
import varint, vbuffer
@@ -194,9 +191,10 @@ const MultiCodecList = [
("p2p", 0x01A5),
("http", 0x01E0),
("https", 0x01BB),
+ ("tls", 0x01C0),
("quic", 0x01CC),
("ws", 0x01DD),
- ("wss", 0x01DE), # not in multicodec list
+ ("wss", 0x01DE),
("p2p-websocket-star", 0x01DF), # not in multicodec list
("p2p-webrtc-star", 0x0113), # not in multicodec list
("p2p-webrtc-direct", 0x0114), # not in multicodec list
diff --git a/libp2p/multihash.nim b/libp2p/multihash.nim
index 32fddf7b9b..4d890d1548 100644
--- a/libp2p/multihash.nim
+++ b/libp2p/multihash.nim
@@ -21,10 +21,7 @@
## 1. SKEIN
## 2. MURMUR
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
@@ -45,7 +42,7 @@ const
type
MHashCoderProc* = proc(data: openArray[byte],
- output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
+ output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [].}
MHash* = object
mcodec*: MultiCodec
size*: int
diff --git a/libp2p/multistream.nim b/libp2p/multistream.nim
index 9f9e75f1b9..9c54442640 100644
--- a/libp2p/multistream.nim
+++ b/libp2p/multistream.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[strutils, sequtils, tables]
import chronos, chronicles, stew/byteutils
@@ -28,7 +25,7 @@ const
Ls = "ls\n"
type
- Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}
+ Matcher* = proc (proto: string): bool {.gcsafe, raises: [].}
MultiStreamError* = object of LPError
diff --git a/libp2p/muxers/mplex/coder.nim b/libp2p/muxers/mplex/coder.nim
index 8a05b20efa..d1068e3675 100644
--- a/libp2p/muxers/mplex/coder.nim
+++ b/libp2p/muxers/mplex/coder.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import pkg/[chronos, chronicles, stew/byteutils]
import ../../stream/connection,
diff --git a/libp2p/muxers/mplex/lpchannel.nim b/libp2p/muxers/mplex/lpchannel.nim
index bbd10d6640..d5c604c307 100644
--- a/libp2p/muxers/mplex/lpchannel.nim
+++ b/libp2p/muxers/mplex/lpchannel.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[oids, strformat]
import pkg/[chronos, chronicles, metrics]
diff --git a/libp2p/muxers/mplex/mplex.nim b/libp2p/muxers/mplex/mplex.nim
index fed8e678e5..b8d287ec35 100644
--- a/libp2p/muxers/mplex/mplex.nim
+++ b/libp2p/muxers/mplex/mplex.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import tables, sequtils, oids
import chronos, chronicles, stew/byteutils, metrics
@@ -79,7 +76,7 @@ proc newStreamInternal*(m: Mplex,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel
- {.gcsafe, raises: [Defect, InvalidChannelIdError].} =
+ {.gcsafe, raises: [InvalidChannelIdError].} =
## create new channel/stream
##
let id = if initiator:
diff --git a/libp2p/muxers/muxer.nim b/libp2p/muxers/muxer.nim
index 5b93c57454..6c59fc4637 100644
--- a/libp2p/muxers/muxer.nim
+++ b/libp2p/muxers/muxer.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos, chronicles
import ../stream/connection,
@@ -26,8 +23,8 @@ type
MuxerError* = object of LPError
TooManyChannels* = object of MuxerError
- StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [Defect].}
- MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [Defect].}
+ StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [].}
+ MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [].}
Muxer* = ref object of RootObj
streamHandler*: StreamHandler
@@ -35,7 +32,7 @@ type
connection*: Connection
# user provider proc that returns a constructed Muxer
- MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
+ MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
# this wraps a creator proc that knows how to make muxers
MuxerProvider* = object
diff --git a/libp2p/muxers/yamux/yamux.nim b/libp2p/muxers/yamux/yamux.nim
index 9e15844c47..e71772da1b 100644
--- a/libp2p/muxers/yamux/yamux.nim
+++ b/libp2p/muxers/yamux/yamux.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import sequtils, std/[tables]
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
diff --git a/libp2p/nameresolving/dnsresolver.nim b/libp2p/nameresolving/dnsresolver.nim
index 92313b158f..58bfdf208e 100644
--- a/libp2p/nameresolving/dnsresolver.nim
+++ b/libp2p/nameresolving/dnsresolver.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import
std/[streams, strutils, sets, sequtils],
diff --git a/libp2p/nameresolving/mockresolver.nim b/libp2p/nameresolving/mockresolver.nim
index 266933b356..492dc1c421 100644
--- a/libp2p/nameresolving/mockresolver.nim
+++ b/libp2p/nameresolving/mockresolver.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import
std/tables,
diff --git a/libp2p/nameresolving/nameresolver.nim b/libp2p/nameresolving/nameresolver.nim
index 81ecef6d03..015901d612 100644
--- a/libp2p/nameresolving/nameresolver.nim
+++ b/libp2p/nameresolving/nameresolver.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sugar, sets, sequtils, strutils]
import
@@ -55,7 +52,7 @@ proc resolveOneAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
- {.async, raises: [Defect, MaError, TransportAddressError].} =
+ {.async, raises: [MaError, TransportAddressError].} =
  # Resolve a single address
var pbuf: array[2, byte]
@@ -121,7 +118,7 @@ proc resolveMAddress*(
if not DNS.matchPartial(address):
res.incl(address)
else:
- let code = address[0].get().protoCode().get()
+ let code = address[0].tryGet().protoCode().tryGet()
let seq = case code:
of multiCodec("dns"):
await self.resolveOneAddress(address)
@@ -132,7 +129,7 @@ proc resolveMAddress*(
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
- doAssert false
+ assert false
@[address]
for ad in seq:
res.incl(ad)
diff --git a/libp2p/observedaddrmanager.nim b/libp2p/observedaddrmanager.nim
index 882e16ea7c..e1644aba1a 100644
--- a/libp2p/observedaddrmanager.nim
+++ b/libp2p/observedaddrmanager.nim
@@ -7,15 +7,11 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import
- std/[sequtils, tables],
- chronos, chronicles,
- multiaddress, multicodec
+import std/[sequtils, tables, sugar]
+import chronos
+import multiaddress, multicodec
type
  ## Manages MultiAddresses observed by remote peers. It keeps track of the most observed IP and IP/Port.
@@ -36,14 +32,16 @@ proc getProtocol(self: ObservedAddrManager, observations: seq[MultiAddress], mul
countTable.sort()
var orderedPairs = toSeq(countTable.pairs)
for (ma, count) in orderedPairs:
- let maFirst = ma[0].get()
- if maFirst.protoCode.get() == multiCodec and count >= self.minCount:
+ let protoCode = (ma[0].flatMap(protoCode)).valueOr: continue
+ if protoCode == multiCodec and count >= self.minCount:
return Opt.some(ma)
return Opt.none(MultiAddress)
proc getMostObservedProtocol(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
  ## Returns the most observed IP address or none if the number of observations is less than minCount.
- let observedIPs = self.observedIPsAndPorts.mapIt(it[0].get())
+ let observedIPs = collect:
+ for observedIp in self.observedIPsAndPorts:
+ observedIp[0].valueOr: continue
return self.getProtocol(observedIPs, multiCodec)
proc getMostObservedProtoAndPort(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
@@ -54,34 +52,24 @@ proc getMostObservedProtosAndPorts*(self: ObservedAddrManager): seq[MultiAddress
  ## Returns the most observed IP4/Port and IP6/Port addresses or an empty seq if the number of observations
  ## is less than minCount.
var res: seq[MultiAddress]
- let ip4 = self.getMostObservedProtoAndPort(multiCodec("ip4"))
- if ip4.isSome():
- res.add(ip4.get())
- let ip6 = self.getMostObservedProtoAndPort(multiCodec("ip6"))
- if ip6.isSome():
- res.add(ip6.get())
+ self.getMostObservedProtoAndPort(multiCodec("ip4")).withValue(ip4):
+ res.add(ip4)
+ self.getMostObservedProtoAndPort(multiCodec("ip6")).withValue(ip6):
+ res.add(ip6)
return res
proc guessDialableAddr*(
self: ObservedAddrManager,
ma: MultiAddress): MultiAddress =
- ## Replaces the first proto valeu of each listen address by the corresponding (matching the proto code) most observed value.
+ ## Replaces the first proto value of each listen address by the corresponding (matching the proto code) most observed value.
## If the most observed value is not available, the original MultiAddress is returned.
- try:
- let maFirst = ma[0]
- let maRest = ma[1..^1]
- if maRest.isErr():
- return ma
+ let
+ maFirst = ma[0].valueOr: return ma
+ maRest = ma[1..^1].valueOr: return ma
+ maFirstProto = maFirst.protoCode().valueOr: return ma
- let observedIP = self.getMostObservedProtocol(maFirst.get().protoCode().get())
- return
- if observedIP.isNone() or maFirst.get() == observedIP.get():
- ma
- else:
- observedIP.get() & maRest.get()
- except CatchableError as error:
- debug "Error while handling manual port forwarding", msg = error.msg
- return ma
+ let observedIP = self.getMostObservedProtocol(maFirstProto).valueOr: return ma
+ return concat(observedIP, maRest).valueOr: ma
proc `$`*(self: ObservedAddrManager): string =
## Returns a string representation of the ObservedAddrManager.
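
A standalone sketch of the collect plus `valueOr: continue` combination the rewritten observedaddrmanager relies on: entries whose Result is an error are skipped outright, with no isErr bookkeeping or try/except. Only std/sugar and stew/results are assumed.

import std/sugar
import stew/results

let parsed = @[
  Result[int, string].ok(1),
  Result[int, string].err("bad"),
  Result[int, string].ok(3)]

# collect gathers the trailing expression of each iteration;
# valueOr's error branch runs `continue`, dropping that entry.
let values = collect:
  for r in parsed:
    r.valueOr: continue

assert values == @[1, 3]
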
diff --git a/libp2p/peerid.nim b/libp2p/peerid.nim
index a26605181e..4f7e6f9d14 100644
--- a/libp2p/peerid.nim
+++ b/libp2p/peerid.nim
@@ -9,10 +9,7 @@
## This module implements the API for libp2p peers.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
{.push public.}
import
@@ -44,10 +41,7 @@ func shortLog*(pid: PeerId): string =
if len(spid) > 10:
spid[3] = '*'
- when (NimMajor, NimMinor) > (1, 4):
- spid.delete(4 .. spid.high - 6)
- else:
- spid.delete(4, spid.high - 6)
+ spid.delete(4 .. spid.high - 6)
spid
@@ -191,19 +185,11 @@ proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =
func match*(pid: PeerId, pubkey: PublicKey): bool =
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
- let p = PeerId.init(pubkey)
- if p.isErr:
- false
- else:
- pid == p.get()
+ PeerId.init(pubkey) == Result[PeerId, cstring].ok(pid)
func match*(pid: PeerId, seckey: PrivateKey): bool =
## Returns ``true`` if ``pid`` matches private key ``seckey``.
- let p = PeerId.init(seckey)
- if p.isErr:
- false
- else:
- pid == p.get()
+ PeerId.init(seckey) == Result[PeerId, cstring].ok(pid)
## Serialization/Deserialization helpers
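
The simplified match* above works because stew's Result defines structural equality: an err() value never compares equal to an ok(pid), so the explicit isErr branch is redundant. A minimal sketch under that assumption:

import stew/results

type R = Result[int, cstring]

# == compares the ok/err discriminator first, then the payload.
assert R.ok(1) == R.ok(1)
assert R.ok(1) != R.ok(2)
assert R.err("boom") != R.ok(1)
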
diff --git a/libp2p/peerinfo.nim b/libp2p/peerinfo.nim
index a96a347e4d..a31f42eb5f 100644
--- a/libp2p/peerinfo.nim
+++ b/libp2p/peerinfo.nim
@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
{.push public.}
-import std/[options, sequtils]
+import std/sequtils
import pkg/[chronos, chronicles, stew/results]
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
@@ -26,7 +23,7 @@ type
AddressMapper* =
proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]]
- {.gcsafe, raises: [Defect].}
+ {.gcsafe, raises: [].}
PeerInfo* {.public.} = ref object
peerId*: PeerId
@@ -56,15 +53,12 @@ proc update*(p: PeerInfo) {.async.} =
for mapper in p.addressMappers:
p.addrs = await mapper(p.addrs)
- let sprRes = SignedPeerRecord.init(
+ p.signedPeerRecord = SignedPeerRecord.init(
p.privateKey,
PeerRecord.init(p.peerId, p.addrs)
- )
- if sprRes.isOk:
- p.signedPeerRecord = sprRes.get()
- else:
- discard
- #info "Can't update the signed peer record"
+ ).valueOr():
+ info "Can't update the signed peer record"
+ return
proc addrs*(p: PeerInfo): seq[MultiAddress] =
p.addrs
@@ -99,7 +93,7 @@ proc new*(
agentVersion: string = "",
addressMappers = newSeq[AddressMapper](),
): PeerInfo
- {.raises: [Defect, LPError].} =
+ {.raises: [LPError].} =
let pubkey = try:
key.getPublicKey().tryGet()
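
The update* rewrite is the block form of valueOr: on the error path the block runs (here logging, then leaving the proc), which keeps the success path flat. A self-contained sketch; sign and refresh are illustrative stand-ins, not libp2p API:

import stew/results

proc sign(key: string): Result[string, string] =
  if key.len > 0: ok("signed:" & key)
  else: err("empty key")

proc refresh(key: string): string =
  # on err the block runs: log and bail out, mirroring update* above;
  # valueOr injects `error` for the error payload
  let record = sign(key).valueOr:
    echo "Can't update the signed peer record: ", error
    return ""
  record

assert refresh("k") == "signed:k"
assert refresh("") == ""
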
diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim
index 67f71d1d21..41698b75de 100644
--- a/libp2p/peerstore.nim
+++ b/libp2p/peerstore.nim
@@ -16,15 +16,12 @@ runnableExamples:
# Create a custom book type
type MoodBook = ref object of PeerBook[string]
- var somePeerId = PeerId.random().get()
+ var somePeerId = PeerId.random().expect("get random key")
peerStore[MoodBook][somePeerId] = "Happy"
doAssert peerStore[MoodBook][somePeerId] == "Happy"
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import
std/[tables, sets, options, macros],
@@ -45,7 +42,7 @@ type
# Handler types #
#################
- PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [Defect].}
+ PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [].}
#########
# Books #
@@ -161,20 +158,20 @@ proc updatePeerInfo*(
if info.addrs.len > 0:
peerStore[AddressBook][info.peerId] = info.addrs
- if info.pubkey.isSome:
- peerStore[KeyBook][info.peerId] = info.pubkey.get()
+ info.pubkey.withValue(pubkey):
+ peerStore[KeyBook][info.peerId] = pubkey
- if info.agentVersion.isSome:
- peerStore[AgentBook][info.peerId] = info.agentVersion.get().string
+ info.agentVersion.withValue(agentVersion):
+ peerStore[AgentBook][info.peerId] = agentVersion.string
- if info.protoVersion.isSome:
- peerStore[ProtoVersionBook][info.peerId] = info.protoVersion.get().string
+ info.protoVersion.withValue(protoVersion):
+ peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
if info.protos.len > 0:
peerStore[ProtoBook][info.peerId] = info.protos
- if info.signedPeerRecord.isSome:
- peerStore[SPRBook][info.peerId] = info.signedPeerRecord.get()
+ info.signedPeerRecord.withValue(signedPeerRecord):
+ peerStore[SPRBook][info.peerId] = signedPeerRecord
let cleanupPos = peerStore.toClean.find(info.peerId)
if cleanupPos >= 0:
@@ -210,11 +207,11 @@ proc identify*(
let info = await peerStore.identify.identify(stream, stream.peerId)
when defined(libp2p_agents_metrics):
- var knownAgent = "unknown"
- if info.agentVersion.isSome and info.agentVersion.get().len > 0:
- let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
- if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
- knownAgent = shortAgent.get()
+ var
+ knownAgent = "unknown"
+ shortAgent = info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
+ if KnownLibP2PAgentsSeq.contains(shortAgent):
+ knownAgent = shortAgent
muxer.connection.setShortAgent(knownAgent)
peerStore.updatePeerInfo(info)
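
withValue, used throughout updatePeerInfo above, executes its body only when the Opt carries a value. The helper lives in libp2p's own utility module rather than stew, so this sketch defines a local stand-in with the assumed shape:

import stew/results   # Opt[T] is Result[T, void]

template withValue[T](o: Opt[T], name, body: untyped) =
  # local stand-in for the helper the diff relies on (assumed shape)
  if o.isSome:
    let name = o.get()
    body

var seen: seq[string]
Opt.some("alice").withValue(n):
  seen.add(n)
Opt.none(string).withValue(n):
  seen.add(n)          # never runs
assert seen == @["alice"]
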
diff --git a/libp2p/protobuf/minprotobuf.nim b/libp2p/protobuf/minprotobuf.nim
index 5d28cd5ff7..31c98d1cfd 100644
--- a/libp2p/protobuf/minprotobuf.nim
+++ b/libp2p/protobuf/minprotobuf.nim
@@ -9,10 +9,7 @@
## This module implements a minimal subset of Google's ProtoBuf primitives.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import ../varint, ../utility, stew/[endians2, results]
export results, utility
@@ -579,26 +576,18 @@ proc getField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
proc getField*(pb: ProtoBuffer, field: int,
output: var ProtoBuffer): ProtoResult[bool] {.inline.} =
var buffer: seq[byte]
- let res = pb.getField(field, buffer)
- if res.isOk():
- if res.get():
- output = initProtoBuffer(buffer)
- ok(true)
- else:
- ok(false)
+ if ? pb.getField(field, buffer):
+ output = initProtoBuffer(buffer)
+ ok(true)
else:
- err(res.error)
+ ok(false)
proc getRequiredField*[T](pb: ProtoBuffer, field: int,
output: var T): ProtoResult[void] {.inline.} =
- let res = pb.getField(field, output)
- if res.isOk():
- if res.get():
- ok()
- else:
- err(RequiredFieldMissing)
+ if ? pb.getField(field, output):
+ ok()
else:
- err(res.error)
+ err(RequiredFieldMissing)
proc getRepeatedField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[bool] =
@@ -678,14 +667,10 @@ proc getRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
proc getRequiredRepeatedField*[T](pb: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[void] {.inline.} =
- let res = pb.getRepeatedField(field, output)
- if res.isOk():
- if res.get():
- ok()
- else:
- err(RequiredFieldMissing)
+ if ? pb.getRepeatedField(field, output):
+ ok()
else:
- err(res.error)
+ err(RequiredFieldMissing)
proc getPackedRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[bool] =
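
The getRequiredField and getRepeatedField rewrites hinge on stew's `?` operator: it unwraps an ok value and early-returns the err from the enclosing proc, which collapses the nested isOk/get ladders into a single if. A minimal sketch with an illustrative fetch stand-in:

import stew/results

proc fetch(found: bool): Result[bool, string] =
  ok(found)   # stands in for pb.getField: ok(true) means "present"

proc required(found: bool): Result[void, string] =
  # ? yields the bool on ok, or propagates fetch's err out of required
  if ? fetch(found):
    ok()
  else:
    err("RequiredFieldMissing")

assert required(true).isOk
assert required(false).error == "RequiredFieldMissing"
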
diff --git a/libp2p/protocols/connectivity/autonat/client.nim b/libp2p/protocols/connectivity/autonat/client.nim
index e6ec928ffd..81a4efe2ca 100644
--- a/libp2p/protocols/connectivity/autonat/client.nim
+++ b/libp2p/protocols/connectivity/autonat/client.nim
@@ -7,12 +7,8 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import std/options
import stew/results
import chronos, chronicles
import ../../../switch,
@@ -27,8 +23,8 @@ type
AutonatClient* = ref object of RootObj
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
- let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
- id: some(pid),
+ let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
+ id: Opt.some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
@@ -36,15 +32,13 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
Future[MultiAddress] {.base, async.} =
- proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [Defect, AutonatError].} =
- if autonatMsg.isNone() or
- autonatMsg.get().msgType != DialResponse or
- autonatMsg.get().response.isNone() or
- (autonatMsg.get().response.get().status == Ok and
- autonatMsg.get().response.get().ma.isNone()):
- raise newException(AutonatError, "Unexpected response")
- else:
- autonatMsg.get().response.get()
+ proc getResponseOrRaise(autonatMsg: Opt[AutonatMsg]): AutonatDialResponse {.raises: [AutonatError].} =
+ autonatMsg.withValue(msg):
+ if msg.msgType == DialResponse:
+ msg.response.withValue(res):
+ if not (res.status == Ok and res.ma.isNone()):
+ return res
+ raise newException(AutonatError, "Unexpected response")
let conn =
try:
@@ -69,7 +63,7 @@ method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[Mult
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
return case response.status:
of ResponseStatus.Ok:
- response.ma.get()
+ response.ma.tryGet()
of ResponseStatus.DialError:
raise newException(AutonatUnreachableError, "Peer could not dial us back: " & response.text.get(""))
else:
diff --git a/libp2p/protocols/connectivity/autonat/core.nim b/libp2p/protocols/connectivity/autonat/core.nim
index c3d1a22e70..bfb9beaaaa 100644
--- a/libp2p/protocols/connectivity/autonat/core.nim
+++ b/libp2p/protocols/connectivity/autonat/core.nim
@@ -7,12 +7,8 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import std/[options]
import stew/[results, objects]
import chronos, chronicles
import ../../../multiaddress,
@@ -42,29 +38,29 @@ type
InternalError = 300
AutonatPeerInfo* = object
- id*: Option[PeerId]
+ id*: Opt[PeerId]
addrs*: seq[MultiAddress]
AutonatDial* = object
- peerInfo*: Option[AutonatPeerInfo]
+ peerInfo*: Opt[AutonatPeerInfo]
AutonatDialResponse* = object
status*: ResponseStatus
- text*: Option[string]
- ma*: Option[MultiAddress]
+ text*: Opt[string]
+ ma*: Opt[MultiAddress]
AutonatMsg* = object
msgType*: MsgType
- dial*: Option[AutonatDial]
- response*: Option[AutonatDialResponse]
+ dial*: Opt[AutonatDial]
+ response*: Opt[AutonatDialResponse]
NetworkReachability* {.pure.} = enum
Unknown, NotReachable, Reachable
proc encode(p: AutonatPeerInfo): ProtoBuffer =
result = initProtoBuffer()
- if p.id.isSome():
- result.write(1, p.id.get())
+ p.id.withValue(id):
+ result.write(1, id)
for ma in p.addrs:
result.write(2, ma.data.buffer)
result.finish()
@@ -73,8 +69,8 @@ proc encode*(d: AutonatDial): ProtoBuffer =
result = initProtoBuffer()
result.write(1, MsgType.Dial.uint)
var dial = initProtoBuffer()
- if d.peerInfo.isSome():
- dial.write(1, encode(d.peerInfo.get()))
+ d.peerInfo.withValue(pinfo):
+ dial.write(1, encode(pinfo))
dial.finish()
result.write(2, dial.buffer)
result.finish()
@@ -84,72 +80,60 @@ proc encode*(r: AutonatDialResponse): ProtoBuffer =
result.write(1, MsgType.DialResponse.uint)
var bufferResponse = initProtoBuffer()
bufferResponse.write(1, r.status.uint)
- if r.text.isSome():
- bufferResponse.write(2, r.text.get())
- if r.ma.isSome():
- bufferResponse.write(3, r.ma.get())
+ r.text.withValue(text):
+ bufferResponse.write(2, text)
+ r.ma.withValue(ma):
+ bufferResponse.write(3, ma)
bufferResponse.finish()
result.write(3, bufferResponse.buffer)
result.finish()
proc encode*(msg: AutonatMsg): ProtoBuffer =
- if msg.dial.isSome():
- return encode(msg.dial.get())
- if msg.response.isSome():
- return encode(msg.response.get())
+ msg.dial.withValue(dial):
+ return encode(dial)
+ msg.response.withValue(res):
+ return encode(res)
-proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
+proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Opt[AutonatMsg] =
var
msgTypeOrd: uint32
pbDial: ProtoBuffer
pbResponse: ProtoBuffer
msg: AutonatMsg
- let
- pb = initProtoBuffer(buf)
- r1 = pb.getField(1, msgTypeOrd)
- r2 = pb.getField(2, pbDial)
- r3 = pb.getField(3, pbResponse)
- if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
+ let pb = initProtoBuffer(buf)
- if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
- return none(AutonatMsg)
- if r2.get():
+ if ? pb.getField(1, msgTypeOrd).toOpt() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
+ return Opt.none(AutonatMsg)
+ if ? pb.getField(2, pbDial).toOpt():
var
pbPeerInfo: ProtoBuffer
dial: AutonatDial
- let
- r4 = pbDial.getField(1, pbPeerInfo)
- if r4.isErr(): return none(AutonatMsg)
+ let r4 = ? pbDial.getField(1, pbPeerInfo).toOpt()
var peerInfo: AutonatPeerInfo
- if r4.get():
+ if r4:
var pid: PeerId
let
- r5 = pbPeerInfo.getField(1, pid)
- r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
- if r5.isErr() or r6.isErr(): return none(AutonatMsg)
- if r5.get(): peerInfo.id = some(pid)
- dial.peerInfo = some(peerInfo)
- msg.dial = some(dial)
-
- if r3.get():
+ r5 = ? pbPeerInfo.getField(1, pid).toOpt()
+ r6 = ? pbPeerInfo.getRepeatedField(2, peerInfo.addrs).toOpt()
+ if r5: peerInfo.id = Opt.some(pid)
+ dial.peerInfo = Opt.some(peerInfo)
+ msg.dial = Opt.some(dial)
+
+ if ? pb.getField(3, pbResponse).toOpt():
var
statusOrd: uint
text: string
ma: MultiAddress
response: AutonatDialResponse
- let
- r4 = pbResponse.getField(1, statusOrd)
- r5 = pbResponse.getField(2, text)
- r6 = pbResponse.getField(3, ma)
-
- if r4.isErr() or r5.isErr() or r6.isErr() or
- (r4.get() and not checkedEnumAssign(response.status, statusOrd)):
- return none(AutonatMsg)
- if r5.get(): response.text = some(text)
- if r6.get(): response.ma = some(ma)
- msg.response = some(response)
-
- return some(msg)
\ No newline at end of file
+ if ? pbResponse.getField(1, statusOrd).optValue():
+ if not checkedEnumAssign(response.status, statusOrd):
+ return Opt.none(AutonatMsg)
+ if ? pbResponse.getField(2, text).optValue():
+ response.text = Opt.some(text)
+ if ? pbResponse.getField(3, ma).optValue():
+ response.ma = Opt.some(ma)
+ msg.response = Opt.some(response)
+ return Opt.some(msg)
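
decode now returns Opt[AutonatMsg] and converts each ProtoResult into an Opt, so `?` maps any field error to Opt.none for the whole message. The sketch below writes out a local toOpt with the assumed behaviour to stay self-contained:

import stew/results

proc toOpt[T, E](r: Result[T, E]): Opt[T] =
  # assumed shape of the converter used above
  if r.isOk: Opt.some(r.value) else: Opt.none(T)

type Msg = object
  id: Opt[int]

proc getField(present: bool): Result[bool, string] =
  ok(present)   # stand-in for a protobuf field read

proc decode(present: bool): Opt[Msg] =
  var msg: Msg
  # on a field error, ? would return Opt.none(Msg) from here
  if ? getField(present).toOpt():
    msg.id = Opt.some(42)
  Opt.some(msg)

assert decode(true).get().id == Opt.some(42)
assert decode(false).get().id == Opt.none(int)
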
diff --git a/libp2p/protocols/connectivity/autonat/server.nim b/libp2p/protocols/connectivity/autonat/server.nim
index 0927369fd6..a15f0b2497 100644
--- a/libp2p/protocols/connectivity/autonat/server.nim
+++ b/libp2p/protocols/connectivity/autonat/server.nim
@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import std/[options, sets, sequtils]
+import std/[sets, sequtils]
import stew/results
import chronos, chronicles
import ../../protocol,
@@ -36,8 +33,8 @@ type
dialTimeout: Duration
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
- let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
- id: some(pid),
+ let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
+ id: Opt.some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
@@ -45,16 +42,16 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
let pb = AutonatDialResponse(
status: status,
- text: if text == "": none(string) else: some(text),
- ma: none(MultiAddress)
+ text: if text == "": Opt.none(string) else: Opt.some(text),
+ ma: Opt.none(MultiAddress)
).encode()
await conn.writeLp(pb.buffer)
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok,
- text: some("Ok"),
- ma: some(ma)
+ text: Opt.some("Ok"),
+ ma: Opt.some(ma)
).encode()
await conn.writeLp(pb.buffer)
@@ -73,8 +70,8 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
let ma = await fut
- if ma.isSome:
- await conn.sendResponseOk(ma.get())
+ ma.withValue(maddr):
+ await conn.sendResponseOk(maddr)
else:
await conn.sendResponseError(DialError, "Missing observed address")
except CancelledError as exc:
@@ -95,42 +92,40 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
f.cancel()
proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
- if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
+ let dial = msg.dial.valueOr:
+ return conn.sendResponseError(BadRequest, "Missing Dial")
+ let peerInfo = dial.peerInfo.valueOr:
return conn.sendResponseError(BadRequest, "Missing Peer Info")
- let peerInfo = msg.dial.get().peerInfo.get()
- if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
- return conn.sendResponseError(BadRequest, "PeerId mismatch")
+ peerInfo.id.withValue(id):
+ if id != conn.peerId:
+ return conn.sendResponseError(BadRequest, "PeerId mismatch")
- if conn.observedAddr.isNone:
+ let observedAddr = conn.observedAddr.valueOr:
return conn.sendResponseError(BadRequest, "Missing observed address")
- let observedAddr = conn.observedAddr.get()
- var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
- if isRelayed.isErr() or isRelayed.get():
+ var isRelayed = observedAddr.contains(multiCodec("p2p-circuit")).valueOr:
+ return conn.sendResponseError(DialRefused, "Invalid observed address")
+ if isRelayed:
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
- let hostIp = observedAddr[0]
- if hostIp.isErr() or not IP.match(hostIp.get()):
- trace "wrong observed address", address=observedAddr
+ let hostIp = observedAddr[0].valueOr:
+ return conn.sendResponseError(InternalError, "Wrong observed address")
+ if not IP.match(hostIp):
return conn.sendResponseError(InternalError, "Expected an IP address")
var addrs = initHashSet[MultiAddress]()
addrs.incl(observedAddr)
trace "addrs received", addrs = peerInfo.addrs
for ma in peerInfo.addrs:
- isRelayed = ma.contains(multiCodec("p2p-circuit"))
- if isRelayed.isErr() or isRelayed.get():
- continue
- let maFirst = ma[0]
- if maFirst.isErr() or not DNS_OR_IP.match(maFirst.get()):
- continue
+ isRelayed = ma.contains(multiCodec("p2p-circuit")).valueOr: continue
+ let maFirst = ma[0].valueOr: continue
+ if not DNS_OR_IP.match(maFirst): continue
try:
addrs.incl(
- if maFirst.get() == hostIp.get():
+ if maFirst == hostIp:
ma
else:
- let maEnd = ma[1..^1]
- if maEnd.isErr(): continue
- hostIp.get() & maEnd.get()
+ let maEnd = ma[1..^1].valueOr: continue
+ hostIp & maEnd
)
except LPError as exc:
continue
@@ -147,10 +142,10 @@ proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
try:
- let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
- if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
+ let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")
- let msg = msgOpt.get()
+ if msg.msgType != MsgType.Dial:
+ raise newException(AutonatError, "Message type should be dial")
await autonat.handleDial(conn, msg)
except CancelledError as exc:
raise exc
diff --git a/libp2p/protocols/connectivity/autonat/service.nim b/libp2p/protocols/connectivity/autonat/service.nim
index 50edae185e..7726c6b111 100644
--- a/libp2p/protocols/connectivity/autonat/service.nim
+++ b/libp2p/protocols/connectivity/autonat/service.nim
@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import std/[options, deques, sequtils]
+import std/[deques, sequtils]
import chronos, metrics
import ../../../switch
import ../../../wire
@@ -21,7 +18,7 @@ from core import NetworkReachability, AutonatUnreachableError
import ../../../utils/heartbeat
import ../../../crypto/crypto
-export options, core.NetworkReachability
+export core.NetworkReachability
logScope:
topics = "libp2p autonatservice"
@@ -34,12 +31,12 @@ type
addressMapper: AddressMapper
scheduleHandle: Future[void]
networkReachability*: NetworkReachability
- confidence: Option[float]
+ confidence: Opt[float]
answers: Deque[NetworkReachability]
autonatClient: AutonatClient
statusAndConfidenceHandler: StatusAndConfidenceHandler
rng: ref HmacDrbgContext
- scheduleInterval: Option[Duration]
+ scheduleInterval: Opt[Duration]
askNewConnectedPeers: bool
numPeersToAsk: int
maxQueueSize: int
@@ -47,13 +44,13 @@ type
dialTimeout: Duration
enableAddressMapper: bool
- StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Option[float]): Future[void] {.gcsafe, raises: [Defect].}
+ StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Opt[float]): Future[void] {.gcsafe, raises: [].}
proc new*(
T: typedesc[AutonatService],
autonatClient: AutonatClient,
rng: ref HmacDrbgContext,
- scheduleInterval: Option[Duration] = none(Duration),
+ scheduleInterval: Opt[Duration] = Opt.none(Duration),
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
@@ -63,7 +60,7 @@ proc new*(
return T(
scheduleInterval: scheduleInterval,
networkReachability: Unknown,
- confidence: none(float),
+ confidence: Opt.none(float),
answers: initDeque[NetworkReachability](),
autonatClient: autonatClient,
rng: rng,
@@ -85,27 +82,33 @@ proc hasEnoughIncomingSlots(switch: Switch): bool =
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
return switch.connManager.selectMuxer(peerId, In) != nil
-proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
+proc handleAnswer(self: AutonatService, ans: NetworkReachability): Future[bool] {.async.} =
if ans == Unknown:
return
+ let oldNetworkReachability = self.networkReachability
+ let oldConfidence = self.confidence
+
if self.answers.len == self.maxQueueSize:
self.answers.popFirst()
self.answers.addLast(ans)
self.networkReachability = Unknown
- self.confidence = none(float)
+ self.confidence = Opt.none(float)
const reachabilityPriority = [Reachable, NotReachable]
for reachability in reachabilityPriority:
let confidence = self.answers.countIt(it == reachability) / self.maxQueueSize
libp2p_autonat_reachability_confidence.set(value = confidence, labelValues = [$reachability])
if self.confidence.isNone and confidence >= self.minConfidence:
self.networkReachability = reachability
- self.confidence = some(confidence)
+ self.confidence = Opt.some(confidence)
debug "Current status", currentStats = $self.networkReachability, confidence = $self.confidence, answers = self.answers
+ # Return whether anything has changed
+ return self.networkReachability != oldNetworkReachability or self.confidence != oldConfidence
+
proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
logScope:
peerId = $peerId
@@ -132,9 +135,9 @@ proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[Netwo
except CatchableError as error:
debug "dialMe unexpected error", msg = error.msg
Unknown
- await self.handleAnswer(ans)
- if not isNil(self.statusAndConfidenceHandler):
- await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
+ let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
+ if hasReachabilityOrConfidenceChanged:
+ await self.callHandler()
await switch.peerInfo.update()
return ans
@@ -168,8 +171,7 @@ proc addressMapper(
for listenAddr in listenAddrs:
var processedMA = listenAddr
try:
- let hostIP = initTAddress(listenAddr).get()
- if not hostIP.isGlobal() and self.networkReachability == NetworkReachability.Reachable:
+ if not listenAddr.isPublicMA() and self.networkReachability == NetworkReachability.Reachable:
processedMA = peerStore.guessDialableAddr(listenAddr) # handle manual port forwarding
except CatchableError as exc:
debug "Error while handling address mapper", msg = exc.msg
@@ -187,8 +189,8 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
discard askPeer(self, switch, peerId)
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
- if self.scheduleInterval.isSome():
- self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get())
+ self.scheduleInterval.withValue(interval):
+ self.scheduleHandle = schedule(self, switch, interval)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
@@ -196,7 +198,6 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
method run*(self: AutonatService, switch: Switch) {.async, public.} =
trace "Running AutonatService"
await askConnectedPeers(self, switch)
- await self.callHandler()
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
info "Stopping AutonatService"
diff --git a/libp2p/protocols/connectivity/dcutr/client.nim b/libp2p/protocols/connectivity/dcutr/client.nim
index 1aafeb1601..cda04c72a7 100644
--- a/libp2p/protocols/connectivity/dcutr/client.nim
+++ b/libp2p/protocols/connectivity/dcutr/client.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/sequtils
@@ -84,8 +81,8 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", err)
except CatchableError as err:
- debug "Unexpected error when trying direct conn", err = err.msg
- raise newException(DcutrError, "Unexpected error when trying a direct conn", err)
+ debug "Unexpected error when Dcutr initiator tried to connect to the remote peer", err = err.msg
+ raise newException(DcutrError, "Unexpected error when Dcutr initiator tried to connect to the remote peer", err)
finally:
if stream != nil:
await stream.close()
diff --git a/libp2p/protocols/connectivity/dcutr/core.nim b/libp2p/protocols/connectivity/dcutr/core.nim
index 07585dc519..d0744c80d4 100644
--- a/libp2p/protocols/connectivity/dcutr/core.nim
+++ b/libp2p/protocols/connectivity/dcutr/core.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/sequtils
@@ -44,7 +41,7 @@ proc encode*(msg: DcutrMsg): ProtoBuffer =
result.write(2, addr)
result.finish()
-proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [Defect, DcutrError].} =
+proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrError].} =
var
msgTypeOrd: uint32
dcutrMsg: DcutrMsg
diff --git a/libp2p/protocols/connectivity/dcutr/server.nim b/libp2p/protocols/connectivity/dcutr/server.nim
index c9f4fcb4ba..ecd4d15f0e 100644
--- a/libp2p/protocols/connectivity/dcutr/server.nim
+++ b/libp2p/protocols/connectivity/dcutr/server.nim
@@ -7,13 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
-
-import std/[options, sets, sequtils]
+{.push raises: [].}
+import std/[sets, sequtils]
import stew/[results, objects]
import chronos, chronicles
@@ -75,8 +71,8 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err)
except CatchableError as err:
- warn "Unexpected error in dcutr handler", msg = err.msg
- raise newException(DcutrError, "Unexpected error in dcutr handler", err)
+ warn "Unexpected error when Dcutr receiver tried to connect to the remote peer", msg = err.msg
+ raise newException(DcutrError, "Unexpected error when Dcutr receiver tried to connect to the remote peer", err)
let self = T()
self.handler = handleStream
diff --git a/libp2p/protocols/connectivity/relay/client.nim b/libp2p/protocols/connectivity/relay/client.nim
index e8573e0787..5e1537a9b5 100644
--- a/libp2p/protocols/connectivity/relay/client.nim
+++ b/libp2p/protocols/connectivity/relay/client.nim
@@ -7,15 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
-
-import times, options
+{.push raises: [].}
+import times
import chronos, chronicles
-
import ./relay,
./messages,
./rconn,
@@ -25,8 +20,6 @@ import ./relay,
../../../multiaddress,
../../../stream/connection
-export options
-
logScope:
topics = "libp2p relay relay-client"
@@ -39,7 +32,7 @@ type
RelayV2DialError* = object of RelayClientError
RelayClientAddConn* = proc(conn: Connection,
duration: uint32,
- data: uint64): Future[void] {.gcsafe, raises: [Defect].}
+ data: uint64): Future[void] {.gcsafe, raises: [].}
RelayClient* = ref object of Relay
onNewConnection*: RelayClientAddConn
canHop: bool
@@ -47,28 +40,27 @@ type
Rsvp* = object
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
- voucher*: Option[Voucher] # optional, reservation voucher
+ voucher*: Opt[Voucher] # optional, reservation voucher
limitDuration*: uint32 # seconds
limitData*: uint64 # bytes
proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
trace "send stop status", status = $code & " (" & $ord(code) & ")"
- let msg = StopMessage(msgType: StopMessageType.Status, status: some(code))
+ let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
proc handleRelayedConnect(cl: RelayClient, conn: Connection, msg: StopMessage) {.async.} =
- if msg.peer.isNone():
- await sendStopError(conn, MalformedMessage)
- return
let
# TODO: check the go version to see in which way this could fail
# it's unclear in the spec
- src = msg.peer.get()
+ src = msg.peer.valueOr:
+ await sendStopError(conn, MalformedMessage)
+ return
limitDuration = msg.limit.duration
limitData = msg.limit.data
msg = StopMessage(
msgType: StopMessageType.Status,
- status: some(Ok))
+ status: Opt.some(Ok))
pb = encode(msg)
trace "incoming relay connection", src
@@ -92,7 +84,7 @@ proc reserve*(cl: RelayClient,
pb = encode(HopMessage(msgType: HopMessageType.Reserve))
msg = try:
await conn.writeLp(pb.buffer)
- HopMessage.decode(await conn.readLp(RelayClientMsgSize)).get()
+ HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -103,21 +95,21 @@ proc reserve*(cl: RelayClient,
raise newException(ReservationError, "Unexpected relay response type")
if msg.status.get(UnexpectedMessage) != Ok:
raise newException(ReservationError, "Reservation failed")
- if msg.reservation.isNone():
- raise newException(ReservationError, "Missing reservation information")
- let reservation = msg.reservation.get()
+ let reservation = msg.reservation.valueOr:
+ raise newException(ReservationError, "Missing reservation information")
if reservation.expire > int64.high().uint64 or
now().utc > reservation.expire.int64.fromUnix.utc:
raise newException(ReservationError, "Bad expiration date")
result.expire = reservation.expire
result.addrs = reservation.addrs
- if reservation.svoucher.isSome():
- let svoucher = SignedVoucher.decode(reservation.svoucher.get())
- if svoucher.isErr() or svoucher.get().data.relayPeerId != peerId:
+ reservation.svoucher.withValue(sv):
+ let svoucher = SignedVoucher.decode(sv).valueOr:
raise newException(ReservationError, "Invalid voucher")
- result.voucher = some(svoucher.get().data)
+ if svoucher.data.relayPeerId != peerId:
+ raise newException(ReservationError, "Invalid voucher PeerId")
+ result.voucher = Opt.some(svoucher.data)
result.limitDuration = msg.limit.duration
result.limitData = msg.limit.data
@@ -129,9 +121,9 @@ proc dialPeerV1*(
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
var
msg = RelayMessage(
- msgType: some(RelayType.Hop),
- srcPeer: some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
- dstPeer: some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
+ msgType: Opt.some(RelayType.Hop),
+ srcPeer: Opt.some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
+ dstPeer: Opt.some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
pb = encode(msg)
trace "Dial peer", msgSend=msg
@@ -154,16 +146,18 @@ proc dialPeerV1*(
raise exc
try:
- if msgRcvFromRelayOpt.isNone:
+ let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
raise newException(RelayV1DialError, "Hop can't open destination stream")
- let msgRcvFromRelay = msgRcvFromRelayOpt.get()
- if msgRcvFromRelay.msgType.isNone or msgRcvFromRelay.msgType.get() != RelayType.Status:
+ if msgRcvFromRelay.msgType.tryGet() != RelayType.Status:
raise newException(RelayV1DialError, "Hop can't open destination stream: wrong message type")
- if msgRcvFromRelay.status.isNone or msgRcvFromRelay.status.get() != StatusV1.Success:
+ if msgRcvFromRelay.status.tryGet() != StatusV1.Success:
raise newException(RelayV1DialError, "Hop can't open destination stream: status failed")
except RelayV1DialError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
+ except ValueError as exc:
+ await sendStatus(conn, StatusV1.HopCantOpenDstStream)
+ raise newException(RelayV1DialError, exc.msg)
result = conn
proc dialPeerV2*(
@@ -173,13 +167,13 @@ proc dialPeerV2*(
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
let
p = Peer(peerId: dstPeerId, addrs: dstAddrs)
- pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: some(p)))
+ pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))
trace "Dial peer", p
let msgRcvFromRelay = try:
await conn.writeLp(pb.buffer)
- HopMessage.decode(await conn.readLp(RelayClientMsgSize)).get()
+ HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -189,19 +183,17 @@ proc dialPeerV2*(
if msgRcvFromRelay.msgType != HopMessageType.Status:
raise newException(RelayV2DialError, "Unexpected stop response")
if msgRcvFromRelay.status.get(UnexpectedMessage) != Ok:
- trace "Relay stop failed", msg = msgRcvFromRelay.status.get()
+ trace "Relay stop failed", msg = msgRcvFromRelay.status
raise newException(RelayV2DialError, "Relay stop failure")
conn.limitDuration = msgRcvFromRelay.limit.duration
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
- let msgOpt = StopMessage.decode(await conn.readLp(RelayClientMsgSize))
- if msgOpt.isNone():
+ let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
- trace "client circuit relay v2 handle stream", msg = msgOpt.get()
- let msg = msgOpt.get()
+ trace "client circuit relay v2 handle stream", msg
if msg.msgType == StopMessageType.Connect:
await cl.handleRelayedConnect(conn, msg)
@@ -210,16 +202,14 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
- if msg.srcPeer.isNone:
+ let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
- let src = msg.srcPeer.get()
- if msg.dstPeer.isNone:
+ let dst = msg.dstPeer.valueOr:
await sendStatus(conn, StatusV1.StopDstMultiaddrInvalid)
return
- let dst = msg.dstPeer.get()
if dst.peerId != cl.switch.peerInfo.peerId:
await sendStatus(conn, StatusV1.StopDstMultiaddrInvalid)
return
@@ -237,13 +227,16 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
else: await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
- let msgOpt = RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
- if msgOpt.isNone:
+ let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
+ await sendStatus(conn, StatusV1.MalformedMessage)
+ return
+ trace "client circuit relay v1 handle stream", msg
+
+ let typ = msg.msgType.valueOr:
+ trace "Message type not set"
await sendStatus(conn, StatusV1.MalformedMessage)
return
- trace "client circuit relay v1 handle stream", msg = msgOpt.get()
- let msg = msgOpt.get()
- case msg.msgType.get:
+ case typ:
of RelayType.Hop:
if cl.canHop: await cl.handleHop(conn, msg)
else: await sendStatus(conn, StatusV1.HopCantSpeakRelay)
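
Several of the response checks above use Opt's get with a default: an absent status falls back to a sentinel that cannot pass the == Ok test, so one comparison covers both a missing and a wrong status. Sketch:

import stew/results

type StatusV2 = enum
  Ok, UnexpectedMessage

let absent = Opt.none(StatusV2)
let present = Opt.some(Ok)

# the default is only used when the Opt is empty
assert absent.get(UnexpectedMessage) == UnexpectedMessage
assert present.get(UnexpectedMessage) == Ok
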
diff --git a/libp2p/protocols/connectivity/relay/messages.nim b/libp2p/protocols/connectivity/relay/messages.nim
index 4b4a0fb3ac..8cb2bfa654 100644
--- a/libp2p/protocols/connectivity/relay/messages.nim
+++ b/libp2p/protocols/connectivity/relay/messages.nim
@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import options, macros
-import stew/objects
+import macros
+import stew/[objects, results]
import ../../../peerinfo,
../../../signed_envelope
@@ -49,36 +46,36 @@ type
addrs*: seq[MultiAddress]
RelayMessage* = object
- msgType*: Option[RelayType]
- srcPeer*: Option[RelayPeer]
- dstPeer*: Option[RelayPeer]
- status*: Option[StatusV1]
+ msgType*: Opt[RelayType]
+ srcPeer*: Opt[RelayPeer]
+ dstPeer*: Opt[RelayPeer]
+ status*: Opt[StatusV1]
proc encode*(msg: RelayMessage): ProtoBuffer =
result = initProtoBuffer()
- if isSome(msg.msgType):
- result.write(1, msg.msgType.get().ord.uint)
- if isSome(msg.srcPeer):
+ msg.msgType.withValue(typ):
+ result.write(1, typ.ord.uint)
+ msg.srcPeer.withValue(srcPeer):
var peer = initProtoBuffer()
- peer.write(1, msg.srcPeer.get().peerId)
- for ma in msg.srcPeer.get().addrs:
+ peer.write(1, srcPeer.peerId)
+ for ma in srcPeer.addrs:
peer.write(2, ma.data.buffer)
peer.finish()
result.write(2, peer.buffer)
- if isSome(msg.dstPeer):
+ msg.dstPeer.withValue(dstPeer):
var peer = initProtoBuffer()
- peer.write(1, msg.dstPeer.get().peerId)
- for ma in msg.dstPeer.get().addrs:
+ peer.write(1, dstPeer.peerId)
+ for ma in dstPeer.addrs:
peer.write(2, ma.data.buffer)
peer.finish()
result.write(3, peer.buffer)
- if isSome(msg.status):
- result.write(4, msg.status.get().ord.uint)
+ msg.status.withValue(status):
+ result.write(4, status.ord.uint)
result.finish()
-proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
+proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Opt[RelayMessage] =
var
rMsg: RelayMessage
msgTypeOrd: uint32
@@ -88,38 +85,29 @@ proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
pbSrc: ProtoBuffer
pbDst: ProtoBuffer
- let
- pb = initProtoBuffer(buf)
- r1 = pb.getField(1, msgTypeOrd)
- r2 = pb.getField(2, pbSrc)
- r3 = pb.getField(3, pbDst)
- r4 = pb.getField(4, statusOrd)
+ let pb = initProtoBuffer(buf)
- if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr():
- return none(RelayMessage)
+ if ? pb.getField(1, msgTypeOrd).toOpt():
+ if msgTypeOrd.int notin RelayType:
+ return Opt.none(RelayMessage)
+ rMsg.msgType = Opt.some(RelayType(msgTypeOrd))
- if r2.get() and
- (pbSrc.getField(1, src.peerId).isErr() or
- pbSrc.getRepeatedField(2, src.addrs).isErr()):
- return none(RelayMessage)
+ if ? pb.getField(2, pbSrc).toOpt():
+ discard ? pbSrc.getField(1, src.peerId).toOpt()
+ discard ? pbSrc.getRepeatedField(2, src.addrs).toOpt()
+ rMsg.srcPeer = Opt.some(src)
- if r3.get() and
- (pbDst.getField(1, dst.peerId).isErr() or
- pbDst.getRepeatedField(2, dst.addrs).isErr()):
- return none(RelayMessage)
+ if ? pb.getField(3, pbDst).toOpt():
+ discard ? pbDst.getField(1, dst.peerId).toOpt()
+ discard ? pbDst.getRepeatedField(2, dst.addrs).toOpt()
+ rMsg.dstPeer = Opt.some(dst)
- if r1.get():
- if msgTypeOrd.int notin RelayType:
- return none(RelayMessage)
- rMsg.msgType = some(RelayType(msgTypeOrd))
- if r2.get(): rMsg.srcPeer = some(src)
- if r3.get(): rMsg.dstPeer = some(dst)
- if r4.get():
+ if ? pb.getField(4, statusOrd).toOpt():
var status: StatusV1
if not checkedEnumAssign(status, statusOrd):
- return none(RelayMessage)
- rMsg.status = some(status)
- some(rMsg)
+ return Opt.none(RelayMessage)
+ rMsg.status = Opt.some(status)
+ Opt.some(rMsg)
# Voucher
@@ -179,7 +167,7 @@ type
Reservation* = object
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
- svoucher*: Option[seq[byte]] # optional, reservation voucher
+ svoucher*: Opt[seq[byte]] # optional, reservation voucher
Limit* = object
duration*: uint32 # seconds
data*: uint64 # bytes
@@ -199,30 +187,29 @@ type
Status = 2
HopMessage* = object
msgType*: HopMessageType
- peer*: Option[Peer]
- reservation*: Option[Reservation]
+ peer*: Opt[Peer]
+ reservation*: Opt[Reservation]
limit*: Limit
- status*: Option[StatusV2]
+ status*: Opt[StatusV2]
proc encode*(msg: HopMessage): ProtoBuffer =
var pb = initProtoBuffer()
pb.write(1, msg.msgType.ord.uint)
- if msg.peer.isSome():
+ msg.peer.withValue(peer):
var ppb = initProtoBuffer()
- ppb.write(1, msg.peer.get().peerId)
- for ma in msg.peer.get().addrs:
+ ppb.write(1, peer.peerId)
+ for ma in peer.addrs:
ppb.write(2, ma.data.buffer)
ppb.finish()
pb.write(2, ppb.buffer)
- if msg.reservation.isSome():
- let rsrv = msg.reservation.get()
+ msg.reservation.withValue(rsrv):
var rpb = initProtoBuffer()
rpb.write(1, rsrv.expire)
for ma in rsrv.addrs:
rpb.write(2, ma.data.buffer)
- if rsrv.svoucher.isSome():
- rpb.write(3, rsrv.svoucher.get())
+ rsrv.svoucher.withValue(vouch):
+ rpb.write(3, vouch)
rpb.finish()
pb.write(3, rpb.buffer)
if msg.limit.duration > 0 or msg.limit.data > 0:
@@ -231,66 +218,51 @@ proc encode*(msg: HopMessage): ProtoBuffer =
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
lpb.finish()
pb.write(4, lpb.buffer)
- if msg.status.isSome():
- pb.write(5, msg.status.get().ord.uint)
+ msg.status.withValue(status):
+ pb.write(5, status.ord.uint)
pb.finish()
pb
-proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Option[HopMessage] =
- var
- msg: HopMessage
- msgTypeOrd: uint32
- pbPeer: ProtoBuffer
- pbReservation: ProtoBuffer
- pbLimit: ProtoBuffer
- statusOrd: uint32
- peer: Peer
- reservation: Reservation
- limit: Limit
- res: bool
-
- let
- pb = initProtoBuffer(buf)
- r1 = pb.getRequiredField(1, msgTypeOrd)
- r2 = pb.getField(2, pbPeer)
- r3 = pb.getField(3, pbReservation)
- r4 = pb.getField(4, pbLimit)
- r5 = pb.getField(5, statusOrd)
-
- if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr() or r5.isErr():
- return none(HopMessage)
-
- if r2.get() and
- (pbPeer.getRequiredField(1, peer.peerId).isErr() or
- pbPeer.getRepeatedField(2, peer.addrs).isErr()):
- return none(HopMessage)
-
- if r3.get():
- var svoucher: seq[byte]
- let rSVoucher = pbReservation.getField(3, svoucher)
- if pbReservation.getRequiredField(1, reservation.expire).isErr() or
- pbReservation.getRepeatedField(2, reservation.addrs).isErr() or
- rSVoucher.isErr():
- return none(HopMessage)
- if rSVoucher.get(): reservation.svoucher = some(svoucher)
-
- if r4.get() and
- (pbLimit.getField(1, limit.duration).isErr() or
- pbLimit.getField(2, limit.data).isErr()):
- return none(HopMessage)
+proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Opt[HopMessage] =
+ var msg: HopMessage
+ let pb = initProtoBuffer(buf)
+ var msgTypeOrd: uint32
+ ? pb.getRequiredField(1, msgTypeOrd).toOpt()
if not checkedEnumAssign(msg.msgType, msgTypeOrd):
- return none(HopMessage)
- if r2.get(): msg.peer = some(peer)
- if r3.get(): msg.reservation = some(reservation)
- if r4.get(): msg.limit = limit
- if r5.get():
+ return Opt.none(HopMessage)
+
+ var pbPeer: ProtoBuffer
+ if ? pb.getField(2, pbPeer).toOpt():
+ var peer: Peer
+ ? pbPeer.getRequiredField(1, peer.peerId).toOpt()
+ discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
+ msg.peer = Opt.some(peer)
+
+ var pbReservation: ProtoBuffer
+ if ? pb.getField(3, pbReservation).toOpt():
+ var
+ svoucher: seq[byte]
+ reservation: Reservation
+ if ? pbReservation.getField(3, svoucher).toOpt():
+ reservation.svoucher = Opt.some(svoucher)
+ ? pbReservation.getRequiredField(1, reservation.expire).toOpt()
+ discard ? pbReservation.getRepeatedField(2, reservation.addrs).toOpt()
+ msg.reservation = Opt.some(reservation)
+
+ var pbLimit: ProtoBuffer
+ if ? pb.getField(4, pbLimit).toOpt():
+ discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
+ discard ? pbLimit.getField(2, msg.limit.data).toOpt()
+
+ var statusOrd: uint32
+ if ? pb.getField(5, statusOrd).toOpt():
var status: StatusV2
if not checkedEnumAssign(status, statusOrd):
- return none(HopMessage)
- msg.status = some(status)
- some(msg)
+ return Opt.none(HopMessage)
+ msg.status = Opt.some(status)
+ Opt.some(msg)
# Circuit Relay V2 Stop Message
@@ -300,19 +272,19 @@ type
Status = 1
StopMessage* = object
msgType*: StopMessageType
- peer*: Option[Peer]
+ peer*: Opt[Peer]
limit*: Limit
- status*: Option[StatusV2]
+ status*: Opt[StatusV2]
proc encode*(msg: StopMessage): ProtoBuffer =
var pb = initProtoBuffer()
pb.write(1, msg.msgType.ord.uint)
- if msg.peer.isSome():
+ msg.peer.withValue(peer):
var ppb = initProtoBuffer()
- ppb.write(1, msg.peer.get().peerId)
- for ma in msg.peer.get().addrs:
+ ppb.write(1, peer.peerId)
+ for ma in peer.addrs:
ppb.write(2, ma.data.buffer)
ppb.finish()
pb.write(2, ppb.buffer)
@@ -322,52 +294,40 @@ proc encode*(msg: StopMessage): ProtoBuffer =
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
lpb.finish()
pb.write(3, lpb.buffer)
- if msg.status.isSome():
- pb.write(4, msg.status.get().ord.uint)
+ msg.status.withValue(status):
+ pb.write(4, status.ord.uint)
pb.finish()
pb
-proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Option[StopMessage] =
- var
- msg: StopMessage
- msgTypeOrd: uint32
- pbPeer: ProtoBuffer
- pbLimit: ProtoBuffer
- statusOrd: uint32
- peer: Peer
- limit: Limit
- rVoucher: ProtoResult[bool]
- res: bool
-
- let
- pb = initProtoBuffer(buf)
- r1 = pb.getRequiredField(1, msgTypeOrd)
- r2 = pb.getField(2, pbPeer)
- r3 = pb.getField(3, pbLimit)
- r4 = pb.getField(4, statusOrd)
-
- if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr():
- return none(StopMessage)
-
- if r2.get() and
- (pbPeer.getRequiredField(1, peer.peerId).isErr() or
- pbPeer.getRepeatedField(2, peer.addrs).isErr()):
- return none(StopMessage)
-
- if r3.get() and
- (pbLimit.getField(1, limit.duration).isErr() or
- pbLimit.getField(2, limit.data).isErr()):
- return none(StopMessage)
-
- if msgTypeOrd.int notin StopMessageType.low.ord .. StopMessageType.high.ord:
- return none(StopMessage)
+proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Opt[StopMessage] =
+ var msg: StopMessage
+
+ let pb = initProtoBuffer(buf)
+
+ var msgTypeOrd: uint32
+ ? pb.getRequiredField(1, msgTypeOrd).toOpt()
+ if msgTypeOrd.int notin StopMessageType:
+ return Opt.none(StopMessage)
msg.msgType = StopMessageType(msgTypeOrd)
- if r2.get(): msg.peer = some(peer)
- if r3.get(): msg.limit = limit
- if r4.get():
+
+
+ var pbPeer: ProtoBuffer
+ if ? pb.getField(2, pbPeer).toOpt():
+ var peer: Peer
+ ? pbPeer.getRequiredField(1, peer.peerId).toOpt()
+ discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
+ msg.peer = Opt.some(peer)
+
+ var pbLimit: ProtoBuffer
+ if ? pb.getField(3, pbLimit).toOpt():
+ discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
+ discard ? pbLimit.getField(2, msg.limit.data).toOpt()
+
+ var statusOrd: uint32
+ if ? pb.getField(4, statusOrd).toOpt():
var status: StatusV2
if not checkedEnumAssign(status, statusOrd):
- return none(StopMessage)
- msg.status = some(status)
- some(msg)
+ return Opt.none(StopMessage)
+ msg.status = Opt.some(status)
+ Opt.some(msg)
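
Every wire ordinal in these decoders goes through checkedEnumAssign from stew/objects: the assignment happens only when the integer maps to a valid enum member, otherwise the message decodes to none. A minimal illustration (the ordinal values are made up):

import stew/objects

type StatusV2 = enum
  Ok = 100, MalformedMessage = 400

var s = Ok
assert checkedEnumAssign(s, 400'u32)      # valid ordinal: assigned
assert s == MalformedMessage
assert not checkedEnumAssign(s, 999'u32)  # invalid: s left untouched
assert s == MalformedMessage
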
diff --git a/libp2p/protocols/connectivity/relay/rconn.nim b/libp2p/protocols/connectivity/relay/rconn.nim
index c46b8cc707..1856afe706 100644
--- a/libp2p/protocols/connectivity/relay/rconn.nim
+++ b/libp2p/protocols/connectivity/relay/rconn.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos
diff --git a/libp2p/protocols/connectivity/relay/relay.nim b/libp2p/protocols/connectivity/relay/relay.nim
index d9b1bd9280..5165fde539 100644
--- a/libp2p/protocols/connectivity/relay/relay.nim
+++ b/libp2p/protocols/connectivity/relay/relay.nim
@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import options, sequtils, tables
+import sequtils, tables
import chronos, chronicles
@@ -93,11 +90,11 @@ proc createReserveResponse(
rsrv = Reservation(expire: expireUnix,
addrs: r.switch.peerInfo.addrs.mapIt(
? it.concat(ma).orErr(CryptoError.KeyError)),
- svoucher: some(? sv.encode))
+ svoucher: Opt.some(? sv.encode))
msg = HopMessage(msgType: HopMessageType.Status,
- reservation: some(rsrv),
+ reservation: Opt.some(rsrv),
limit: r.limit,
- status: some(Ok))
+ status: Opt.some(Ok))
return ok(msg)
proc isRelayed*(conn: Connection): bool =
@@ -118,17 +115,16 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
trace "Too many reservations", pid = conn.peerId
await sendHopStatus(conn, ReservationRefused)
return
+ trace "reserving relay slot for", pid = conn.peerId
let
pid = conn.peerId
expire = now().utc + r.reservationTTL
- msg = r.createReserveResponse(pid, expire)
+ msg = r.createReserveResponse(pid, expire).valueOr:
+ trace "error signing the voucher", pid
+ return
- trace "reserving relay slot for", pid
- if msg.isErr():
- trace "error signing the voucher", error = error(msg), pid
- return
r.rsvp[pid] = expire
- await conn.writeLp(encode(msg.get()).buffer)
+ await conn.writeLp(encode(msg).buffer)
proc handleConnect(r: Relay,
connSrc: Connection,
@@ -137,13 +133,12 @@ proc handleConnect(r: Relay,
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
return
- if msg.peer.isNone():
- await sendHopStatus(connSrc, MalformedMessage)
- return
-
let
+ msgPeer = msg.peer.valueOr:
+ await sendHopStatus(connSrc, MalformedMessage)
+ return
src = connSrc.peerId
- dst = msg.peer.get().peerId
+ dst = msgPeer.peerId
if dst notin r.rsvp:
trace "refusing connection, no reservation", src, dst
await sendHopStatus(connSrc, NoReservation)
@@ -176,16 +171,17 @@ proc handleConnect(r: Relay,
proc sendStopMsg() {.async.} =
let stopMsg = StopMessage(msgType: StopMessageType.Connect,
- peer: some(Peer(peerId: src, addrs: @[])),
+ peer: Opt.some(Peer(peerId: src, addrs: @[])),
limit: r.limit)
await connDst.writeLp(encode(stopMsg).buffer)
- let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).get()
+ let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).valueOr:
+ raise newException(SendStopError, "Malformed message")
if msg.msgType != StopMessageType.Status:
raise newException(SendStopError, "Unexpected stop response, not a status message")
if msg.status.get(UnexpectedMessage) != Ok:
raise newException(SendStopError, "Relay stop failure")
await connSrc.writeLp(encode(HopMessage(msgType: HopMessageType.Status,
- status: some(Ok))).buffer)
+ status: Opt.some(Ok))).buffer)
try:
await sendStopMsg()
except CancelledError as exc:
@@ -205,12 +201,10 @@ proc handleConnect(r: Relay,
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
- let msgOpt = HopMessage.decode(await conn.readLp(r.msgSize))
- if msgOpt.isNone():
+ let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
- trace "relayv2 handle stream", msg = msgOpt.get()
- let msg = msgOpt.get()
+ trace "relayv2 handle stream", msg = msg
case msg.msgType:
of HopMessageType.Reserve: await r.handleReserve(conn)
of HopMessageType.Connect: await r.handleConnect(conn, msg)
@@ -228,15 +222,14 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, StatusV1.HopCantSpeakRelay)
return
+ var src, dst: RelayPeer
proc checkMsg(): Result[RelayMessage, StatusV1] =
- if msg.srcPeer.isNone:
+ src = msg.srcPeer.valueOr:
return err(StatusV1.HopSrcMultiaddrInvalid)
- let src = msg.srcPeer.get()
if src.peerId != connSrc.peerId:
return err(StatusV1.HopSrcMultiaddrInvalid)
- if msg.dstPeer.isNone:
+ dst = msg.dstPeer.valueOr:
return err(StatusV1.HopDstMultiaddrInvalid)
- let dst = msg.dstPeer.get()
if dst.peerId == r.switch.peerInfo.peerId:
return err(StatusV1.HopCantRelayToSelf)
if not r.switch.isConnected(dst.peerId):
@@ -248,9 +241,6 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, check.error())
return
- let
- src = msg.srcPeer.get()
- dst = msg.dstPeer.get()
if r.peerCount[src.peerId] >= r.maxCircuitPerPeer or
r.peerCount[dst.peerId] >= r.maxCircuitPerPeer:
trace "refusing connection; too many connection from src or to dst", src, dst
@@ -274,9 +264,9 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await connDst.close()
let msgToSend = RelayMessage(
- msgType: some(RelayType.Stop),
- srcPeer: some(src),
- dstPeer: some(dst))
+ msgType: Opt.some(RelayType.Stop),
+ srcPeer: Opt.some(src),
+ dstPeer: Opt.some(dst))
let msgRcvFromDstOpt = try:
await connDst.writeLp(encode(msgToSend).buffer)
@@ -288,12 +278,11 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
return
- if msgRcvFromDstOpt.isNone:
+ let msgRcvFromDst = msgRcvFromDstOpt.valueOr:
trace "error reading stop response", msg = msgRcvFromDstOpt
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
return
- let msgRcvFromDst = msgRcvFromDstOpt.get()
if msgRcvFromDst.msgType.get(RelayType.Stop) != RelayType.Status or
msgRcvFromDst.status.get(StatusV1.StopRelayRefused) != StatusV1.Success:
trace "unexcepted relay stop response", msgRcvFromDst
@@ -305,13 +294,16 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await bridge(connSrc, connDst)
proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
- let msgOpt = RelayMessage.decode(await conn.readLp(r.msgSize))
- if msgOpt.isNone:
+ let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
+ await sendStatus(conn, StatusV1.MalformedMessage)
+ return
+ trace "relay handle stream", msg
+
+ let typ = msg.msgType.valueOr:
+ trace "Message type not set"
await sendStatus(conn, StatusV1.MalformedMessage)
return
- trace "relay handle stream", msg = msgOpt.get()
- let msg = msgOpt.get()
- case msg.msgType.get:
+ case typ:
of RelayType.Hop: await r.handleHop(conn, msg)
of RelayType.Stop: await sendStatus(conn, StatusV1.StopRelayRefused)
of RelayType.CanHop: await sendStatus(conn, StatusV1.Success)
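
Most of the `isNone()`/`get()` pairs in the relay handlers collapse into `valueOr`, whose block runs only when the value is missing and can early-exit the enclosing proc. A minimal sketch of the control flow, with hypothetical types:

import stew/results

type Peer = object
  id: int

proc lookup(known: seq[Peer], id: int): Opt[Peer] =
  for p in known:
    if p.id == id: return Opt.some(p)
  Opt.none(Peer)

proc greet(known: seq[Peer], id: int): string =
  let peer = lookup(known, id).valueOr:
    return "unknown peer"            # replaces isNone() + early return
  "hello, peer " & $peer.id
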
diff --git a/libp2p/protocols/connectivity/relay/rtransport.nim b/libp2p/protocols/connectivity/relay/rtransport.nim
index 83ab7ba9d0..3008d9b043 100644
--- a/libp2p/protocols/connectivity/relay/rtransport.nim
+++ b/libp2p/protocols/connectivity/relay/rtransport.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import sequtils, strutils
@@ -40,7 +37,7 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
self.client.onNewConnection = proc(
conn: Connection,
duration: uint32 = 0,
- data: uint64 = 0) {.async, gcsafe, raises: [Defect].} =
+ data: uint64 = 0) {.async, gcsafe, raises: [].} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
@@ -64,9 +61,9 @@ proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async,
var
relayPeerId: PeerId
dstPeerId: PeerId
- if not relayPeerId.init(($(sma[^3].get())).split('/')[2]):
+ if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Relay doesn't exist")
- if not dstPeerId.init(($(sma[^1].get())).split('/')[2]):
+ if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Destination doesn't exist")
trace "Dial", relayPeerId, dstPeerId
@@ -94,13 +91,17 @@ method dial*(
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
- let address = MultiAddress.init($ma & "/p2p/" & $peerId.get()).tryGet()
- result = await self.dial(address)
+ peerId.withValue(pid):
+ let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
+ result = await self.dial(address)
-method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe} =
- if ma.protocols.isOk():
- let sma = toSeq(ma.items())
- result = sma.len >= 2 and CircuitRelay.match(sma[^1].get())
+method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
+ try:
+ if ma.protocols.isOk():
+ let sma = toSeq(ma.items())
+ result = sma.len >= 2 and CircuitRelay.match(sma[^1].tryGet())
+ except CatchableError as exc:
+ result = false
trace "Handles return", ma, result
proc new*(T: typedesc[RelayTransport], cl: RelayClient, upgrader: Upgrade): T =
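
`handles` switches from `get()` to `tryGet()` and gains a `try`/`except` fence because it is compiled under `{.push raises: [].}`: any parsing failure must become a `false` result instead of escaping. The same shape in miniature, with `parseInt` standing in for the fallible multiaddress accessors:

{.push raises: [].}

import std/strutils

proc looksNumeric(s: string): bool =
  # total function: a parse failure becomes `false`, never an exception
  try:
    discard parseInt(s)
    true
  except ValueError:
    false
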
diff --git a/libp2p/protocols/connectivity/relay/utils.nim b/libp2p/protocols/connectivity/relay/utils.nim
index 65eb4f4e3d..9a337e6d7c 100644
--- a/libp2p/protocols/connectivity/relay/utils.nim
+++ b/libp2p/protocols/connectivity/relay/utils.nim
@@ -7,15 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
-
-import options
+{.push raises: [].}
import chronos, chronicles
-
import ./messages,
../../../stream/connection
@@ -30,21 +24,21 @@ const
proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
- msg = RelayMessage(msgType: some(RelayType.Status), status: some(code))
+ msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
- msg = HopMessage(msgType: HopMessageType.Status, status: some(code))
+ msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
proc sendStopStatus*(conn: Connection, code: StatusV2) {.async.} =
trace "send stop relay/v2 status", status = $code & " (" & $ord(code) & ")"
let
- msg = StopMessage(msgType: StopMessageType.Status, status: some(code))
+ msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
diff --git a/libp2p/protocols/identify.nim b/libp2p/protocols/identify.nim
index 85202d5967..d677e9729f 100644
--- a/libp2p/protocols/identify.nim
+++ b/libp2p/protocols/identify.nim
@@ -10,10 +10,7 @@
## `Identify `_ and
## `Push Identify `_ implementation
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sequtils, options, strutils, sugar]
import stew/results
@@ -65,7 +62,7 @@ type
peer: PeerId,
newInfo: IdentifyInfo):
Future[void]
- {.gcsafe, raises: [Defect], public.}
+ {.gcsafe, raises: [], public.}
IdentifyPush* = ref object of LPProtocol
identifyHandler: IdentifyPushHandler
@@ -74,9 +71,7 @@ chronicles.expandIt(IdentifyInfo):
pubkey = ($it.pubkey).shortLog
addresses = it.addrs.map(x => $x).join(",")
protocols = it.protos.map(x => $x).join(",")
- observable_address =
- if it.observedAddr.isSome(): $it.observedAddr.get()
- else: "None"
+ observable_address = $it.observedAddr
proto_version = it.protoVersion.get("None")
agent_version = it.agentVersion.get("None")
signedPeerRecord =
@@ -86,18 +81,18 @@ chronicles.expandIt(IdentifyInfo):
else: "None"
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
- {.raises: [Defect].} =
+ {.raises: [].} =
result = initProtoBuffer()
let pkey = peerInfo.publicKey
- result.write(1, pkey.getBytes().get())
+ result.write(1, pkey.getBytes().expect("valid key"))
for ma in peerInfo.addrs:
result.write(2, ma.data.buffer)
for proto in peerInfo.protocols:
result.write(3, proto)
- if observedAddr.isSome:
- result.write(4, observedAddr.get().data.buffer)
+ observedAddr.withValue(observed):
+ result.write(4, observed.data.buffer)
let protoVersion = ProtoVersion
result.write(5, protoVersion)
let agentVersion = if peerInfo.agentVersion.len <= 0:
@@ -109,13 +104,12 @@ proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: boo
## Optionally populate signedPeerRecord field.
## See https://github.com/libp2p/go-libp2p/blob/ddf96ce1cfa9e19564feb9bd3e8269958bbc0aba/p2p/protocol/identify/pb/identify.proto for reference.
if sendSpr:
- let sprBuff = peerInfo.signedPeerRecord.envelope.encode()
- if sprBuff.isOk():
- result.write(8, sprBuff.get())
+ peerInfo.signedPeerRecord.envelope.encode().toOpt().withValue(sprBuff):
+ result.write(8, sprBuff)
result.finish()
-proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
+proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
var
iinfo: IdentifyInfo
pubkey: PublicKey
@@ -125,37 +119,22 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
signedPeerRecord: SignedPeerRecord
var pb = initProtoBuffer(buf)
-
- let r1 = pb.getField(1, pubkey)
- let r2 = pb.getRepeatedField(2, iinfo.addrs)
- let r3 = pb.getRepeatedField(3, iinfo.protos)
- let r4 = pb.getField(4, oaddr)
- let r5 = pb.getField(5, protoVersion)
- let r6 = pb.getField(6, agentVersion)
-
- let r8 = pb.getField(8, signedPeerRecord)
-
- let res = r1.isOk() and r2.isOk() and r3.isOk() and
- r4.isOk() and r5.isOk() and r6.isOk() and
- r8.isOk()
-
- if res:
- if r1.get():
- iinfo.pubkey = some(pubkey)
- if r4.get():
- iinfo.observedAddr = some(oaddr)
- if r5.get():
- iinfo.protoVersion = some(protoVersion)
- if r6.get():
- iinfo.agentVersion = some(agentVersion)
- if r8.get() and r1.get():
- if iinfo.pubkey.get() == signedPeerRecord.envelope.publicKey:
- iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
- debug "decodeMsg: decoded identify", iinfo
- some(iinfo)
- else:
- trace "decodeMsg: failed to decode received message"
- none[IdentifyInfo]()
+ if ? pb.getField(1, pubkey).toOpt():
+ iinfo.pubkey = some(pubkey)
+ if ? pb.getField(8, signedPeerRecord).toOpt() and
+ pubkey == signedPeerRecord.envelope.publicKey:
+ iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
+ discard ? pb.getRepeatedField(2, iinfo.addrs).toOpt()
+ discard ? pb.getRepeatedField(3, iinfo.protos).toOpt()
+ if ? pb.getField(4, oaddr).toOpt():
+ iinfo.observedAddr = some(oaddr)
+ if ? pb.getField(5, protoVersion).toOpt():
+ iinfo.protoVersion = some(protoVersion)
+ if ? pb.getField(6, agentVersion).toOpt():
+ iinfo.agentVersion = some(agentVersion)
+
+ debug "decodeMsg: decoded identify", iinfo
+ Opt.some(iinfo)
proc new*(
T: typedesc[Identify],
@@ -196,26 +175,19 @@ proc identify*(self: Identify,
trace "identify: Empty message received!", conn
raise newException(IdentityInvalidMsgError, "Empty message received!")
- let infoOpt = decodeMsg(message)
- if infoOpt.isNone():
- raise newException(IdentityInvalidMsgError, "Incorrect message received!")
+  var info = decodeMsg(message).valueOr:
+    raise newException(IdentityInvalidMsgError, "Incorrect message received!")
+  let
+    pubkey = info.pubkey.valueOr:
+      raise newException(IdentityInvalidMsgError, "No pubkey in identify")
+    peer = PeerId.init(pubkey).valueOr:
+      raise newException(IdentityInvalidMsgError, $error)
- var info = infoOpt.get()
- if info.pubkey.isNone():
- raise newException(IdentityInvalidMsgError, "No pubkey in identify")
-
- let peer = PeerId.init(info.pubkey.get())
- if peer.isErr:
- raise newException(IdentityInvalidMsgError, $peer.error)
-
- if peer.get() != remotePeerId:
+ if peer != remotePeerId:
trace "Peer ids don't match", remote = peer, local = remotePeerId
raise newException(IdentityNoMatchError, "Peer ids don't match")
- info.peerId = peer.get()
+ info.peerId = peer
- if info.observedAddr.isSome:
- if not self.observedAddrManager.addObservation(info.observedAddr.get()):
- debug "Observed address is not valid", observedAddr = info.observedAddr.get()
+ info.observedAddr.withValue(observed):
+ if not self.observedAddrManager.addObservation(observed):
+ debug "Observed address is not valid", observedAddr = observed
return info
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
@@ -231,21 +203,18 @@ proc init*(p: IdentifyPush) =
try:
var message = await conn.readLp(64*1024)
- let infoOpt = decodeMsg(message)
- if infoOpt.isNone():
+ var identInfo = decodeMsg(message).valueOr:
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
- var indentInfo = infoOpt.get()
-
- if indentInfo.pubkey.isSome:
- let receivedPeerId = PeerId.init(indentInfo.pubkey.get()).tryGet()
+ identInfo.pubkey.withValue(pubkey):
+ let receivedPeerId = PeerId.init(pubkey).tryGet()
if receivedPeerId != conn.peerId:
raise newException(IdentityNoMatchError, "Peer ids don't match")
- indentInfo.peerId = receivedPeerId
+ identInfo.peerId = receivedPeerId
trace "triggering peer event", peerInfo = conn.peerId
if not isNil(p.identifyHandler):
- await p.identifyHandler(conn.peerId, indentInfo)
+ await p.identifyHandler(conn.peerId, identInfo)
except CancelledError as exc:
raise exc
except CatchableError as exc:
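
`withValue` is the counterpart of `valueOr` used throughout identify: it executes its block only when a value is present and otherwise falls through, which is why the observed-address and pubkey branches lose their explicit `isSome`/`get` pairs. A small sketch, assuming the `withValue` helper exported from libp2p's utility module:

import stew/results
import libp2p/utility

proc describe(port: Opt[int]): string =
  var s = "listening"
  port.withValue(p):        # block skipped entirely when port is empty
    s.add(" on port " & $p)
  s
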
diff --git a/libp2p/protocols/perf/client.nim b/libp2p/protocols/perf/client.nim
new file mode 100644
index 0000000000..467aac4303
--- /dev/null
+++ b/libp2p/protocols/perf/client.nim
@@ -0,0 +1,47 @@
+# Nim-LibP2P
+# Copyright (c) 2023 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+## `Perf `_ protocol specification
+
+import chronos, chronicles, sequtils
+import stew/endians2
+import ./core, ../../stream/connection
+
+logScope:
+ topics = "libp2p perf"
+
+type PerfClient* = ref object of RootObj
+
+proc perf*(_: typedesc[PerfClient], conn: Connection,
+ sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0):
+ Future[Duration] {.async, public.} =
+ var
+ size = sizeToWrite
+ buf: array[PerfSize, byte]
+ let start = Moment.now()
+ trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
+
+ await conn.write(toSeq(toBytesBE(sizeToRead)))
+ while size > 0:
+ let toWrite = min(size, PerfSize)
+    await conn.write(buf[0..<toWrite])
+    size -= toWrite
+
+  await conn.close()
+
+  size = sizeToRead
+  while size > 0:
+ let toRead = min(size, PerfSize)
+ await conn.readExactly(addr buf[0], toRead.int)
+ size = size - toRead
+
+ let duration = Moment.now() - start
+ trace "finishing performance benchmark", duration
+ return duration
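
The client side of perf writes an 8-byte big-endian header telling the server how many bytes to send back, then streams `sizeToWrite` zero bytes. The framing round-trips with the same stew/endians2 calls used above:

import std/sequtils
import stew/endians2

let sizeToRead = 1_000_000'u64
let header = toSeq(toBytesBE(sizeToRead))   # 8 bytes, network byte order
assert header.len == 8
assert uint64.fromBytesBE(header) == sizeToRead
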
diff --git a/libp2p/protocols/perf/core.nim b/libp2p/protocols/perf/core.nim
new file mode 100644
index 0000000000..bb61965e8a
--- /dev/null
+++ b/libp2p/protocols/perf/core.nim
@@ -0,0 +1,14 @@
+# Nim-LibP2P
+# Copyright (c) 2023 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+## `Perf `_ protocol specification
+
+const
+ PerfCodec* = "/perf/1.0.0"
+ PerfSize* = 65536
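
Both sides move data in `PerfSize` chunks, with the final chunk sized to the remainder. The chunking arithmetic in isolation:

const PerfSize = 65536   # mirrors perf/core.nim

var
  total = 150_000'u64
  chunks: seq[uint64]
while total > 0:
  let chunk = min(total, PerfSize.uint64)
  chunks.add(chunk)
  total -= chunk
assert chunks == @[65536'u64, 65536, 18928]
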
diff --git a/libp2p/protocols/perf/server.nim b/libp2p/protocols/perf/server.nim
new file mode 100644
index 0000000000..383c32399b
--- /dev/null
+++ b/libp2p/protocols/perf/server.nim
@@ -0,0 +1,60 @@
+# Nim-LibP2P
+# Copyright (c) 2023 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+## `Perf `_ protocol specification
+
+{.push raises: [].}
+
+import chronos, chronicles
+import stew/endians2
+import ./core,
+ ../protocol,
+ ../../stream/connection,
+ ../../utility
+
+export chronicles, connection
+
+logScope:
+ topics = "libp2p perf"
+
+type Perf* = ref object of LPProtocol
+
+proc new*(T: typedesc[Perf]): T {.public.} =
+ var p = T()
+ proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+ var bytesRead = 0
+ try:
+ trace "Received benchmark performance check", conn
+ var
+ sizeBuffer: array[8, byte]
+ size: uint64
+ await conn.readExactly(addr sizeBuffer[0], 8)
+ size = uint64.fromBytesBE(sizeBuffer)
+
+ var toReadBuffer: array[PerfSize, byte]
+ try:
+ while true:
+ bytesRead += await conn.readOnce(addr toReadBuffer[0], PerfSize)
+ except CatchableError as exc:
+ discard
+
+ var buf: array[PerfSize, byte]
+ while size > 0:
+ let toWrite = min(size, PerfSize)
+        await conn.write(buf[0..<toWrite])
+        size -= toWrite
+    except CatchableError as exc:
+      trace "exception in perf handler", exc = exc.msg, conn
+
+    trace "Sent payload", size, sizeRead = bytesRead, conn
+    await conn.close()
+
+  p.handler = handle
+  p.codec = PerfCodec
+  return p
diff --git a/libp2p/protocols/ping.nim b/libp2p/protocols/ping.nim
--- a/libp2p/protocols/ping.nim
+++ b/libp2p/protocols/ping.nim
@@ ... @@
 ## `Ping `_ protocol implementation
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos, chronicles
import bearssl/rand
@@ -42,7 +39,7 @@ type
PingHandler* {.public.} = proc (
peer: PeerId):
Future[void]
- {.gcsafe, raises: [Defect].}
+ {.gcsafe, raises: [].}
Ping* = ref object of LPProtocol
pingHandler*: PingHandler
diff --git a/libp2p/protocols/protocol.nim b/libp2p/protocols/protocol.nim
index 88d96f623f..cb328849d0 100644
--- a/libp2p/protocols/protocol.nim
+++ b/libp2p/protocols/protocol.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos, stew/results
import ../stream/connection
@@ -25,7 +22,7 @@ type
conn: Connection,
proto: string):
Future[void]
- {.gcsafe, raises: [Defect].}
+ {.gcsafe, raises: [].}
LPProtocol* = ref object of RootObj
codecs*: seq[string]
@@ -55,8 +52,8 @@ func `codec=`*(p: LPProtocol, codec: string) =
proc new*(
T: type LPProtocol,
codecs: seq[string],
- handler: LPProtoHandler, # default(Opt[int]) or Opt.none(int) don't work on 1.2
- maxIncomingStreams: Opt[int] | int = Opt[int]()): T =
+ handler: LPProtoHandler,
+ maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
T(
codecs: codecs,
handler: handler,
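
With the Nim 1.2 workaround gone, `maxIncomingStreams` defaults to `Opt.none(int)`, while the `Opt[int] | int` union still lets callers pass a bare int. A sketch of how such a union parameter normalizes (hypothetical wrapper, not the real constructor):

import stew/results

proc toLimit(maxIncomingStreams: Opt[int] | int = Opt.none(int)): Opt[int] =
  when maxIncomingStreams is int:
    Opt.some(maxIncomingStreams)
  else:
    maxIncomingStreams

assert toLimit() == Opt.none(int)
assert toLimit(5) == Opt.some(5)
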
diff --git a/libp2p/protocols/pubsub/floodsub.nim b/libp2p/protocols/pubsub/floodsub.nim
index c68aca8ea1..819161c0c7 100644
--- a/libp2p/protocols/pubsub/floodsub.nim
+++ b/libp2p/protocols/pubsub/floodsub.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sets, hashes, tables]
import chronos, chronicles, metrics
@@ -18,7 +15,7 @@ import ./pubsub,
./pubsubpeer,
./timedcache,
./peertable,
- ./rpc/[message, messages],
+ ./rpc/[message, messages, protobuf],
../../crypto/crypto,
../../stream/connection,
../../peerid,
@@ -98,7 +95,16 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
method rpcHandler*(f: FloodSub,
peer: PubSubPeer,
- rpcMsg: RPCMsg) {.async.} =
+ data: seq[byte]) {.async.} =
+
+ var rpcMsg = decodeRpcMsg(data).valueOr:
+ debug "failed to decode msg from peer", peer, err = error
+ raise newException(CatchableError, "")
+
+ trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
+ # trigger hooks
+ peer.recvObservers(rpcMsg)
+
  for i in 0..<min(f.topicsHigh, rpcMsg.subscriptions.len):
diff --git a/libp2p/protocols/pubsub/gossipsub.nim b/libp2p/protocols/pubsub/gossipsub.nim
--- a/libp2p/protocols/pubsub/gossipsub.nim
+++ b/libp2p/protocols/pubsub/gossipsub.nim
@@ ... @@
    if iwant.messageIds.len > 0:
respControl.iwant.add(iwant)
@@ -304,12 +312,13 @@ proc validateAndRelay(g: GossipSub,
var seenPeers: HashSet[PubSubPeer]
discard g.validationSeen.pop(msgIdSalted, seenPeers)
libp2p_gossipsub_duplicate_during_validation.inc(seenPeers.len.int64)
+ libp2p_gossipsub_saved_bytes.inc((msg.data.len * seenPeers.len).int64, labelValues = ["validation_duplicate"])
case validation
of ValidationResult.Reject:
debug "Dropping message after validation, reason: reject",
msgId = shortLog(msgId), peer
- g.punishInvalidMessage(peer, msg.topicIds)
+ await g.punishInvalidMessage(peer, msg)
return
of ValidationResult.Ignore:
debug "Dropping message after validation, reason: ignore",
@@ -331,11 +340,31 @@ proc validateAndRelay(g: GossipSub,
g.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
g.mesh.withValue(t, peers): toSendPeers.incl(peers[])
+ # add direct peers
+ toSendPeers.incl(g.subscribedDirectPeers.getOrDefault(t))
+
# Don't send it to source peer, or peers that
# sent it during validation
toSendPeers.excl(peer)
toSendPeers.excl(seenPeers)
+ # IDontWant is only worth it if the message is substantially
+ # bigger than the messageId
+ if msg.data.len > msgId.len * 10:
+ g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
+ idontwant: @[ControlIWant(messageIds: @[msgId])]
+ ))))
+
+ for peer in toSendPeers:
+ for heDontWant in peer.heDontWants:
+ if msgId in heDontWant:
+ seenPeers.incl(peer)
+ libp2p_gossipsub_idontwant_saved_messages.inc
+ libp2p_gossipsub_saved_bytes.inc(msg.data.len.int64, labelValues = ["idontwant"])
+ break
+ toSendPeers.excl(seenPeers)
+
+
# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
@@ -352,9 +381,60 @@ proc validateAndRelay(g: GossipSub,
except CatchableError as exc:
info "validateAndRelay failed", msg=exc.msg
+proc dataAndTopicsIdSize(msgs: seq[Message]): int =
+ msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
+
+proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError], async.} =
+ # In this way we count even ignored fields by protobuf
+
+ var rmsg = rpcMsgOpt.valueOr:
+ peer.overheadRateLimitOpt.withValue(overheadRateLimit):
+ if not overheadRateLimit.tryConsume(msgSize):
+ libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+ debug "Peer sent a msg that couldn't be decoded and it's above rate limit.", peer, uselessAppBytesNum = msgSize
+ if g.parameters.disconnectPeerAboveRateLimit:
+ await g.disconnectPeer(peer)
+ raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
+
+ raise newException(CatchableError, "Peer msg couldn't be decoded")
+
+ let usefulMsgBytesNum =
+ if g.verifySignature:
+ byteSize(rmsg.messages)
+ else:
+ dataAndTopicsIdSize(rmsg.messages)
+
+ var uselessAppBytesNum = msgSize - usefulMsgBytesNum
+ rmsg.control.withValue(control):
+ uselessAppBytesNum -= (byteSize(control.ihave) + byteSize(control.iwant))
+
+ peer.overheadRateLimitOpt.withValue(overheadRateLimit):
+ if not overheadRateLimit.tryConsume(uselessAppBytesNum):
+ libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+ debug "Peer sent too much useless application data and it's above rate limit.", peer, msgSize, uselessAppBytesNum, rmsg
+ if g.parameters.disconnectPeerAboveRateLimit:
+ await g.disconnectPeer(peer)
+ raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
+
method rpcHandler*(g: GossipSub,
peer: PubSubPeer,
- rpcMsg: RPCMsg) {.async.} =
+ data: seq[byte]) {.async.} =
+
+ let msgSize = data.len
+ var rpcMsg = decodeRpcMsg(data).valueOr:
+ debug "failed to decode msg from peer", peer, err = error
+ await rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
+ return
+
+ trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
+ await rateLimit(g, peer, Opt.some(rpcMsg), msgSize)
+
+ # trigger hooks
+ peer.recvObservers(rpcMsg)
+
+ if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
+ g.send(peer, RPCMsg(pong: rpcMsg.ping))
+ peer.pingBudget.dec
  for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
@@ ... @@
    if msg.seqno.len > 0 and msg.seqno.len != 8:
# if we have seqno should be 8 bytes long
debug "Dropping message due to invalid seqno length",
msgId = shortLog(msgId), peer
- g.punishInvalidMessage(peer, msg.topicIds)
+ await g.punishInvalidMessage(peer, msg)
continue
# g.anonymize needs no evaluation when receiving messages
@@ -491,32 +571,38 @@ method publish*(g: GossipSub,
var peers: HashSet[PubSubPeer]
+ # add always direct peers
+ peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
+
+ if topic in g.topics: # if we're subscribed use the mesh
+ peers.incl(g.mesh.getOrDefault(topic))
+
if g.parameters.floodPublish:
# With flood publishing enabled, the mesh is used when propagating messages from other peers,
- # but a peer's own messages will always be published to all known peers in the topic.
+ # but a peer's own messages will always be published to all known peers in the topic, limited
+ # to the amount of peers we can send it to in one heartbeat
+    var maxPeersToFloodOpt: Opt[int64]
+ if g.parameters.bandwidthEstimatebps > 0:
+ let
+ bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
+ msToTransmit = max(data.len div bandwidth, 1)
+      maxPeersToFloodOpt = Opt.some(max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow))
+
for peer in g.gossipsub.getOrDefault(topic):
+      maxPeersToFloodOpt.withValue(maxPeersToFlood):
+        if peers.len >= maxPeersToFlood: break
if peer.score >= g.parameters.publishThreshold:
trace "publish: including flood/high score peer", peer
peers.incl(peer)
- # add always direct peers
- peers.incl(g.explicit.getOrDefault(topic))
-
- if topic in g.topics: # if we're subscribed use the mesh
- peers.incl(g.mesh.getOrDefault(topic))
-
- if peers.len < g.parameters.dLow and g.parameters.floodPublish == false:
- # not subscribed or bad mesh, send to fanout peers
- # disable for floodPublish, since we already sent to every good peer
- #
+ if peers.len < g.parameters.dLow:
+ # not subscribed, or bad mesh, send to fanout peers
var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
- if fanoutPeers.len == 0:
+ if fanoutPeers.len < g.parameters.dLow:
g.replenishFanout(topic)
fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
g.rng.shuffle(fanoutPeers)
- if fanoutPeers.len + peers.len > g.parameters.d:
- fanoutPeers.setLen(g.parameters.d - peers.len)
for fanPeer in fanoutPeers:
peers.incl(fanPeer)
@@ -534,7 +620,6 @@ method publish*(g: GossipSub,
debug "No peers for topic, skipping publish", peersOnTopic = topicPeers.len,
connectedPeers = topicPeers.filterIt(it.connected).len,
topic
- # skipping topic as our metrics finds that heavy
libp2p_gossipsub_failed_publish.inc()
return 0
@@ -570,15 +655,16 @@ method publish*(g: GossipSub,
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = ["generic"])
trace "Published message to peers", peers=peers.len
-
return peers.len
proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
- let peer = g.peers.getOrDefault(id)
- if isNil(peer):
+ if id notin g.peers:
trace "Attempting to dial a direct peer", peer = id
+ if g.switch.isConnected(id):
+ warn "We are connected to a direct peer, but it isn't a GossipSub peer!", id
+ return
try:
- await g.switch.connect(id, addrs)
+ await g.switch.connect(id, addrs, forceDial = true)
# populate the peer after it's connected
discard g.getOrCreatePeer(id, g.codecs)
except CancelledError as exc:
@@ -622,7 +708,7 @@ method stop*(g: GossipSub) {.async.} =
g.heartbeatFut = nil
method initPubSub*(g: GossipSub)
- {.raises: [Defect, InitializationError].} =
+ {.raises: [InitializationError].} =
procCall FloodSub(g).initPubSub()
if not g.parameters.explicit:
@@ -637,3 +723,13 @@ method initPubSub*(g: GossipSub)
# init gossip stuff
g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
+
+method getOrCreatePeer*(
+ g: GossipSub,
+ peerId: PeerId,
+ protos: seq[string]): PubSubPeer =
+
+ let peer = procCall PubSub(g).getOrCreatePeer(peerId, protos)
+ g.parameters.overheadRateLimit.withValue(overheadRateLimit):
+ peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
+ return peer
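
`overheadRateLimitOpt` is a chronos `TokenBucket`: the bucket holds `bytes` tokens refilled every `interval`, non-useful bytes are charged against it, and an empty bucket marks the peer as over the limit. A minimal sketch of the bucket semantics (budget values hypothetical):

import chronos
import chronos/ratelimit

let bucket = TokenBucket.new(1024, 500.milliseconds)
assert bucket.tryConsume(512)        # fits in the current budget
assert not bucket.tryConsume(4096)   # exceeds budget plus immediate refill
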
diff --git a/libp2p/protocols/pubsub/gossipsub/behavior.nim b/libp2p/protocols/pubsub/gossipsub/behavior.nim
index 13a041245b..983262fa0c 100644
--- a/libp2p/protocols/pubsub/gossipsub/behavior.nim
+++ b/libp2p/protocols/pubsub/gossipsub/behavior.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[tables, sequtils, sets, algorithm, deques]
import chronos, chronicles, metrics
@@ -33,7 +30,7 @@ declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh wi
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
-proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} =
+proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [].} =
g.withPeerStats(p.peerId) do (stats: var PeerStats):
var info = stats.topicInfos.getOrDefault(topic)
info.graftTime = Moment.now()
@@ -49,12 +46,10 @@ proc pruned*(g: GossipSub,
p: PubSubPeer,
topic: string,
setBackoff: bool = true,
- backoff = none(Duration)) {.raises: [Defect].} =
+ backoff = none(Duration)) {.raises: [].} =
if setBackoff:
let
- backoffDuration =
- if isSome(backoff): backoff.get()
- else: g.parameters.pruneBackoff
+ backoffDuration = backoff.get(g.parameters.pruneBackoff)
backoffMoment = Moment.fromNow(backoffDuration)
g.backingOff
@@ -75,7 +70,7 @@ proc pruned*(g: GossipSub,
trace "pruned", peer=p, topic
-proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].} =
+proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [].} =
let now = Moment.now()
var expired = toSeq(t.getOrDefault(topic).pairs())
expired.keepIf do (pair: tuple[peer: PeerId, expire: Moment]) -> bool:
@@ -84,7 +79,7 @@ proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].}
t.withValue(topic, v):
v[].del(peer)
-proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [Defect].} =
+proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [].} =
if not g.parameters.enablePX:
return @[]
var peers = g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()).toSeq()
@@ -111,10 +106,11 @@ proc handleGraft*(g: GossipSub,
let topic = graft.topicId
trace "peer grafted topic", peer, topic
- # It is an error to GRAFT on a explicit peer
+ # It is an error to GRAFT on a direct peer
if peer.peerId in g.parameters.directPeers:
# receiving a graft from a direct peer should yield a more prominent warning (protocol violation)
- warn "an explicit peer attempted to graft us, peering agreements should be reciprocal",
+ # we are trusting direct peer not to abuse this
+ warn "a direct peer attempted to graft us, peering agreements should be reciprocal",
peer, topic
# and such an attempt should be logged and rejected with a PRUNE
prunes.add(ControlPrune(
@@ -194,27 +190,22 @@ proc handleGraft*(g: GossipSub,
proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRecord])] =
var routingRecords: seq[(PeerId, Option[PeerRecord])]
for record in prune.peers:
- let peerRecord =
- if record.signedPeerRecord.len == 0:
- none(PeerRecord)
- else:
- let signedRecord = SignedPeerRecord.decode(record.signedPeerRecord)
- if signedRecord.isErr:
- trace "peer sent invalid SPR", peer, error=signedRecord.error
- none(PeerRecord)
+ var peerRecord = none(PeerRecord)
+ if record.signedPeerRecord.len > 0:
+ SignedPeerRecord.decode(record.signedPeerRecord).toOpt().withValue(spr):
+ if record.peerId != spr.data.peerId:
+ trace "peer sent envelope with wrong public key", peer
else:
- if record.peerId != signedRecord.get().data.peerId:
- trace "peer sent envelope with wrong public key", peer
- none(PeerRecord)
- else:
- some(signedRecord.get().data)
+ peerRecord = some(spr.data)
+ else:
+ trace "peer sent invalid SPR", peer
routingRecords.add((record.peerId, peerRecord))
routingRecords
-proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [Defect].} =
+proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [].} =
for prune in prunes:
let topic = prune.topicId
@@ -248,39 +239,42 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r
proc handleIHave*(g: GossipSub,
peer: PubSubPeer,
- ihaves: seq[ControlIHave]): ControlIWant {.raises: [Defect].} =
+ ihaves: seq[ControlIHave]): ControlIWant {.raises: [].} =
var res: ControlIWant
if peer.score < g.parameters.gossipThreshold:
trace "ihave: ignoring low score peer", peer, score = peer.score
elif peer.iHaveBudget <= 0:
trace "ihave: ignoring out of budget peer", peer, score = peer.score
else:
- # TODO review deduplicate algorithm
- # * https://github.com/nim-lang/Nim/blob/5f46474555ee93306cce55342e81130c1da79a42/lib/pure/collections/sequtils.nim#L184
- # * it's probably not efficient and might give preference to the first dupe
- let deIhaves = ihaves.deduplicate()
- for ihave in deIhaves:
+ for ihave in ihaves:
trace "peer sent ihave",
peer, topic = ihave.topicId, msgs = ihave.messageIds
- if ihave.topicId in g.mesh:
- # also avoid duplicates here!
- let deIhavesMsgs = ihave.messageIds.deduplicate()
- for msgId in deIhavesMsgs:
+ if ihave.topicId in g.topics:
+ for msgId in ihave.messageIds:
if not g.hasSeen(msgId):
- if peer.iHaveBudget > 0:
+ if peer.iHaveBudget <= 0:
+ break
+ elif msgId notin res.messageIds:
res.messageIds.add(msgId)
dec peer.iHaveBudget
trace "requested message via ihave", messageID=msgId
- else:
- break
# shuffling res.messageIDs before sending it out to increase the likelihood
# of getting an answer if the peer truncates the list due to internal size restrictions.
g.rng.shuffle(res.messageIds)
return res
+proc handleIDontWant*(g: GossipSub,
+ peer: PubSubPeer,
+ iDontWants: seq[ControlIWant]) =
+ for dontWant in iDontWants:
+ for messageId in dontWant.messageIds:
+ if peer.heDontWants[^1].len > 1000: break
+ if messageId.len > 100: continue
+ peer.heDontWants[^1].incl(messageId)
+
proc handleIWant*(g: GossipSub,
peer: PubSubPeer,
- iwants: seq[ControlIWant]): seq[Message] {.raises: [Defect].} =
+ iwants: seq[ControlIWant]): seq[Message] {.raises: [].} =
var
messages: seq[Message]
invalidRequests = 0
@@ -299,15 +293,14 @@ proc handleIWant*(g: GossipSub,
libp2p_gossipsub_received_iwants.inc(1, labelValues=["skipped"])
return messages
continue
- let msg = g.mcache.get(mid)
- if msg.isSome:
- libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
- messages.add(msg.get())
- else:
+ let msg = g.mcache.get(mid).valueOr:
libp2p_gossipsub_received_iwants.inc(1, labelValues=["unknown"])
+ continue
+ libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
+ messages.add(msg)
return messages
-proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
+proc commitMetrics(metrics: var MeshMetrics) {.raises: [].} =
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
libp2p_gossipsub_under_dout_topics.set(metrics.underDoutTopics)
@@ -316,7 +309,7 @@ proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
libp2p_gossipsub_peers_per_topic_fanout.set(metrics.otherPeersPerTopicFanout, labelValues = ["other"])
libp2p_gossipsub_peers_per_topic_mesh.set(metrics.otherPeersPerTopicMesh, labelValues = ["other"])
-proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) {.raises: [Defect].} =
+proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) {.raises: [].} =
logScope:
topic
mesh = g.mesh.peers(topic)
@@ -348,7 +341,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# avoid negative score peers
it.score >= 0.0 and
it notin currentMesh[] and
- # don't pick explicit peers
+ # don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -388,7 +381,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
it notin currentMesh[] and
# avoid negative score peers
it.score >= 0.0 and
- # don't pick explicit peers
+ # don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -490,7 +483,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# avoid negative score peers
it.score >= median.score and
it notin currentMesh[] and
- # don't pick explicit peers
+ # don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -546,7 +539,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
g.broadcast(prunes, prune)
-proc dropFanoutPeers*(g: GossipSub) {.raises: [Defect].} =
+proc dropFanoutPeers*(g: GossipSub) {.raises: [].} =
# drop peers that we haven't published to in
# GossipSubFanoutTTL seconds
let now = Moment.now()
@@ -559,13 +552,13 @@ proc dropFanoutPeers*(g: GossipSub) {.raises: [Defect].} =
for topic in drops:
g.lastFanoutPubSub.del topic
-proc replenishFanout*(g: GossipSub, topic: string) {.raises: [Defect].} =
+proc replenishFanout*(g: GossipSub, topic: string) {.raises: [].} =
## get fanout peers for a topic
logScope: topic
trace "about to replenish fanout"
- let currentMesh = g.mesh.getOrDefault(topic)
if g.fanout.peers(topic) < g.parameters.dLow:
+ let currentMesh = g.mesh.getOrDefault(topic)
trace "replenishing fanout", peers = g.fanout.peers(topic)
for peer in g.gossipsub.getOrDefault(topic):
if peer in currentMesh: continue
@@ -575,7 +568,7 @@ proc replenishFanout*(g: GossipSub, topic: string) {.raises: [Defect].} =
trace "fanout replenished with peers", peers = g.fanout.peers(topic)
-proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises: [Defect].} =
+proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises: [].} =
## gossip iHave messages to peers
##
@@ -638,7 +631,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
return control
-proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
+proc onHeartbeat(g: GossipSub) {.raises: [].} =
# reset IWANT budget
# reset IHAVE cap
block:
@@ -646,7 +639,11 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
peer.sentIHaves.addFirst(default(HashSet[MessageId]))
if peer.sentIHaves.len > g.parameters.historyLength:
discard peer.sentIHaves.popLast()
+ peer.heDontWants.addFirst(default(HashSet[MessageId]))
+ if peer.heDontWants.len > g.parameters.historyLength:
+ discard peer.heDontWants.popLast()
peer.iHaveBudget = IHavePeerBudget
+ peer.pingBudget = PingsPeerBudget
var meshMetrics = MeshMetrics()
@@ -698,7 +695,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
g.mcache.shift() # shift the cache
-# {.pop.} # raises [Defect]
+# {.pop.} # raises []
proc heartbeat*(g: GossipSub) {.async.} =
heartbeat "GossipSub", g.parameters.heartbeatInterval:
diff --git a/libp2p/protocols/pubsub/gossipsub/scoring.nim b/libp2p/protocols/pubsub/gossipsub/scoring.nim
index 3606059db6..ee4f34da00 100644
--- a/libp2p/protocols/pubsub/gossipsub/scoring.nim
+++ b/libp2p/protocols/pubsub/gossipsub/scoring.nim
@@ -7,16 +7,16 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import std/[tables, sets, options]
+import std/[tables, sets]
import chronos, chronicles, metrics
+import chronos/ratelimit
import "."/[types]
import ".."/[pubsubpeer]
+import ../rpc/messages
import "../../.."/[peerid, multiaddress, switch, utils/heartbeat]
+import ../pubsub
logScope:
topics = "libp2p gossipsub"
@@ -30,6 +30,7 @@ declareGauge(libp2p_gossipsub_peers_score_invalidMessageDeliveries, "Detailed go
declareGauge(libp2p_gossipsub_peers_score_appScore, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_behaviourPenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_colocationFactor, "Detailed gossipsub scoring metric", labels = ["agent"])
+declarePublicCounter(libp2p_gossipsub_peers_rate_limit_hits, "The number of times peers were above their rate limit", labels = ["agent"])
proc init*(_: type[TopicParams]): TopicParams =
TopicParams(
@@ -55,7 +56,7 @@ proc init*(_: type[TopicParams]): TopicParams =
proc withPeerStats*(
g: GossipSub,
peerId: PeerId,
- action: proc (stats: var PeerStats) {.gcsafe, raises: [Defect].}) =
+ action: proc (stats: var PeerStats) {.gcsafe, raises: [].}) =
## Add or update peer statistics for a particular peer id - the statistics
## are retained across multiple connections until they expire
g.peerStats.withValue(peerId, stats) do:
@@ -74,39 +75,32 @@ func `/`(a, b: Duration): float64 =
func byScore*(x,y: PubSubPeer): int = system.cmp(x.score, y.score)
proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
- if peer.address.isNone():
- 0.0
+ let address = peer.address.valueOr: return 0.0
+
+ g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
+ let
+ ipPeers = g.peersInIP.getOrDefault(address).len().float64
+ if ipPeers > g.parameters.ipColocationFactorThreshold:
+ trace "colocationFactor over threshold", peer, address, ipPeers
+ let over = ipPeers - g.parameters.ipColocationFactorThreshold
+ over * over
else:
- let
- address = peer.address.get()
- g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
- let
- ipPeers = g.peersInIP.getOrDefault(address).len().float64
- if ipPeers > g.parameters.ipColocationFactorThreshold:
- trace "colocationFactor over threshold", peer, address, ipPeers
- let over = ipPeers - g.parameters.ipColocationFactorThreshold
- over * over
- else:
- 0.0
+ 0.0
{.pop.}
-proc disconnectPeer(g: GossipSub, peer: PubSubPeer) {.async.} =
- let agent =
- when defined(libp2p_agents_metrics):
- if peer.shortAgent.len > 0:
- peer.shortAgent
- else:
- "unknown"
- else:
- "unknown"
- libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [agent])
-
+proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
try:
await g.switch.disconnect(peer.peerId)
except CatchableError as exc: # Never cancelled
trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
+proc disconnectIfBadScorePeer*(g: GossipSub, peer: PubSubPeer, score: float64) =
+ if g.parameters.disconnectBadPeers and score < g.parameters.graylistThreshold and
+ peer.peerId notin g.parameters.directPeers:
+ debug "disconnecting bad score peer", peer, score = peer.score
+ asyncSpawn(g.disconnectPeer(peer))
+ libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [peer.getAgent()])
proc updateScores*(g: GossipSub) = # avoid async
## https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#the-score-function
@@ -176,14 +170,7 @@ proc updateScores*(g: GossipSub) = # avoid async
score += topicScore * topicParams.topicWeight
# Score metrics
- let agent =
- when defined(libp2p_agents_metrics):
- if peer.shortAgent.len > 0:
- peer.shortAgent
- else:
- "unknown"
- else:
- "unknown"
+ let agent = peer.getAgent()
libp2p_gossipsub_peers_score_firstMessageDeliveries.inc(info.firstMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshMessageDeliveries.inc(info.meshMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshFailurePenalty.inc(info.meshFailurePenalty, labelValues = [agent])
@@ -220,14 +207,7 @@ proc updateScores*(g: GossipSub) = # avoid async
score += colocationFactor * g.parameters.ipColocationFactorWeight
# Score metrics
- let agent =
- when defined(libp2p_agents_metrics):
- if peer.shortAgent.len > 0:
- peer.shortAgent
- else:
- "unknown"
- else:
- "unknown"
+ let agent = peer.getAgent()
libp2p_gossipsub_peers_score_appScore.inc(peer.appScore, labelValues = [agent])
libp2p_gossipsub_peers_score_behaviourPenalty.inc(peer.behaviourPenalty, labelValues = [agent])
libp2p_gossipsub_peers_score_colocationFactor.inc(colocationFactor, labelValues = [agent])
@@ -247,11 +227,7 @@ proc updateScores*(g: GossipSub) = # avoid async
trace "updated peer's score", peer, score = peer.score, n_topics, is_grafted
- if g.parameters.disconnectBadPeers and stats.score < g.parameters.graylistThreshold and
- peer.peerId notin g.parameters.directPeers:
- debug "disconnecting bad score peer", peer, score = peer.score
- asyncSpawn(g.disconnectPeer(peer))
-
+ g.disconnectIfBadScorePeer(peer, stats.score)
libp2p_gossipsub_peers_scores.inc(peer.score, labelValues = [agent])
for peer in evicting:
@@ -264,8 +240,18 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()
-proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, topics: seq[string]) =
- for tt in topics:
+proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
+ let uselessAppBytesNum = msg.data.len
+ peer.overheadRateLimitOpt.withValue(overheadRateLimit):
+ if not overheadRateLimit.tryConsume(uselessAppBytesNum):
+ debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
+ libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+ if g.parameters.disconnectPeerAboveRateLimit:
+ await g.disconnectPeer(peer)
+ raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
+
+
+ for tt in msg.topicIds:
let t = tt
if t notin g.topics:
continue
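
The colocation factor itself is unchanged by this refactor: peers sharing an IP beyond `ipColocationFactorThreshold` are penalised quadratically in the number of excess peers. For example, with a threshold of 1 and 4 peers behind one address:

let
  ipPeers = 4.0
  ipColocationFactorThreshold = 1.0
let over = ipPeers - ipColocationFactorThreshold
assert over * over == 9.0   # the factor fed into the (negative) weight
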
diff --git a/libp2p/protocols/pubsub/gossipsub/types.nim b/libp2p/protocols/pubsub/gossipsub/types.nim
index e82b85af88..06fa55eb30 100644
--- a/libp2p/protocols/pubsub/gossipsub/types.nim
+++ b/libp2p/protocols/pubsub/gossipsub/types.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos
import std/[options, tables, sets]
@@ -48,6 +45,7 @@ const
const
BackoffSlackTime* = 2 # seconds
+ PingsPeerBudget* = 100 # maximum of 6.4kb/heartbeat (6.4kb/s with default 1 second/hb)
IHavePeerBudget* = 10
# the max amount of IHave to expose, not by spec, but go as example
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
@@ -144,6 +142,11 @@ type
disconnectBadPeers*: bool
enablePX*: bool
+    bandwidthEstimatebps*: int # This is currently used only for limiting flood publishing. 0 disables flood-limiting completely
+
+ overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
+ disconnectPeerAboveRateLimit*: bool
+
BackoffTable* = Table[string, Table[PeerId, Moment]]
ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]
@@ -152,13 +155,13 @@ type
proc(peer: PeerId,
tag: string, # For gossipsub, the topic
peers: seq[RoutingRecordsPair])
- {.gcsafe, raises: [Defect].}
+ {.gcsafe, raises: [].}
GossipSub* = ref object of FloodSub
mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic
fanout*: PeerTable # peers that we send messages to when we're not subscribed to the topic
gossipsub*: PeerTable # peers that are subscribed to a topic
- explicit*: PeerTable # directpeers that we keep alive explicitly
+ subscribedDirectPeers*: PeerTable # directpeers that we keep alive
backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
gossip*: Table[string, seq[ControlIHave]] # pending gossip
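
The two new parameters combine into the anti-flood behaviour seen in gossipsub.nim: `bandwidthEstimatebps` caps flood publishing per heartbeat, and `overheadRateLimit` bounds undecodable or useless bytes per peer. A sketch of plausible settings (field values hypothetical; the object mirrors only the fields added here):

import chronos
import stew/results

type FloodControl = object   # stand-in for the new GossipSubParams fields
  bandwidthEstimatebps: int
  overheadRateLimit: Opt[tuple[bytes: int, interval: Duration]]
  disconnectPeerAboveRateLimit: bool

let params = FloodControl(
  bandwidthEstimatebps: 100_000_000,   # 100 Mbit/s; 0 disables flood-limiting
  overheadRateLimit: Opt.some((bytes: 65536, interval: 1.seconds)),
  disconnectPeerAboveRateLimit: false)
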
diff --git a/libp2p/protocols/pubsub/mcache.nim b/libp2p/protocols/pubsub/mcache.nim
index 14aa9a0ee9..d6ea6871af 100644
--- a/libp2p/protocols/pubsub/mcache.nim
+++ b/libp2p/protocols/pubsub/mcache.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sets, tables, options]
import rpc/[messages]
diff --git a/libp2p/protocols/pubsub/peertable.nim b/libp2p/protocols/pubsub/peertable.nim
index 2f19befafc..78eadc4645 100644
--- a/libp2p/protocols/pubsub/peertable.nim
+++ b/libp2p/protocols/pubsub/peertable.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[tables, sets, sequtils]
import ./pubsubpeer, ../../peerid
diff --git a/libp2p/protocols/pubsub/pubsub.nim b/libp2p/protocols/pubsub/pubsub.nim
index fb7aea13b4..ef4d680024 100644
--- a/libp2p/protocols/pubsub/pubsub.nim
+++ b/libp2p/protocols/pubsub/pubsub.nim
@@ -13,13 +13,11 @@
## `publish<#publish.e%2CPubSub%2Cstring%2Cseq%5Bbyte%5D>`_ something on it,
## and eventually `unsubscribe<#unsubscribe%2CPubSub%2Cstring%2CTopicHandler>`_ from it.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[tables, sequtils, sets, strutils]
import chronos, chronicles, metrics
+import chronos/ratelimit
import ./errors as pubsub_errors,
./pubsubpeer,
./rpc/[message, messages, protobuf],
@@ -86,18 +84,18 @@ type
InitializationError* = object of LPError
TopicHandler* {.public.} = proc(topic: string,
- data: seq[byte]): Future[void] {.gcsafe, raises: [Defect].}
+ data: seq[byte]): Future[void] {.gcsafe, raises: [].}
ValidatorHandler* {.public.} = proc(topic: string,
- message: Message): Future[ValidationResult] {.gcsafe, raises: [Defect].}
+ message: Message): Future[ValidationResult] {.gcsafe, raises: [].}
TopicPair* = tuple[topic: string, handler: TopicHandler]
MsgIdProvider* {.public.} =
- proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [Defect], gcsafe.}
+ proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [], gcsafe.}
SubscriptionValidator* {.public.} =
- proc(topic: string): bool {.raises: [Defect], gcsafe.}
+ proc(topic: string): bool {.raises: [], gcsafe.}
## Every time a peer send us a subscription (even to an unknown topic),
## we have to store it, which may be an attack vector.
## This callback can be used to reject topic we're not interested in
@@ -140,7 +138,7 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
libp2p_pubsub_peers.set(p.peers.len.int64)
-proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [Defect].} =
+proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [].} =
## Attempt to send `msg` to remote peer
##
@@ -150,7 +148,7 @@ proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [Defect].} =
proc broadcast*(
p: PubSub,
sendPeers: auto, # Iteratble[PubSubPeer]
- msg: RPCMsg) {.raises: [Defect].} =
+ msg: RPCMsg) {.raises: [].} =
## Attempt to send `msg` to the given peers
let npeers = sendPeers.len.int64
@@ -173,10 +171,9 @@ proc broadcast*(
else:
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = ["generic"])
- if msg.control.isSome():
- libp2p_pubsub_broadcast_iwant.inc(npeers * msg.control.get().iwant.len.int64)
+ msg.control.withValue(control):
+ libp2p_pubsub_broadcast_iwant.inc(npeers * control.iwant.len.int64)
- let control = msg.control.get()
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicId])
@@ -247,9 +244,8 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
else:
libp2p_pubsub_received_messages.inc(labelValues = ["generic"])
- if rpcMsg.control.isSome():
- libp2p_pubsub_received_iwant.inc(rpcMsg.control.get().iwant.len.int64)
- template control: untyped = rpcMsg.control.unsafeGet()
+ rpcMsg.control.withValue(control):
+ libp2p_pubsub_received_iwant.inc(control.iwant.len.int64)
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicId])
@@ -268,7 +264,7 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
method rpcHandler*(p: PubSub,
peer: PubSubPeer,
- rpcMsg: RPCMsg): Future[void] {.base, async.} =
+ data: seq[byte]): Future[void] {.base, async.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"
@@ -283,10 +279,11 @@ method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {
of PubSubPeerEventKind.Disconnected:
discard
-proc getOrCreatePeer*(
+method getOrCreatePeer*(
p: PubSub,
peerId: PeerId,
- protos: seq[string]): PubSubPeer =
+ protos: seq[string]): PubSubPeer {.base, gcsafe.} =
+
p.peers.withValue(peerId, peer):
return peer[]
@@ -359,9 +356,9 @@ method handleConn*(p: PubSub,
## that we're interested in
##
- proc handler(peer: PubSubPeer, msg: RPCMsg): Future[void] =
+ proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
# call pubsub rpc handler
- p.rpcHandler(peer, msg)
+ p.rpcHandler(peer, data)
let peer = p.getOrCreatePeer(conn.peerId, @[proto])
@@ -491,7 +488,7 @@ method publish*(p: PubSub,
return 0
method initPubSub*(p: PubSub)
- {.base, raises: [Defect, InitializationError].} =
+ {.base, raises: [InitializationError].} =
## perform pubsub initialization
p.observers = new(seq[PubSubObserver])
if p.msgIdProvider == nil:
@@ -559,7 +556,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false): P
- {.raises: [Defect, InitializationError], public.} =
+ {.raises: [InitializationError], public.} =
let pubsub =
when PubParams is bool:
P(switch: switch,
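
With `RPCHandler` now receiving raw bytes, decoding moves into each implementation, which is what lets gossipsub compare the decoded size against the on-wire size for rate limiting. The handler-side shape, assuming only the real `decodeRpcMsg` codec (the surrounding proc is illustrative):

import stew/results
import libp2p/protocols/pubsub/rpc/[messages, protobuf]

proc handleWire(data: seq[byte]) =
  let msgSize = data.len
  let rpcMsg = decodeRpcMsg(data).valueOr:
    echo "undecodable message of ", msgSize, " bytes"   # charge overhead here
    return
  echo "decoded ", rpcMsg.messages.len, " messages from ", msgSize, " bytes"
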
diff --git a/libp2p/protocols/pubsub/pubsubpeer.nim b/libp2p/protocols/pubsub/pubsubpeer.nim
index 31fbbe7a69..25315e4dca 100644
--- a/libp2p/protocols/pubsub/pubsubpeer.nim
+++ b/libp2p/protocols/pubsub/pubsubpeer.nim
@@ -7,14 +7,12 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sequtils, strutils, tables, hashes, options, sets, deques]
import stew/results
import chronos, chronicles, nimcrypto/sha2, metrics
+import chronos/ratelimit
import rpc/[messages, message, protobuf],
../../peerid,
../../peerinfo,
@@ -23,7 +21,7 @@ import rpc/[messages, message, protobuf],
../../protobuf/minprotobuf,
../../utility
-export peerid, connection
+export peerid, connection, deques
logScope:
topics = "libp2p pubsubpeer"
@@ -35,9 +33,11 @@ when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
type
+ PeerRateLimitError* = object of CatchableError
+
PubSubObserver* = ref object
- onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [Defect].}
- onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [Defect].}
+ onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
+ onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
PubSubPeerEventKind* {.pure.} = enum
Connected
@@ -46,9 +46,9 @@ type
PubSubPeerEvent* = object
kind*: PubSubPeerEventKind
- GetConn* = proc(): Future[Connection] {.gcsafe, raises: [Defect].}
- DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [Defect].} # have to pass peer as it's unknown during init
- OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [Defect].}
+ GetConn* = proc(): Future[Connection] {.gcsafe, raises: [].}
+ DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
+ OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
@@ -63,13 +63,16 @@ type
score*: float64
sentIHaves*: Deque[HashSet[MessageId]]
+ heDontWants*: Deque[HashSet[MessageId]]
iHaveBudget*: int
+ pingBudget*: int
maxMessageSize: int
appScore*: float64 # application specific score
behaviourPenalty*: float64 # the eventual penalty score
+ overheadRateLimitOpt*: Opt[TokenBucket]
- RPCHandler* = proc(peer: PubSubPeer, msg: RPCMsg): Future[void]
- {.gcsafe, raises: [Defect].}
+ RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
+ {.gcsafe, raises: [].}
when defined(libp2p_agents_metrics):
func shortAgent*(p: PubSubPeer): string =
@@ -108,7 +111,7 @@ func outbound*(p: PubSubPeer): bool =
else:
false
-proc recvObservers(p: PubSubPeer, msg: var RPCMsg) =
+proc recvObservers*(p: PubSubPeer, msg: var RPCMsg) =
# trigger hooks
if not(isNil(p.observers)) and p.observers[].len > 0:
for obs in p.observers[]:
@@ -135,28 +138,19 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
conn, peer = p, closed = conn.closed,
data = data.shortLog
- var rmsg = decodeRpcMsg(data)
- data = newSeq[byte]() # Release memory
-
- if rmsg.isErr():
- notice "failed to decode msg from peer",
- conn, peer = p, closed = conn.closed,
- err = rmsg.error()
- break
-
- trace "decoded msg from peer",
- conn, peer = p, closed = conn.closed,
- msg = rmsg.get().shortLog
- # trigger hooks
- p.recvObservers(rmsg.get())
-
when defined(libp2p_expensive_metrics):
- for m in rmsg.get().messages:
+ for m in rmsg.messages:
for t in m.topicIDs:
# metrics
libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])
- await p.handler(p, rmsg.get())
+ await p.handler(p, data)
+ data = newSeq[byte]() # Release memory
+ except PeerRateLimitError as exc:
+ debug "Peer rate limit exceeded, exiting read while", conn, peer = p, error = exc.msg
+ except CatchableError as exc:
+ debug "Exception occurred in PubSubPeer.handle",
+ conn, peer = p, closed = conn.closed, exc = exc.msg
finally:
await conn.close()
except CancelledError:
@@ -174,7 +168,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
try:
if p.connectedFut.finished:
p.connectedFut = newFuture[void]()
- let newConn = await p.getConn()
+ let newConn = await p.getConn().wait(5.seconds)
if newConn.isNil:
raise (ref LPError)(msg: "Cannot establish send connection")
@@ -201,6 +195,9 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
await p.sendConn.close()
p.sendConn = nil
+ if not p.connectedFut.finished:
+ p.connectedFut.complete()
+
try:
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
@@ -237,7 +234,7 @@ template sendMetrics(msg: RPCMsg): untyped =
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
-proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
+proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
doAssert(not isNil(p), "pubsubpeer nil!")
if msg.len <= 0:
@@ -245,15 +242,17 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
return
if msg.len > p.maxMessageSize:
- info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
+ info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
if p.sendConn == nil:
- discard await p.connectedFut.withTimeout(1.seconds)
+    # Wait for a send conn to be set up. `connectOnce` will
+    # complete this future even if the sendConn setup failed.
+ await p.connectedFut
var conn = p.sendConn
if conn == nil or conn.closed():
- debug "No send connection, skipping message", p, msg = shortLog(msg)
+ debug "No send connection", p, msg = shortLog(msg)
return
trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
@@ -270,9 +269,42 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
await conn.close() # This will clean up the send connection
-proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
- trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
+iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
+  ## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
+  ## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
+  ## exceeds `maxSize` even in an otherwise empty `RPCMsg`, that Message is skipped as too large to send.
+  ## Every constructed `RPCMsg` is then encoded, optionally anonymized, and yielded as a sequence of bytes.
+
+ var currentRPCMsg = rpcMsg
+ currentRPCMsg.messages = newSeq[Message]()
+ var currentSize = byteSize(currentRPCMsg)
+
+ for msg in rpcMsg.messages:
+ let msgSize = byteSize(msg)
+
+ # Check if adding the next message will exceed maxSize
+ if float(currentSize + msgSize) * 1.1 > float(maxSize): # Guessing 10% protobuf overhead
+ if currentRPCMsg.messages.len == 0:
+ trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
+ continue # Skip this message
+
+ trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
+ yield encodeRpcMsg(currentRPCMsg, anonymize)
+ currentRPCMsg = RPCMsg()
+ currentSize = 0
+
+ currentRPCMsg.messages.add(msg)
+ currentSize += msgSize
+
+ # Check if there is a non-empty currentRPCMsg left to be added
+ if currentSize > 0 and currentRPCMsg.messages.len > 0:
+ trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
+ yield encodeRpcMsg(currentRPCMsg, anonymize)
+ else:
+ trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
+
+proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
# When sending messages, we take care to re-encode them with the right
# anonymization flag to ensure that we're not penalized for sending invalid
# or malicious data on the wire - in particular, re-encoding protects against
@@ -290,7 +322,13 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)
- asyncSpawn p.sendEncoded(encoded)
+ if encoded.len > p.maxMessageSize and msg.messages.len > 1:
+ for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
+ asyncSpawn p.sendEncoded(encodedSplitMsg)
+ else:
+ # If the message size is within limits, send it as is
+ trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
+ asyncSpawn p.sendEncoded(encoded)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
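
The batching logic in `splitRPCMsg` is easier to follow in isolation. A standalone sketch of the same greedy packing over plain integer sizes (the 1.1 factor mirrors the 10% protobuf-overhead guess above; everything else is illustrative):

```nim
iterator packBatches(sizes: seq[int], maxSize: int): seq[int] =
  ## Accumulate items until the overhead-adjusted total would exceed
  ## maxSize, then yield the batch and start a new one.
  var batch: seq[int]
  var total = 0
  for s in sizes:
    if float(total + s) * 1.1 > float(maxSize):
      if batch.len == 0:
        continue # a lone oversized item is skipped outright
      yield batch
      batch = @[]
      total = 0
    batch.add(s)
    total += s
  if batch.len > 0:
    yield batch

# with maxSize = 100: @[500, 40, 40, 40] yields @[40, 40], then @[40]
```

Note one subtlety faithful to the original: an oversized message is dropped immediately only when the running batch is empty; arriving mid-batch, it flushes the batch and then starts its own, which `sendEncoded` will later reject for exceeding `maxMessageSize`.
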
@@ -305,7 +343,8 @@ proc new*(
getConn: GetConn,
onEvent: OnEvent,
codec: string,
- maxMessageSize: int): T =
+ maxMessageSize: int,
+ overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket)): T =
result = T(
getConn: getConn,
@@ -313,6 +352,18 @@ proc new*(
codec: codec,
peerId: peerId,
connectedFut: newFuture[void](),
- maxMessageSize: maxMessageSize
+ maxMessageSize: maxMessageSize,
+ overheadRateLimitOpt: overheadRateLimitOpt
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
+ result.heDontWants.addFirst(default(HashSet[MessageId]))
+
+proc getAgent*(peer: PubSubPeer): string =
+ return
+ when defined(libp2p_agents_metrics):
+ if peer.shortAgent.len > 0:
+ peer.shortAgent
+ else:
+ "unknown"
+ else:
+ "unknown"
diff --git a/libp2p/protocols/pubsub/rpc/message.nim b/libp2p/protocols/pubsub/rpc/message.nim
index a37cc73809..4c90e3157b 100644
--- a/libp2p/protocols/pubsub/rpc/message.nim
+++ b/libp2p/protocols/pubsub/rpc/message.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronicles, metrics, stew/[byteutils, endians2]
import ./messages,
@@ -65,22 +62,21 @@ proc init*(
topic: string,
seqno: Option[uint64],
sign: bool = true): Message
- {.gcsafe, raises: [Defect, LPError].} =
+ {.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topicIDs: @[topic])
# order matters, we want to include seqno in the signature
- if seqno.isSome:
- msg.seqno = @(seqno.get().toBytesBE())
+ seqno.withValue(seqn):
+ msg.seqno = @(seqn.toBytesBE())
- if peer.isSome:
- let peer = peer.get()
+ peer.withValue(peer):
msg.fromPeer = peer.peerId
if sign:
msg.signature = sign(msg, peer.privateKey).expect("Couldn't sign message!")
msg.key = peer.privateKey.getPublicKey().expect("Invalid private key!")
.getBytes().expect("Couldn't get public key bytes!")
- elif sign:
- raise (ref LPError)(msg: "Cannot sign message without peer info")
+ else:
+ if sign: raise (ref LPError)(msg: "Cannot sign message without peer info")
msg
@@ -90,10 +86,10 @@ proc init*(
data: seq[byte],
topic: string,
seqno: Option[uint64]): Message
- {.gcsafe, raises: [Defect, LPError].} =
+ {.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topicIDs: @[topic])
msg.fromPeer = peerId
- if seqno.isSome:
- msg.seqno = @(seqno.get().toBytesBE())
+ seqno.withValue(seqn):
+ msg.seqno = @(seqn.toBytesBE())
msg
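
The recurring `isSome`/`get` to `withValue` rewrite throughout this diff uses the `withValue` template from `libp2p/utility`. Roughly, it behaves like this standalone approximation (the real template also covers stew's `Opt`):

```nim
import std/options

template withValue(opt, name, body: untyped) =
  ## Run body with the unwrapped value bound to name; do nothing for none.
  if opt.isSome:
    let name = opt.get()
    body

let seqno = some(42'u64)
seqno.withValue(seqn):
  echo "seqno present: ", seqn  # runs
none(uint64).withValue(seqn):
  echo "never reached"
```
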
diff --git a/libp2p/protocols/pubsub/rpc/messages.nim b/libp2p/protocols/pubsub/rpc/messages.nim
index 541782a8b6..77baded788 100644
--- a/libp2p/protocols/pubsub/rpc/messages.nim
+++ b/libp2p/protocols/pubsub/rpc/messages.nim
@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import options, sequtils
+import options, sequtils, sugar
import "../../.."/[
peerid,
routing_record,
@@ -21,6 +18,14 @@ import "../../.."/[
export options
+proc expectedFields[T](t: typedesc[T], existingFieldNames: seq[string]) {.raises: [CatchableError].} =
+ var fieldNames: seq[string]
+ for name, _ in fieldPairs(T()):
+ fieldNames &= name
+ if fieldNames != existingFieldNames:
+ fieldNames.keepIf(proc(it: string): bool = it notin existingFieldNames)
+ raise newException(CatchableError, $T & " fields changed, please search for and revise all relevant procs. New fields: " & $fieldNames)
+
type
PeerInfoMsg* = object
peerId*: PeerId
@@ -45,6 +50,7 @@ type
iwant*: seq[ControlIWant]
graft*: seq[ControlGraft]
prune*: seq[ControlPrune]
+ idontwant*: seq[ControlIWant]
ControlIHave* = object
topicId*: string
@@ -65,6 +71,8 @@ type
subscriptions*: seq[SubOpts]
messages*: seq[Message]
control*: Option[ControlMessage]
+ ping*: seq[byte]
+ pong*: seq[byte]
func withSubs*(
T: type RPCMsg, topics: openArray[string], subscribe: bool): T =
@@ -111,15 +119,59 @@ func shortLog*(msg: Message): auto =
)
func shortLog*(m: RPCMsg): auto =
- if m.control.isSome:
- (
- subscriptions: m.subscriptions,
- messages: mapIt(m.messages, it.shortLog),
- control: m.control.get().shortLog
- )
- else:
- (
- subscriptions: m.subscriptions,
- messages: mapIt(m.messages, it.shortLog),
- control: ControlMessage().shortLog
- )
+ (
+ subscriptions: m.subscriptions,
+ messages: mapIt(m.messages, it.shortLog),
+ control: m.control.get(ControlMessage()).shortLog
+ )
+
+static: expectedFields(PeerInfoMsg, @["peerId", "signedPeerRecord"])
+proc byteSize(peerInfo: PeerInfoMsg): int =
+ peerInfo.peerId.len + peerInfo.signedPeerRecord.len
+
+static: expectedFields(SubOpts, @["subscribe", "topic"])
+proc byteSize(subOpts: SubOpts): int =
+ 1 + subOpts.topic.len # 1 byte for the bool
+
+static: expectedFields(Message, @["fromPeer", "data", "seqno", "topicIds", "signature", "key"])
+proc byteSize*(msg: Message): int =
+ msg.fromPeer.len + msg.data.len + msg.seqno.len +
+ msg.signature.len + msg.key.len + msg.topicIds.foldl(a + b.len, 0)
+
+proc byteSize*(msgs: seq[Message]): int =
+ msgs.foldl(a + b.byteSize, 0)
+
+static: expectedFields(ControlIHave, @["topicId", "messageIds"])
+proc byteSize(controlIHave: ControlIHave): int =
+ controlIHave.topicId.len + controlIHave.messageIds.foldl(a + b.len, 0)
+
+proc byteSize*(ihaves: seq[ControlIHave]): int =
+ ihaves.foldl(a + b.byteSize, 0)
+
+static: expectedFields(ControlIWant, @["messageIds"])
+proc byteSize(controlIWant: ControlIWant): int =
+ controlIWant.messageIds.foldl(a + b.len, 0)
+
+proc byteSize*(iwants: seq[ControlIWant]): int =
+ iwants.foldl(a + b.byteSize, 0)
+
+static: expectedFields(ControlGraft, @["topicId"])
+proc byteSize(controlGraft: ControlGraft): int =
+ controlGraft.topicId.len
+
+static: expectedFields(ControlPrune, @["topicId", "peers", "backoff"])
+proc byteSize(controlPrune: ControlPrune): int =
+ controlPrune.topicId.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8 # 8 bytes for uint64
+
+static: expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
+proc byteSize(control: ControlMessage): int =
+ control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
+ control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
+ control.idontwant.foldl(a + b.byteSize, 0)
+
+static: expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])
+proc byteSize*(rpc: RPCMsg): int =
+ result = rpc.subscriptions.foldl(a + b.byteSize, 0) + byteSize(rpc.messages) +
+ rpc.ping.len + rpc.pong.len
+ rpc.control.withValue(ctrl):
+ result += ctrl.byteSize
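
The `static: expectedFields(...)` calls act as a compile-time tripwire: the `byteSize` procs silently ignore fields they don't know about, so adding a field to any of these types must break the build until the size accounting is revisited. A standalone illustration (the `Sample` type is invented):

```nim
type Sample = object
  a: int
  b: string

proc expectedFields[T](t: typedesc[T], existing: seq[string]) {.raises: [CatchableError].} =
  var names: seq[string]
  for name, _ in fieldPairs(T()):
    names.add name
  if names != existing:
    raise newException(CatchableError, $T & " fields changed: " & $names)

static: expectedFields(Sample, @["a", "b"]) # passes
# static: expectedFields(Sample, @["a"])    # would fail the build
```
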
diff --git a/libp2p/protocols/pubsub/rpc/protobuf.nim b/libp2p/protocols/pubsub/rpc/protobuf.nim
index d87a6b928a..4aa2e52106 100644
--- a/libp2p/protocols/pubsub/rpc/protobuf.nim
+++ b/libp2p/protocols/pubsub/rpc/protobuf.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import options
import stew/assign2
@@ -90,6 +87,8 @@ proc write*(pb: var ProtoBuffer, field: int, control: ControlMessage) =
ipb.write(3, graft)
for prune in control.prune:
ipb.write(4, prune)
+ for idontwant in control.idontwant:
+ ipb.write(5, idontwant)
if len(ipb.buffer) > 0:
ipb.finish()
pb.write(field, ipb)
@@ -213,6 +212,7 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
var iwantpbs: seq[seq[byte]]
var graftpbs: seq[seq[byte]]
var prunepbs: seq[seq[byte]]
+ var idontwant: seq[seq[byte]]
if ? cpb.getRepeatedField(1, ihavepbs):
for item in ihavepbs:
control.ihave.add(? decodeIHave(initProtoBuffer(item)))
@@ -225,6 +225,9 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
if ? cpb.getRepeatedField(4, prunepbs):
for item in prunepbs:
control.prune.add(? decodePrune(initProtoBuffer(item)))
+ if ? cpb.getRepeatedField(5, idontwant):
+ for item in idontwant:
+ control.idontwant.add(? decodeIWant(initProtoBuffer(item)))
trace "decodeControl: message statistics", graft_count = len(control.graft),
prune_count = len(control.prune),
ihave_count = len(control.ihave),
@@ -317,8 +320,14 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
pb.write(1, item)
for item in msg.messages:
pb.write(2, item, anonymize)
- if msg.control.isSome():
- pb.write(3, msg.control.get())
+ msg.control.withValue(control):
+ pb.write(3, control)
+  # nim-libp2p extension, using field numbers (60, 61) that are
+  # unlikely to collide with other extensions
+ if msg.ping.len > 0:
+ pb.write(60, msg.ping)
+ if msg.pong.len > 0:
+ pb.write(61, msg.pong)
if len(pb.buffer) > 0:
pb.finish()
pb.buffer
@@ -326,8 +335,10 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
var pb = initProtoBuffer(msg, maxSize = uint.high)
- var rpcMsg = ok(RPCMsg())
- assign(rpcMsg.get().messages, ? pb.decodeMessages())
- assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())
- assign(rpcMsg.get().control, ? pb.decodeControl())
- rpcMsg
+ var rpcMsg = RPCMsg()
+ assign(rpcMsg.messages, ? pb.decodeMessages())
+ assign(rpcMsg.subscriptions, ? pb.decodeSubscriptions())
+ assign(rpcMsg.control, ? pb.decodeControl())
+ discard ? pb.getField(60, rpcMsg.ping)
+ discard ? pb.getField(61, rpcMsg.pong)
+ ok(rpcMsg)
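
Assuming the rpc modules import as in nim-libp2p's own tests, the new ping/pong fields (wire numbers 60 and 61) round-trip through the codec like this sketch:

```nim
import stew/results
import libp2p/protocols/pubsub/rpc/[messages, protobuf]

let msg = RPCMsg(ping: @[1'u8, 2, 3])
let encoded = encodeRpcMsg(msg, anonymize = false)
let decoded = decodeRpcMsg(encoded).expect("decodable message")
doAssert decoded.ping == @[1'u8, 2, 3]
doAssert decoded.pong.len == 0
```
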
diff --git a/libp2p/protocols/pubsub/timedcache.nim b/libp2p/protocols/pubsub/timedcache.nim
index 9bcfed1fc4..fbac8db6bf 100644
--- a/libp2p/protocols/pubsub/timedcache.nim
+++ b/libp2p/protocols/pubsub/timedcache.nim
@@ -7,15 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[tables]
import chronos/timer, stew/results
+import ../../utility
+
const Timeout* = 10.seconds # default timeout
type
@@ -58,9 +57,9 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
var previous = t.del(k) # Refresh existing item
- let addedAt =
- if previous.isSome: previous.get().addedAt
- else: now
+ var addedAt = now
+ previous.withValue(previous):
+ addedAt = previous.addedAt
let node = TimedEntry[K](key: k, addedAt: addedAt, expiresAt: now + t.timeout)
diff --git a/libp2p/protocols/rendezvous.nim b/libp2p/protocols/rendezvous.nim
index 3a994456da..d2ae7920ab 100644
--- a/libp2p/protocols/rendezvous.nim
+++ b/libp2p/protocols/rendezvous.nim
@@ -7,16 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import tables, sequtils, sugar, sets, options
+import tables, sequtils, sugar, sets
+import metrics except collect
import chronos,
chronicles,
bearssl/rand,
- stew/[byteutils, objects]
+ stew/[byteutils, objects, results]
import ./protocol,
../switch,
../routing_record,
@@ -30,6 +28,11 @@ export chronicles
logScope:
topics = "libp2p discovery rendezvous"
+declareCounter(libp2p_rendezvous_register, "number of advertise requests")
+declareCounter(libp2p_rendezvous_discover, "number of discovery requests")
+declareGauge(libp2p_rendezvous_registered, "number of registered peers")
+declareGauge(libp2p_rendezvous_namespaces, "number of registered namespaces")
+
const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
@@ -65,34 +68,34 @@ type
Register = object
ns : string
signedPeerRecord: seq[byte]
- ttl: Option[uint64] # in seconds
+ ttl: Opt[uint64] # in seconds
RegisterResponse = object
status: ResponseStatus
- text: Option[string]
- ttl: Option[uint64] # in seconds
+ text: Opt[string]
+ ttl: Opt[uint64] # in seconds
Unregister = object
ns: string
Discover = object
ns: string
- limit: Option[uint64]
- cookie: Option[seq[byte]]
+ limit: Opt[uint64]
+ cookie: Opt[seq[byte]]
DiscoverResponse = object
registrations: seq[Register]
- cookie: Option[seq[byte]]
+ cookie: Opt[seq[byte]]
status: ResponseStatus
- text: Option[string]
+ text: Opt[string]
Message = object
msgType: MessageType
- register: Option[Register]
- registerResponse: Option[RegisterResponse]
- unregister: Option[Unregister]
- discover: Option[Discover]
- discoverResponse: Option[DiscoverResponse]
+ register: Opt[Register]
+ registerResponse: Opt[RegisterResponse]
+ unregister: Opt[Unregister]
+ discover: Opt[Discover]
+ discoverResponse: Opt[DiscoverResponse]
proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
@@ -104,17 +107,17 @@ proc encode(r: Register): ProtoBuffer =
result = initProtoBuffer()
result.write(1, r.ns)
result.write(2, r.signedPeerRecord)
- if r.ttl.isSome():
- result.write(3, r.ttl.get())
+ r.ttl.withValue(ttl):
+ result.write(3, ttl)
result.finish()
proc encode(rr: RegisterResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, rr.status.uint)
- if rr.text.isSome():
- result.write(2, rr.text.get())
- if rr.ttl.isSome():
- result.write(3, rr.ttl.get())
+ rr.text.withValue(text):
+ result.write(2, text)
+ rr.ttl.withValue(ttl):
+ result.write(3, ttl)
result.finish()
proc encode(u: Unregister): ProtoBuffer =
@@ -125,48 +128,48 @@ proc encode(u: Unregister): ProtoBuffer =
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
result.write(1, d.ns)
- if d.limit.isSome():
- result.write(2, d.limit.get())
- if d.cookie.isSome():
- result.write(3, d.cookie.get())
+ d.limit.withValue(limit):
+ result.write(2, limit)
+ d.cookie.withValue(cookie):
+ result.write(3, cookie)
result.finish()
-proc encode(d: DiscoverResponse): ProtoBuffer =
+proc encode(dr: DiscoverResponse): ProtoBuffer =
result = initProtoBuffer()
- for reg in d.registrations:
+ for reg in dr.registrations:
result.write(1, reg.encode())
- if d.cookie.isSome():
- result.write(2, d.cookie.get())
- result.write(3, d.status.uint)
- if d.text.isSome():
- result.write(4, d.text.get())
+ dr.cookie.withValue(cookie):
+ result.write(2, cookie)
+ result.write(3, dr.status.uint)
+ dr.text.withValue(text):
+ result.write(4, text)
result.finish()
proc encode(msg: Message): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
- if msg.register.isSome():
- result.write(2, msg.register.get().encode())
- if msg.registerResponse.isSome():
- result.write(3, msg.registerResponse.get().encode())
- if msg.unregister.isSome():
- result.write(4, msg.unregister.get().encode())
- if msg.discover.isSome():
- result.write(5, msg.discover.get().encode())
- if msg.discoverResponse.isSome():
- result.write(6, msg.discoverResponse.get().encode())
+ msg.register.withValue(register):
+ result.write(2, register.encode())
+ msg.registerResponse.withValue(registerResponse):
+ result.write(3, registerResponse.encode())
+ msg.unregister.withValue(unregister):
+ result.write(4, unregister.encode())
+ msg.discover.withValue(discover):
+ result.write(5, discover.encode())
+ msg.discoverResponse.withValue(discoverResponse):
+ result.write(6, discoverResponse.encode())
result.finish()
-proc decode(_: typedesc[Cookie], buf: seq[byte]): Option[Cookie] =
+proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var c: Cookie
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
- if r1.isErr() or r2.isErr(): return none(Cookie)
- some(c)
+ if r1.isErr() or r2.isErr(): return Opt.none(Cookie)
+ Opt.some(c)
-proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
+proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
var
r: Register
ttl: uint64
@@ -175,11 +178,11 @@ proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
r1 = pb.getRequiredField(1, r.ns)
r2 = pb.getRequiredField(2, r.signedPeerRecord)
r3 = pb.getField(3, ttl)
- if r1.isErr() or r2.isErr() or r3.isErr(): return none(Register)
- if r3.get(): r.ttl = some(ttl)
- some(r)
+ if r1.isErr() or r2.isErr() or r3.isErr(): return Opt.none(Register)
+ if r3.get(false): r.ttl = Opt.some(ttl)
+ Opt.some(r)
-proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterResponse] =
+proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
var
rr: RegisterResponse
statusOrd: uint
@@ -191,20 +194,20 @@ proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterRespo
r2 = pb.getField(2, text)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr() or
- not checkedEnumAssign(rr.status, statusOrd): return none(RegisterResponse)
- if r2.get(): rr.text = some(text)
- if r3.get(): rr.ttl = some(ttl)
- some(rr)
+ not checkedEnumAssign(rr.status, statusOrd): return Opt.none(RegisterResponse)
+ if r2.get(false): rr.text = Opt.some(text)
+ if r3.get(false): rr.ttl = Opt.some(ttl)
+ Opt.some(rr)
-proc decode(_: typedesc[Unregister], buf: seq[byte]): Option[Unregister] =
+proc decode(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
var u: Unregister
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, u.ns)
- if r1.isErr(): return none(Unregister)
- some(u)
+ if r1.isErr(): return Opt.none(Unregister)
+ Opt.some(u)
-proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
+proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
var
d: Discover
limit: uint64
@@ -214,12 +217,12 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
r1 = pb.getRequiredField(1, d.ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
- if r1.isErr() or r2.isErr() or r3.isErr: return none(Discover)
- if r2.get(): d.limit = some(limit)
- if r3.get(): d.cookie = some(cookie)
- some(d)
+ if r1.isErr() or r2.isErr() or r3.isErr: return Opt.none(Discover)
+ if r2.get(false): d.limit = Opt.some(limit)
+ if r3.get(false): d.cookie = Opt.some(cookie)
+ Opt.some(d)
-proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverResponse] =
+proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
var
dr: DiscoverResponse
registrations: seq[seq[byte]]
@@ -233,48 +236,47 @@ proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverRespo
r3 = pb.getRequiredField(3, statusOrd)
r4 = pb.getField(4, text)
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
- not checkedEnumAssign(dr.status, statusOrd): return none(DiscoverResponse)
+ not checkedEnumAssign(dr.status, statusOrd): return Opt.none(DiscoverResponse)
for reg in registrations:
var r: Register
- let regOpt = Register.decode(reg)
- if regOpt.isNone(): return none(DiscoverResponse)
- dr.registrations.add(regOpt.get())
- if r2.get(): dr.cookie = some(cookie)
- if r4.get(): dr.text = some(text)
- some(dr)
-
-proc decode(_: typedesc[Message], buf: seq[byte]): Option[Message] =
+ let regOpt = Register.decode(reg).valueOr:
+ return
+ dr.registrations.add(regOpt)
+ if r2.get(false): dr.cookie = Opt.some(cookie)
+ if r4.get(false): dr.text = Opt.some(text)
+ Opt.some(dr)
+
+proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
var
msg: Message
statusOrd: uint
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
- let
- pb = initProtoBuffer(buf)
- r1 = pb.getRequiredField(1, statusOrd)
- r2 = pb.getField(2, pbr)
- r3 = pb.getField(3, pbrr)
- r4 = pb.getField(4, pbu)
- r5 = pb.getField(5, pbd)
- r6 = pb.getField(6, pbdr)
- if r1.isErr() or r2.isErr() or r3.isErr() or
- r4.isErr() or r5.isErr() or r6.isErr() or
- not checkedEnumAssign(msg.msgType, statusOrd): return none(Message)
- if r2.get():
+ let pb = initProtoBuffer(buf)
+
+ ? pb.getRequiredField(1, statusOrd).toOpt
+ if not checkedEnumAssign(msg.msgType, statusOrd): return Opt.none(Message)
+
+ if ? pb.getField(2, pbr).optValue:
msg.register = Register.decode(pbr.buffer)
- if msg.register.isNone(): return none(Message)
- if r3.get():
+ if msg.register.isNone(): return Opt.none(Message)
+
+ if ? pb.getField(3, pbrr).optValue:
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
- if msg.registerResponse.isNone(): return none(Message)
- if r4.get():
+ if msg.registerResponse.isNone(): return Opt.none(Message)
+
+ if ? pb.getField(4, pbu).optValue:
msg.unregister = Unregister.decode(pbu.buffer)
- if msg.unregister.isNone(): return none(Message)
- if r5.get():
+ if msg.unregister.isNone(): return Opt.none(Message)
+
+ if ? pb.getField(5, pbd).optValue:
msg.discover = Discover.decode(pbd.buffer)
- if msg.discover.isNone(): return none(Message)
- if r6.get():
+ if msg.discover.isNone(): return Opt.none(Message)
+
+ if ? pb.getField(6, pbdr).optValue:
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
- if msg.discoverResponse.isNone(): return none(Message)
- some(msg)
+ if msg.discoverResponse.isNone(): return Opt.none(Message)
+
+ Opt.some(msg)
type
@@ -314,7 +316,7 @@ proc sendRegisterResponse(conn: Connection,
ttl: uint64) {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
- registerResponse: some(RegisterResponse(status: Ok, ttl: some(ttl)))))
+ registerResponse: Opt.some(RegisterResponse(status: Ok, ttl: Opt.some(ttl)))))
await conn.writeLp(msg.buffer)
proc sendRegisterResponseError(conn: Connection,
@@ -322,7 +324,7 @@ proc sendRegisterResponseError(conn: Connection,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
- registerResponse: some(RegisterResponse(status: status, text: some(text)))))
+ registerResponse: Opt.some(RegisterResponse(status: status, text: Opt.some(text)))))
await conn.writeLp(msg.buffer)
proc sendDiscoverResponse(conn: Connection,
@@ -330,10 +332,10 @@ proc sendDiscoverResponse(conn: Connection,
cookie: Cookie) {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
- discoverResponse: some(DiscoverResponse(
+ discoverResponse: Opt.some(DiscoverResponse(
status: Ok,
registrations: s,
- cookie: some(cookie.encode().buffer)
+ cookie: Opt.some(cookie.encode().buffer)
))
))
await conn.writeLp(msg.buffer)
@@ -343,7 +345,7 @@ proc sendDiscoverResponseError(conn: Connection,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
- discoverResponse: some(DiscoverResponse(status: status, text: some(text)))))
+ discoverResponse: Opt.some(DiscoverResponse(status: status, text: Opt.some(text)))))
await conn.writeLp(msg.buffer)
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
@@ -378,6 +380,7 @@ proc save(rdv: RendezVous,
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
+ libp2p_rendezvous_register.inc()
if r.ns.len notin 1..255:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(MinimumTTL)
@@ -389,6 +392,8 @@ proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
rdv.save(r.ns, conn.peerId, r)
+ libp2p_rendezvous_registered.inc()
+ libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
conn.sendRegisterResponse(ttl)
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
@@ -398,11 +403,13 @@ proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == conn.peerId:
rdv.registered[index].expiration = rdv.defaultDT
+ libp2p_rendezvous_registered.dec()
except KeyError:
return
proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
+ libp2p_rendezvous_discover.inc()
if d.ns.len notin 0..255:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
@@ -411,7 +418,7 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
cookie =
if d.cookie.isSome():
try:
- Cookie.decode(d.cookie.get()).get()
+ Cookie.decode(d.cookie.tryGet()).tryGet()
except CatchableError:
await conn.sendDiscoverResponseError(InvalidCookie)
return
@@ -442,7 +449,7 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
break
if reg.expiration < n or index.uint64 <= cookie.offset: continue
limit.dec()
- reg.data.ttl = some((reg.expiration - Moment.now()).seconds.uint64)
+ reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
reg.data
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
@@ -457,12 +464,13 @@ proc advertisePeer(rdv: RendezVous,
await conn.writeLp(msg)
let
buf = await conn.readLp(4096)
- msgRecv = Message.decode(buf).get()
+ msgRecv = Message.decode(buf).tryGet()
if msgRecv.msgType != MessageType.RegisterResponse:
trace "Unexpected register response", peer, msgType = msgRecv.msgType
- elif msgRecv.registerResponse.isNone() or
- msgRecv.registerResponse.get().status != ResponseStatus.Ok:
+ elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
trace "Refuse to register", peer, response = msgRecv.registerResponse
+ else:
+ trace "Successfully registered", peer, response = msgRecv.registerResponse
except CatchableError as exc:
trace "exception in the advertise", error = exc.msg
finally:
@@ -470,19 +478,18 @@ proc advertisePeer(rdv: RendezVous,
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)
-proc advertise*(rdv: RendezVous,
+method advertise*(rdv: RendezVous,
ns: string,
- ttl: Duration = MinimumDuration) {.async.} =
- let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode()
- if sprBuff.isErr():
+ ttl: Duration = MinimumDuration) {.async, base.} =
+ let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(RendezVousError, "Wrong Signed Peer Record")
if ns.len notin 1..255:
raise newException(RendezVousError, "Invalid namespace")
if ttl notin MinimumDuration..MaximumDuration:
raise newException(RendezVousError, "Invalid time to live")
let
- r = Register(ns: ns, signedPeerRecord: sprBuff.get(), ttl: some(ttl.seconds.uint64))
- msg = encode(Message(msgType: MessageType.Register, register: some(r)))
+ r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
+ msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
let fut = collect(newSeq()):
for peer in rdv.peers:
@@ -498,7 +505,9 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
collect(newSeq()):
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].expiration > n:
- SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).get().data
+ let res = SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).valueOr:
+ continue
+ res.data
except KeyError as exc:
@[]
@@ -519,38 +528,42 @@ proc request*(rdv: RendezVous,
proc requestPeer(peer: PeerId) {.async.} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer: await conn.close()
- d.limit = some(limit)
+ d.limit = Opt.some(limit)
d.cookie =
try:
- some(rdv.cookiesSaved[peer][ns])
+ Opt.some(rdv.cookiesSaved[peer][ns])
except KeyError as exc:
- none(seq[byte])
+ Opt.none(seq[byte])
await conn.writeLp(encode(Message(
msgType: MessageType.Discover,
- discover: some(d))).buffer)
+ discover: Opt.some(d))).buffer)
let
buf = await conn.readLp(65536)
- msgRcv = Message.decode(buf).get()
- if msgRcv.msgType != MessageType.DiscoverResponse or
- msgRcv.discoverResponse.isNone():
+ msgRcv = Message.decode(buf).valueOr:
+ debug "Message undecodable"
+ return
+ if msgRcv.msgType != MessageType.DiscoverResponse:
debug "Unexpected discover response", msgType = msgRcv.msgType
return
- let resp = msgRcv.discoverResponse.get()
+ let resp = msgRcv.discoverResponse.valueOr:
+ debug "Discover response is empty"
+ return
if resp.status != ResponseStatus.Ok:
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
- if resp.cookie.isSome() and resp.cookie.get().len < 1000:
- if rdv.cookiesSaved.hasKeyOrPut(peer, {ns: resp.cookie.get()}.toTable):
- rdv.cookiesSaved[peer][ns] = resp.cookie.get()
+ resp.cookie.withValue(cookie):
+ if cookie.len() < 1000 and rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
+ rdv.cookiesSaved[peer][ns] = cookie
for r in resp.registrations:
if limit == 0: return
- if r.ttl.isNone() or r.ttl.get() > MaximumTTL: continue
- let sprRes = SignedPeerRecord.decode(r.signedPeerRecord)
- if sprRes.isErr(): continue
- let pr = sprRes.get().data
+ let ttl = r.ttl.get(MaximumTTL + 1)
+ if ttl > MaximumTTL: continue
+ let
+ spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr: continue
+ pr = spr.data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) = s[pr.peerId]
- if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get() < r.ttl.get()) or
+ if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(MaximumTTL) < ttl) or
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
@@ -589,7 +602,7 @@ proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
rdv.unsubscribeLocally(ns)
let msg = encode(Message(
msgType: MessageType.Unregister,
- unregister: some(Unregister(ns: ns))))
+ unregister: Opt.some(Unregister(ns: ns))))
proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
try:
@@ -627,13 +640,13 @@ proc new*(T: typedesc[RendezVous],
try:
let
buf = await conn.readLp(4096)
- msg = Message.decode(buf).get()
+ msg = Message.decode(buf).tryGet()
case msg.msgType:
- of MessageType.Register: await rdv.register(conn, msg.register.get())
+ of MessageType.Register: await rdv.register(conn, msg.register.tryGet())
of MessageType.RegisterResponse:
trace "Got an unexpected Register Response", response = msg.registerResponse
- of MessageType.Unregister: rdv.unregister(conn, msg.unregister.get())
- of MessageType.Discover: await rdv.discover(conn, msg.discover.get())
+ of MessageType.Unregister: rdv.unregister(conn, msg.unregister.tryGet())
+ of MessageType.Discover: await rdv.discover(conn, msg.discover.tryGet())
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
@@ -657,9 +670,13 @@ proc new*(T: typedesc[RendezVous],
proc deletesRegister(rdv: RendezVous) {.async.} =
heartbeat "Register timeout", 1.minutes:
let n = Moment.now()
+ var total = 0
rdv.registered.flushIfIt(it.expiration < n)
for data in rdv.namespaces.mvalues():
data.keepItIf(it >= rdv.registered.offset)
+ total += data.len
+ libp2p_rendezvous_registered.set(int64(total))
+ libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
method start*(rdv: RendezVous) {.async.} =
if not rdv.registerDeletionLoop.isNil:
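
The four rendezvous metrics above follow the standard nim-metrics pattern (and, as elsewhere in libp2p, compile to no-ops unless built with `-d:metrics`). A minimal sketch with placeholder metric names:

```nim
import metrics

declareCounter(example_requests, "number of requests handled")
declareGauge(example_registered, "currently registered entries")

proc onRequest() =
  example_requests.inc()            # counters only ever go up
  example_registered.set(int64(42)) # gauges can move in both directions
```
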
diff --git a/libp2p/protocols/secure/noise.nim b/libp2p/protocols/secure/noise.nim
index 41a8ed02de..4c3de72b7e 100644
--- a/libp2p/protocols/secure/noise.nim
+++ b/libp2p/protocols/secure/noise.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/strformat
import chronos
@@ -136,7 +133,7 @@ proc encrypt(
state: var CipherState,
data: var openArray[byte],
ad: openArray[byte]): ChaChaPolyTag
- {.noinit, raises: [Defect, NoiseNonceMaxError].} =
+ {.noinit, raises: [NoiseNonceMaxError].} =
var nonce: ChaChaPolyNonce
nonce[4..<12] = toBytesLE(state.n)
@@ -148,7 +145,7 @@ proc encrypt(
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
- {.raises: [Defect, NoiseNonceMaxError].} =
+ {.raises: [NoiseNonceMaxError].} =
result = newSeqOfCap[byte](data.len + sizeof(ChaChaPolyTag))
result.add(data)
@@ -160,7 +157,7 @@ proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
tag = byteutils.toHex(tag), data = result.shortLog, nonce = state.n - 1
proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
- {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
+ {.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
var
tagIn = data.toOpenArray(data.len - ChaChaPolyTag.len, data.high).intoChaChaPolyTag
tagOut: ChaChaPolyTag
@@ -209,7 +206,7 @@ proc mixKeyAndHash(ss: var SymmetricState, ikm: openArray[byte]) {.used.} =
ss.cs = CipherState(k: temp_keys[2])
proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
- {.raises: [Defect, NoiseNonceMaxError].} =
+ {.raises: [NoiseNonceMaxError].} =
# according to spec if key is empty leave plaintext
if ss.cs.hasKey:
result = ss.cs.encryptWithAd(ss.h.data, data)
@@ -218,7 +215,7 @@ proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
ss.mixHash(result)
proc decryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
- {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
+ {.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
# according to spec if key is empty leave plaintext
if ss.cs.hasKey and data.len > ChaChaPolyTag.len:
result = ss.cs.decryptWithAd(ss.h.data, data)
@@ -448,7 +445,7 @@ proc encryptFrame(
sconn: NoiseConnection,
cipherFrame: var openArray[byte],
src: openArray[byte])
- {.raises: [Defect, NoiseNonceMaxError].} =
+ {.raises: [NoiseNonceMaxError].} =
# Frame consists of length + cipher data + tag
doAssert src.len <= MaxPlainSize
doAssert cipherFrame.len == 2 + src.len + sizeof(ChaChaPolyTag)
@@ -557,8 +554,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
trace "Remote peer id", pid = $pid
- if peerId.isSome():
- let targetPid = peerId.get()
+ peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(NoiseHandshakeError, "Failed to validate expected peerId.")
diff --git a/libp2p/protocols/secure/plaintext.nim b/libp2p/protocols/secure/plaintext.nim
index 04d08fe5c9..48ada7631a 100644
--- a/libp2p/protocols/secure/plaintext.nim
+++ b/libp2p/protocols/secure/plaintext.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos
import secure, ../../stream/connection
diff --git a/libp2p/protocols/secure/secio.nim b/libp2p/protocols/secure/secio.nim
index 0674b54e2b..435acaf2c9 100644
--- a/libp2p/protocols/secure/secio.nim
+++ b/libp2p/protocols/secure/secio.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[oids, strformat]
import bearssl/rand
@@ -262,7 +259,7 @@ proc newSecioConn(conn: Connection,
secrets: Secret,
order: int,
remotePubKey: PublicKey): SecioConn
- {.raises: [Defect, LPError].} =
+ {.raises: [LPError].} =
## Create new secure stream/lpstream, using specified hash algorithm ``hash``,
## cipher algorithm ``cipher``, stretched keys ``secrets`` and order
## ``order``.
@@ -342,8 +339,7 @@ method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerI
remotePeerId = PeerId.init(remotePubkey).tryGet()
- if peerId.isSome():
- let targetPid = peerId.get()
+ peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(SecioError, "Failed to validate expected peerId.")
@@ -439,14 +435,10 @@ proc new*(
T: typedesc[Secio],
rng: ref HmacDrbgContext,
localPrivateKey: PrivateKey): T =
- let pkRes = localPrivateKey.getPublicKey()
- if pkRes.isErr:
- raise newException(Defect, "Invalid private key")
-
let secio = Secio(
rng: rng,
localPrivateKey: localPrivateKey,
- localPublicKey: pkRes.get(),
+ localPublicKey: localPrivateKey.getPublicKey().expect("Invalid private key"),
)
secio.init()
secio
diff --git a/libp2p/protocols/secure/secure.nim b/libp2p/protocols/secure/secure.nim
index e915934c9b..96526714dd 100644
--- a/libp2p/protocols/secure/secure.nim
+++ b/libp2p/protocols/secure/secure.nim
@@ -8,10 +8,7 @@
# those terms.
{.push gcsafe.}
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[strformat]
import stew/results
diff --git a/libp2p/routing_record.nim b/libp2p/routing_record.nim
index de39891efd..e5a059b3a8 100644
--- a/libp2p/routing_record.nim
+++ b/libp2p/routing_record.nim
@@ -9,10 +9,7 @@
## This module implements Routing Records.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sequtils, times]
import pkg/stew/results
@@ -45,14 +42,12 @@ proc decode*(
? pb.getRequiredField(2, record.seqNo)
var addressInfos: seq[seq[byte]]
- let pb3 = ? pb.getRepeatedField(3, addressInfos)
-
- if pb3:
+ if ? pb.getRepeatedField(3, addressInfos):
for address in addressInfos:
var addressInfo = AddressInfo()
let subProto = initProtoBuffer(address)
let f = subProto.getField(1, addressInfo.address)
- if f.isOk() and f.get():
+ if f.get(false):
record.addresses &= addressInfo
if record.addresses.len == 0:
diff --git a/libp2p/services/autorelayservice.nim b/libp2p/services/autorelayservice.nim
index 5dd71faa2a..dc1c861941 100644
--- a/libp2p/services/autorelayservice.nim
+++ b/libp2p/services/autorelayservice.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos, chronicles, times, tables, sequtils
import ../switch,
@@ -20,7 +17,7 @@ logScope:
topics = "libp2p autorelay"
type
- OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [Defect].}
+ OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [].}
AutoRelayService* = ref object of Service
running: bool
@@ -35,6 +32,9 @@ type
addressMapper: AddressMapper
rng: ref HmacDrbgContext
+proc isRunning*(self: AutoRelayService): bool =
+ return self.running
+
proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
diff --git a/libp2p/services/hpservice.nim b/libp2p/services/hpservice.nim
index bc39c1b1ca..68055d2759 100644
--- a/libp2p/services/hpservice.nim
+++ b/libp2p/services/hpservice.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[tables, sequtils]
@@ -33,13 +30,9 @@ type
onNewStatusHandler: StatusAndConfidenceHandler
autoRelayService: AutoRelayService
autonatService: AutonatService
- isPublicIPAddrProc: IsPublicIPAddrProc
- IsPublicIPAddrProc* = proc(ta: TransportAddress): bool {.gcsafe, raises: [Defect].}
-
-proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService,
- isPublicIPAddrProc: IsPublicIPAddrProc = isGlobal): T =
- return T(autonatService: autonatService, autoRelayService: autoRelayService, isPublicIPAddrProc: isPublicIPAddrProc)
+proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService): T =
+ return T(autonatService: autonatService, autoRelayService: autoRelayService)
proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} =
proc tryConnect(address: MultiAddress): Future[bool] {.async.} =
@@ -52,14 +45,8 @@ proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Fut
for address in switch.peerStore[AddressBook][peerId]:
try:
let isRelayed = address.contains(multiCodec("p2p-circuit"))
- if isRelayed.isErr() or isRelayed.get():
- continue
- if DNS.matchPartial(address):
+ if not isRelayed.get(false) and address.isPublicMA():
return await tryConnect(address)
- else:
- let ta = initTAddress(address)
- if ta.isOk() and self.isPublicIPAddrProc(ta.get()):
- return await tryConnect(address)
except CatchableError as err:
debug "Failed to create direct connection.", err = err.msg
continue
@@ -107,10 +94,10 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
- self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
- if networkReachability == NetworkReachability.NotReachable:
+ self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+ if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
- elif networkReachability == NetworkReachability.Reachable:
+ elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():
discard await self.autoRelayService.stop(switch)
# We do it here instead of in the AutonatService because this is useful only when hole punching.
diff --git a/libp2p/signed_envelope.nim b/libp2p/signed_envelope.nim
index 8e7c1d8ce1..402d6494cd 100644
--- a/libp2p/signed_envelope.nim
+++ b/libp2p/signed_envelope.nim
@@ -9,10 +9,7 @@
## This module implements Signed Envelope.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/sugar
import pkg/stew/[results, byteutils]
@@ -115,19 +112,12 @@ proc getField*(pb: ProtoBuffer, field: int,
if not(res):
ok(false)
else:
- let env = Envelope.decode(buffer, domain)
- if env.isOk():
- value = env.get()
- ok(true)
- else:
- err(ProtoError.IncorrectBlob)
+ value = Envelope.decode(buffer, domain).valueOr: return err(ProtoError.IncorrectBlob)
+ ok(true)
proc write*(pb: var ProtoBuffer, field: int, env: Envelope): Result[void, CryptoError] =
- let e = env.encode()
-
- if e.isErr():
- return err(e.error)
- pb.write(field, e.get())
+ let e = ? env.encode()
+ pb.write(field, e)
ok()
type
@@ -145,7 +135,7 @@ proc init*[T](_: typedesc[SignedPayload[T]],
T.payloadType(),
data.encode(),
T.payloadDomain)
-
+
ok(SignedPayload[T](data: data, envelope: envelope))
proc getField*[T](pb: ProtoBuffer, field: int,
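
The rewritten `write` above leans on stew/results' `?` operator, which unwraps an `ok` value or returns the `err` to the caller on the spot. A self-contained sketch with an invented error type:

```nim
import stew/results

type ParseError = enum
  InvalidInput

proc double(x: int): Result[int, ParseError] =
  if x < 0: err(InvalidInput)
  else: ok(x * 2)

proc doubleThenInc(x: int): Result[int, ParseError] =
  # `?` propagates err(InvalidInput) out of this proc immediately,
  # just as `let e = ? env.encode()` does above
  let doubled = ? double(x)
  ok(doubled + 1)

doAssert doubleThenInc(3).get() == 7
doAssert doubleThenInc(-1).isErr
```
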
diff --git a/libp2p/stream/bufferstream.nim b/libp2p/stream/bufferstream.nim
index 3dcfbdb0c2..558cf2df45 100644
--- a/libp2p/stream/bufferstream.nim
+++ b/libp2p/stream/bufferstream.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/strformat
import stew/byteutils
diff --git a/libp2p/stream/chronosstream.nim b/libp2p/stream/chronosstream.nim
index f842886e4f..b93d46f8da 100644
--- a/libp2p/stream/chronosstream.nim
+++ b/libp2p/stream/chronosstream.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[strformat]
import stew/results
@@ -132,7 +129,9 @@ method write*(s: ChronosStream, msg: seq[byte]): Future[void] =
# drives up memory usage
if msg.len == 0:
trace "Empty byte seq, nothing to write"
- return
+ let fut = newFuture[void]("chronosstream.write.empty")
+ fut.complete()
+ return fut
if s.closed:
let fut = newFuture[void]("chronosstream.write.closed")
fut.fail(newLPStreamClosedError())
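
The empty-write change above fixes a subtle hazard: `write` returns `Future[void]` but is not itself `async`, so a bare `return` would hand the caller a nil future, which crashes when awaited. The pattern in isolation:

```nim
import chronos

proc writeNothing(): Future[void] =
  # Not an async proc: construct and complete the future ourselves
  # instead of returning nil via a bare `return`.
  let fut = newFuture[void]("example.writeNothing")
  fut.complete()
  fut
```
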
diff --git a/libp2p/stream/connection.nim b/libp2p/stream/connection.nim
index 86539a0588..faa595d458 100644
--- a/libp2p/stream/connection.nim
+++ b/libp2p/stream/connection.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[hashes, oids, strformat]
import stew/results
@@ -30,7 +27,7 @@ const
DefaultConnectionTimeout* = 5.minutes
type
- TimeoutHandler* = proc(): Future[void] {.gcsafe, raises: [Defect].}
+ TimeoutHandler* = proc(): Future[void] {.gcsafe, raises: [].}
Connection* = ref object of LPStream
activity*: bool # reset every time data is sent or received
diff --git a/libp2p/stream/lpstream.nim b/libp2p/stream/lpstream.nim
index 6dfe501c4f..8357333227 100644
--- a/libp2p/stream/lpstream.nim
+++ b/libp2p/stream/lpstream.nim
@@ -10,10 +10,7 @@
## Length Prefixed stream implementation
{.push gcsafe.}
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/oids
import stew/byteutils
@@ -279,7 +276,7 @@ proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, publ
if length == 0:
return
- var res = newSeq[byte](length)
+ var res = newSeqUninitialized[byte](length)
await s.readExactly(addr res[0], res.len)
return res
diff --git a/libp2p/stream/streamseq.nim b/libp2p/stream/streamseq.nim
index f91b9bdf70..881e505cd8 100644
--- a/libp2p/stream/streamseq.nim
+++ b/libp2p/stream/streamseq.nim
@@ -1,7 +1,13 @@
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+# Nim-LibP2P
+# Copyright (c) 2023 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+{.push raises: [].}
import stew/bitops2
diff --git a/libp2p/switch.nim b/libp2p/switch.nim
index 2318578996..dabbc2f700 100644
--- a/libp2p/switch.nim
+++ b/libp2p/switch.nim
@@ -11,10 +11,7 @@
## transports, the connection manager, the upgrader and other
## parts to allow programs to use libp2p
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[tables,
options,
@@ -198,7 +195,7 @@ proc dial*(
dial(s, peerId, addrs, @[proto])
proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
- {.gcsafe, raises: [Defect, LPError], public.} =
+ {.gcsafe, raises: [LPError], public.} =
## mount a protocol to the switch
if isNil(proto.handler):
@@ -261,12 +258,8 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises
if isNil(conn):
# A nil connection means that we might have hit a
# file-handle limit (or another non-fatal error),
- # we can get one on the next try, but we should
- # be careful to not end up in a thigh loop that
- # will starve the main event loop, thus we sleep
- # here before retrying.
- trace "Unable to get a connection, sleeping"
- await sleepAsync(100.millis) # TODO: should be configurable?
+ # we can get one on the next try
+ debug "Unable to get a connection"
upgrades.release()
continue
@@ -281,7 +274,7 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises
trace "releasing semaphore on cancellation"
upgrades.release() # always release the slot
except CatchableError as exc:
- debug "Exception in accept loop, exiting", exc = exc.msg
+ error "Exception in accept loop, exiting", exc = exc.msg
upgrades.release() # always release the slot
if not isNil(conn):
await conn.close()
@@ -380,7 +373,7 @@ proc newSwitch*(peerInfo: PeerInfo,
peerStore: PeerStore,
nameResolver: NameResolver = nil,
services = newSeq[Service]()): Switch
- {.raises: [Defect, LPError].} =
+ {.raises: [LPError].} =
if secureManagers.len == 0:
raise newException(LPError, "Provide at least one secure manager")
diff --git a/libp2p/transcoder.nim b/libp2p/transcoder.nim
index 2acea8ab19..7f00fe650b 100644
--- a/libp2p/transcoder.nim
+++ b/libp2p/transcoder.nim
@@ -13,7 +13,7 @@ import vbuffer
type
Transcoder* = object
stringToBuffer*: proc(s: string,
- vb: var VBuffer): bool {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
+ vb: var VBuffer): bool {.nimcall, gcsafe, noSideEffect, raises: [].}
bufferToString*: proc(vb: var VBuffer,
- s: var string): bool {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
- validateBuffer*: proc(vb: var VBuffer): bool {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
+ s: var string): bool {.nimcall, gcsafe, noSideEffect, raises: [].}
+ validateBuffer*: proc(vb: var VBuffer): bool {.nimcall, gcsafe, noSideEffect, raises: [].}
diff --git a/libp2p/transports/tcptransport.nim b/libp2p/transports/tcptransport.nim
index 6407557dbd..388ab6a43b 100644
--- a/libp2p/transports/tcptransport.nim
+++ b/libp2p/transports/tcptransport.nim
@@ -9,10 +9,7 @@
## TCP transport implementation
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sequtils]
import stew/results
@@ -43,6 +40,7 @@ type
flags: set[ServerFlags]
clientFlags: set[SocketFlags]
acceptFuts: seq[Future[StreamTransport]]
+ connectionsTimeout: Duration
TcpTransportTracker* = ref object of TrackerBase
opened*: uint64
@@ -50,7 +48,7 @@ type
TcpTransportError* = object of transport.TransportError
-proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe, raises: [Defect].}
+proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe, raises: [].}
proc getTcpTransportTracker(): TcpTransportTracker {.gcsafe.} =
result = cast[TcpTransportTracker](getTracker(TcpTransportTrackerName))
@@ -74,15 +72,6 @@ proc setupTcpTransportTracker(): TcpTransportTracker =
result.isLeaked = leakTransport
addTracker(TcpTransportTrackerName, result)
-proc getObservedAddr(client: StreamTransport): Future[MultiAddress] {.async.} =
- try:
- return MultiAddress.init(client.remoteAddress).tryGet()
- except CatchableError as exc:
- trace "Failed to create observedAddr", exc = exc.msg
- if not(isNil(client) and client.closed):
- await client.closeWait()
- raise exc
-
proc connHandler*(self: TcpTransport,
client: StreamTransport,
observedAddr: Opt[MultiAddress],
@@ -97,7 +86,8 @@ proc connHandler*(self: TcpTransport,
ChronosStream.init(
client = client,
dir = dir,
- observedAddr = observedAddr
+ observedAddr = observedAddr,
+ timeout = self.connectionsTimeout
))
proc onClose() {.async.} =
@@ -129,7 +119,8 @@ proc connHandler*(self: TcpTransport,
proc new*(
T: typedesc[TcpTransport],
flags: set[ServerFlags] = {},
- upgrade: Upgrade): T {.public.} =
+ upgrade: Upgrade,
+ connectionsTimeout = 10.minutes): T {.public.} =
let
transport = T(
@@ -144,7 +135,8 @@ proc new*(
else:
default(set[SocketFlags]),
upgrader: upgrade,
- networkReachability: NetworkReachability.Unknown)
+ networkReachability: NetworkReachability.Unknown,
+ connectionsTimeout: connectionsTimeout)
return transport
@@ -227,7 +219,7 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
try:
if self.acceptFuts.len <= 0:
- self.acceptFuts = self.servers.mapIt(it.accept())
+ self.acceptFuts = self.servers.mapIt(Future[StreamTransport](it.accept()))
if self.acceptFuts.len <= 0:
return
@@ -239,22 +231,29 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
self.acceptFuts[index] = self.servers[index].accept()
let transp = await finished
- let observedAddr = await getObservedAddr(transp)
- return await self.connHandler(transp, Opt.some(observedAddr), Direction.In)
- except TransportOsError as exc:
- # TODO: it doesn't sound like all OS errors
- # can be ignored, we should re-raise those
- # that can'self.
- debug "OS Error", exc = exc.msg
+ try:
+ let observedAddr = MultiAddress.init(transp.remoteAddress).tryGet()
+ return await self.connHandler(transp, Opt.some(observedAddr), Direction.In)
+ except CancelledError as exc:
+ transp.close()
+ raise exc
+ except CatchableError as exc:
+ debug "Failed to handle connection", exc = exc.msg
+ transp.close()
except TransportTooManyError as exc:
debug "Too many files opened", exc = exc.msg
+ except TransportAbortedError as exc:
+ debug "Connection aborted", exc = exc.msg
except TransportUseClosedError as exc:
debug "Server was closed", exc = exc.msg
raise newTransportClosedError(exc)
except CancelledError as exc:
raise exc
+ except TransportOsError as exc:
+ info "OS Error", exc = exc.msg
+ raise exc
except CatchableError as exc:
- debug "Unexpected error accepting connection", exc = exc.msg
+ info "Unexpected error accepting connection", exc = exc.msg
raise exc
method dial*(
@@ -274,7 +273,7 @@ method dial*(
await connect(address, flags = self.clientFlags)
try:
- let observedAddr = await getObservedAddr(transp)
+ let observedAddr = MultiAddress.init(transp.remoteAddress).tryGet()
return await self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
except CatchableError as err:
await transp.closeWait()
diff --git a/libp2p/transports/tortransport.nim b/libp2p/transports/tortransport.nim
index 371cc616e5..278c04bd6f 100644
--- a/libp2p/transports/tortransport.nim
+++ b/libp2p/transports/tortransport.nim
@@ -9,10 +9,7 @@
## Tor transport implementation
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/strformat
import chronos, chronicles, strutils
@@ -133,7 +130,7 @@ proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
else:
raise newException(LPError, "Address not supported")
-proc parseOnion3(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [Defect, LPError, ValueError].} =
+proc parseOnion3(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [LPError, ValueError].} =
var addressArray = ($address).split('/')
if addressArray.len < 2: raise newException(LPError, fmt"Onion address not supported {address}")
addressArray = addressArray[2].split(':')
@@ -144,7 +141,9 @@ proc parseOnion3(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises:
dstPort = address.data.buffer[37..38]
return (Socks5AddressType.FQDN.byte, dstAddr, dstPort)
-proc parseIpTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [Defect, LPError, ValueError].} =
+proc parseIpTcp(address: MultiAddress):
+ (byte, seq[byte], seq[byte])
+ {.raises: [LPError, ValueError].} =
let (codec, atyp) =
if IPv4Tcp.match(address):
(multiCodec("ip4"), Socks5AddressType.IPv4.byte)
@@ -153,15 +152,17 @@ proc parseIpTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [
else:
raise newException(LPError, fmt"IP address not supported {address}")
let
- dstAddr = address[codec].get().protoArgument().get()
- dstPort = address[multiCodec("tcp")].get().protoArgument().get()
+ dstAddr = address[codec].tryGet().protoArgument().tryGet()
+ dstPort = address[multiCodec("tcp")].tryGet().protoArgument().tryGet()
(atyp, dstAddr, dstPort)
-proc parseDnsTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) =
+proc parseDnsTcp(address: MultiAddress):
+ (byte, seq[byte], seq[byte])
+ {.raises: [LPError, ValueError].} =
let
- dnsAddress = address[multiCodec("dns")].get().protoArgument().get()
+ dnsAddress = address[multiCodec("dns")].tryGet().protoArgument().tryGet()
dstAddr = @(uint8(dnsAddress.len).toBytes()) & dnsAddress
- dstPort = address[multiCodec("tcp")].get().protoArgument().get()
+ dstPort = address[multiCodec("tcp")].tryGet().protoArgument().tryGet()
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc dialPeer(
@@ -217,9 +218,9 @@ method start*(
warn "Invalid address detected, skipping!", address = ma
continue
- let listenAddress = ma[0..1].get()
+ let listenAddress = ma[0..1].tryGet()
listenAddrs.add(listenAddress)
- let onion3 = ma[multiCodec("onion3")].get()
+ let onion3 = ma[multiCodec("onion3")].tryGet()
onion3Addrs.add(onion3)
if len(listenAddrs) != 0 and len(onion3Addrs) != 0:
@@ -254,7 +255,7 @@ proc new*(
rng: ref HmacDrbgContext,
addresses: seq[MultiAddress] = @[],
flags: set[ServerFlags] = {}): TorSwitch
- {.raises: [LPError, Defect], public.} =
+ {.raises: [LPError], public.} =
var builder = SwitchBuilder.new()
.withRng(rng)
.withTransport(proc(upgr: Upgrade): Transport = TorTransport.new(torServer, flags, upgr))
diff --git a/libp2p/transports/transport.nim b/libp2p/transports/transport.nim
index 7e00fc2fca..5c4a53503b 100644
--- a/libp2p/transports/transport.nim
+++ b/libp2p/transports/transport.nim
@@ -8,10 +8,7 @@
# those terms.
##
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import sequtils
import chronos, chronicles
@@ -102,9 +99,8 @@ method handles*(
# by default we skip circuit addresses to avoid
# having to repeat the check in every transport
- if address.protocols.isOk:
- return address.protocols
- .get()
+ let protocols = address.protocols.valueOr: return false
+ return protocols
.filterIt(
it == multiCodec("p2p-circuit")
).len == 0
diff --git a/libp2p/transports/wstransport.nim b/libp2p/transports/wstransport.nim
index 1bb86c4afb..7762c60656 100644
--- a/libp2p/transports/wstransport.nim
+++ b/libp2p/transports/wstransport.nim
@@ -9,10 +9,7 @@
## WebSocket & WebSocket Secure transport implementation
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sequtils]
import stew/results
@@ -111,7 +108,7 @@ type
flags: set[ServerFlags]
handshakeTimeout: Duration
factories: seq[ExtFactory]
- rng: Rng
+ rng: ref HmacDrbgContext
proc secure*(self: WsTransport): bool =
not (isNil(self.tlsPrivateKey) or isNil(self.tlsCertificate))
@@ -159,8 +156,12 @@ method start*(
self.httpservers &= httpserver
- let codec = if isWss:
- MultiAddress.init("/wss")
+ let codec =
+ if isWss:
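+ # advertise the newer /tls/ws form when the listen address used it, and the legacy /wss otherwise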
+ if ma.contains(multiCodec("tls")) == MaResult[bool].ok(true):
+ MultiAddress.init("/tls/ws")
+ else:
+ MultiAddress.init("/wss")
else:
MultiAddress.init("/ws")
@@ -267,8 +268,6 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
except CatchableError as exc:
await req.stream.closeWait()
raise exc
- except TransportOsError as exc:
- debug "OS Error", exc = exc.msg
except WebSocketError as exc:
debug "Websocket Error", exc = exc.msg
except HttpError as exc:
@@ -277,16 +276,19 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
debug "AsyncStream Error", exc = exc.msg
except TransportTooManyError as exc:
debug "Too many files opened", exc = exc.msg
+ except TransportAbortedError as exc:
+ debug "Connection aborted", exc = exc.msg
except AsyncTimeoutError as exc:
debug "Timed out", exc = exc.msg
except TransportUseClosedError as exc:
debug "Server was closed", exc = exc.msg
raise newTransportClosedError(exc)
except CancelledError as exc:
- # bubble up silently
raise exc
+ except TransportOsError as exc:
+ debug "OS Error", exc = exc.msg
except CatchableError as exc:
- warn "Unexpected error accepting connection", exc = exc.msg
+ info "Unexpected error accepting connection", exc = exc.msg
raise exc
method dial*(
@@ -327,7 +329,7 @@ proc new*(
tlsFlags: set[TLSFlags] = {},
flags: set[ServerFlags] = {},
factories: openArray[ExtFactory] = [],
- rng: Rng = nil,
+ rng: ref HmacDrbgContext = nil,
handshakeTimeout = DefaultHeadersTimeout): T {.public.} =
## Creates a secure WebSocket transport
@@ -346,7 +348,7 @@ proc new*(
upgrade: Upgrade,
flags: set[ServerFlags] = {},
factories: openArray[ExtFactory] = [],
- rng: Rng = nil,
+ rng: ref HmacDrbgContext = nil,
handshakeTimeout = DefaultHeadersTimeout): T {.public.} =
## Creates a clear-text WebSocket transport
diff --git a/libp2p/upgrademngrs/muxedupgrade.nim b/libp2p/upgrademngrs/muxedupgrade.nim
index bde7e3f584..05d751c94d 100644
--- a/libp2p/upgrademngrs/muxedupgrade.nim
+++ b/libp2p/upgrademngrs/muxedupgrade.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/sequtils
import pkg/[chronos, chronicles, metrics]
@@ -94,17 +91,15 @@ proc new*(
T: type MuxedUpgrade,
muxers: seq[MuxerProvider],
secureManagers: openArray[Secure] = [],
- connManager: ConnManager,
ms: MultistreamSelect): T =
let upgrader = T(
muxers: muxers,
secureManagers: @secureManagers,
- connManager: connManager,
ms: ms)
upgrader.streamHandler = proc(conn: Connection)
- {.async, gcsafe, raises: [Defect].} =
+ {.async, gcsafe, raises: [].} =
trace "Starting stream handler", conn
try:
await upgrader.ms.handle(conn) # handle incoming connection
diff --git a/libp2p/upgrademngrs/upgrade.nim b/libp2p/upgrademngrs/upgrade.nim
index 8a191dea33..49e70a900d 100644
--- a/libp2p/upgrademngrs/upgrade.nim
+++ b/libp2p/upgrademngrs/upgrade.nim
@@ -8,10 +8,7 @@
# those terms.
{.push gcsafe.}
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/[sequtils, strutils]
import pkg/[chronos, chronicles, metrics]
@@ -38,7 +35,6 @@ type
Upgrade* = ref object of RootObj
ms*: MultistreamSelect
- connManager*: ConnManager
secureManagers*: seq[Secure]
method upgrade*(
diff --git a/libp2p/utility.nim b/libp2p/utility.nim
index b22887dcc0..3b0a713453 100644
--- a/libp2p/utility.nim
+++ b/libp2p/utility.nim
@@ -7,12 +7,12 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
-import stew/byteutils
+import std/options, std/macros
+import stew/[byteutils, results]
+
+export results
template public* {.pragma.}
@@ -53,9 +53,6 @@ when defined(libp2p_agents_metrics):
import strutils
export split
- import stew/results
- export results
-
proc safeToLowerAscii*(s: string): Result[string, cstring] =
try:
ok(s.toLowerAscii())
@@ -73,6 +70,10 @@ template safeConvert*[T: SomeInteger, S: Ordinal](value: S): T =
else:
{.error: "Source and target types have an incompatible range low..high".}
+proc capLen*[T](s: var seq[T], length: Natural) =
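+ ## clamp `s` to at most `length` elements, dropping any excess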
+ if s.len > length:
+ s.setLen(length)
+
template exceptionToAssert*(body: untyped): untyped =
block:
var res: type(body)
@@ -86,3 +87,30 @@ template exceptionToAssert*(body: untyped): untyped =
when defined(nimHasWarnBareExcept):
{.pop.}
res
+
+template withValue*[T](self: Opt[T] | Option[T], value, body: untyped): untyped =
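+ ## run `body` with the unwrapped value injected as `value` when `self` holds one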
+ if self.isSome:
+ let value {.inject.} = self.get()
+ body
+
+macro withValue*[T](self: Opt[T] | Option[T], value, body, body2: untyped): untyped =
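+ ## overload with an else clause: `body2` is executed when `self` is empty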
+ let elseBody = body2[0]
+ quote do:
+ if `self`.isSome:
+ let `value` {.inject.} = `self`.get()
+ `body`
+ else:
+ `elseBody`
+
+template valueOr*[T](self: Option[T], body: untyped): untyped =
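+ ## unwrap `self`, or evaluate `body` as the fallback when it is empty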
+ if self.isSome:
+ self.get()
+ else:
+ body
+
+template toOpt*[T, E](self: Result[T, E]): Opt[T] =
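+ ## convert a `Result` into an `Opt`, dropping the error information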
+ if self.isOk:
+ when T is void: Result[void, void].ok()
+ else: Opt.some(self.unsafeGet())
+ else:
+ Opt.none(type(T))
diff --git a/libp2p/utils/future.nim b/libp2p/utils/future.nim
index 6c18af85ce..e15bf40b04 100644
--- a/libp2p/utils/future.nim
+++ b/libp2p/utils/future.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos
diff --git a/libp2p/utils/heartbeat.nim b/libp2p/utils/heartbeat.nim
index 6f99008ff6..0db43a54d8 100644
--- a/libp2p/utils/heartbeat.nim
+++ b/libp2p/utils/heartbeat.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos, chronicles
diff --git a/libp2p/utils/offsettedseq.nim b/libp2p/utils/offsettedseq.nim
index 40539f85e5..4e28c5ff21 100644
--- a/libp2p/utils/offsettedseq.nim
+++ b/libp2p/utils/offsettedseq.nim
@@ -50,10 +50,7 @@ template flushIfIt*(o, pred: untyped) =
if not pred: break
i.inc()
if i > 0:
- when (NimMajor, NimMinor) < (1, 4):
- o.s.delete(0, i - 1)
- else:
- o.s.delete(0..<i)
+ o.s.delete(0..<i)
diff --git a/libp2p/vbuffer.nim b/libp2p/vbuffer.nim
--- a/libp2p/vbuffer.nim
+++ b/libp2p/vbuffer.nim
if len(data) > 0:
copyMem(addr result.buffer[0], unsafeAddr data[0], len(data))
result.offset = offset
diff --git a/libp2p/wire.nim b/libp2p/wire.nim
index 533024755c..70d5574fd1 100644
--- a/libp2p/wire.nim
+++ b/libp2p/wire.nim
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
## This module implements wire network connection procedures.
import chronos, stew/endians2
@@ -23,14 +20,14 @@ else:
const
RTRANSPMA* = mapOr(
- TCP_IP,
- WebSockets_IP,
+ TCP,
+ WebSockets,
UNIX
)
TRANSPMA* = mapOr(
RTRANSPMA,
- UDP_IP
+ UDP,
)
@@ -40,7 +37,7 @@ proc initTAddress*(ma: MultiAddress): MaResult[TransportAddress] =
## MultiAddress must be wire address, e.g. ``{IP4, IP6, UNIX}/{TCP, UDP}``.
##
- if TRANSPMA.match(ma):
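+ # TRANSPMA now also matches DNS-based addresses, which cannot be turned into a TransportAddress, so only the IP/unix patterns are accepted here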
+ if mapOr(TCP_IP, WebSockets_IP, UNIX, UDP_IP).match(ma):
var pbuf: array[2, byte]
let code = (?(?ma[0]).protoCode())
if code == multiCodec("unix"):
@@ -79,7 +76,7 @@ proc connect*(
child: StreamTransport = nil,
flags = default(set[SocketFlags]),
localAddress: Opt[MultiAddress] = Opt.none(MultiAddress)): Future[StreamTransport]
- {.raises: [Defect, LPError, MaInvalidAddress].} =
+ {.raises: [LPError, MaInvalidAddress].} =
## Open new connection to remote peer with address ``ma`` and create
## new transport object ``StreamTransport`` for established connection.
## ``bufferSize`` is size of internal buffer for transport.
@@ -92,7 +89,7 @@ proc connect*(
compilesOr:
return connect(transportAddress, bufferSize, child,
- if localAddress.isSome(): initTAddress(localAddress.get()).tryGet() else : TransportAddress(),
+ if localAddress.isSome(): initTAddress(localAddress.expect("just checked")).tryGet() else: TransportAddress(),
flags)
do:
# support for older chronos versions
@@ -107,7 +104,7 @@ proc createStreamServer*[T](ma: MultiAddress,
bufferSize: int = DefaultStreamBufferSize,
child: StreamServer = nil,
init: TransportInitCallback = nil): StreamServer
- {.raises: [Defect, LPError, MaInvalidAddress].} =
+ {.raises: [LPError, MaInvalidAddress].} =
## Create new TCP stream server which bounds to ``ma`` address.
if not(RTRANSPMA.match(ma)):
raise newException(MaInvalidAddress, "Incorrect or unsupported address!")
@@ -134,7 +131,7 @@ proc createStreamServer*[T](ma: MultiAddress,
bufferSize: int = DefaultStreamBufferSize,
child: StreamServer = nil,
init: TransportInitCallback = nil): StreamServer
- {.raises: [Defect, LPError, MaInvalidAddress].} =
+ {.raises: [LPError, MaInvalidAddress].} =
## Create new TCP stream server which bounds to ``ma`` address.
##
@@ -155,7 +152,7 @@ proc createStreamServer*[T](ma: MultiAddress,
raise newException(LPError, exc.msg)
proc createAsyncSocket*(ma: MultiAddress): AsyncFD
- {.raises: [Defect, LPError].} =
+ {.raises: [ValueError, LPError].} =
## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
## protocol information.
##
@@ -188,7 +185,7 @@ proc createAsyncSocket*(ma: MultiAddress): AsyncFD
raise newException(LPError, exc.msg)
proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool
- {.raises: [Defect, LPError].} =
+ {.raises: [LPError].} =
## Bind socket ``sock`` to MultiAddress ``ma``.
##
## Note: This procedure only used in `go-libp2p-daemon` wrapper.
@@ -216,3 +213,10 @@ proc getLocalAddress*(sock: AsyncFD): TransportAddress =
if getsockname(SocketHandle(sock), cast[ptr SockAddr](addr saddr),
addr slen) == 0:
fromSAddr(addr saddr, slen, result)
+
+proc isPublicMA*(ma: MultiAddress): bool =
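+ ## DNS names are assumed public; other addresses must resolve to a globally routable IP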
+ if DNS.matchPartial(ma):
+ return true
+
+ let hostIP = initTAddress(ma).valueOr: return false
+ return hostIP.isGlobal()
diff --git a/tests/commoninterop.nim b/tests/commoninterop.nim
index 96480142d6..a5684aa6a6 100644
--- a/tests/commoninterop.nim
+++ b/tests/commoninterop.nim
@@ -10,7 +10,7 @@ type
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov: TransportProvider = proc(upgr: Upgrade): Transport = TcpTransport.new({}, upgr),
relay: Relay = Relay.new(circuitRelayV1 = true)):
- Switch {.gcsafe, raises: [Defect, LPError].}
+ Switch {.gcsafe, raises: [LPError].}
DaemonPeerInfo = daemonapi.PeerInfo
proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.} =
diff --git a/tests/commontransport.nim b/tests/commontransport.nim
index e18dc19784..7f1b5f3616 100644
--- a/tests/commontransport.nim
+++ b/tests/commontransport.nim
@@ -9,7 +9,7 @@ import ../libp2p/[stream/connection,
import ./helpers
-type TransportProvider* = proc(): Transport {.gcsafe, raises: [Defect].}
+type TransportProvider* = proc(): Transport {.gcsafe, raises: [].}
template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string = "") =
block:
@@ -150,6 +150,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
proc acceptHandler() {.async, gcsafe.} =
while true:
let conn = await transport1.accept()
+ await conn.write(newSeq[byte](0))
await conn.write("Hello!")
await conn.close()
diff --git a/tests/helpers.nim b/tests/helpers.nim
index af6129b23a..84b2d2583b 100644
--- a/tests/helpers.nim
+++ b/tests/helpers.nim
@@ -1,7 +1,4 @@
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos
import algorithm
@@ -13,10 +10,11 @@ import ../libp2p/stream/lpstream
import ../libp2p/stream/chronosstream
import ../libp2p/muxers/mplex/lpchannel
import ../libp2p/protocols/secure/secure
+import ../libp2p/switch
+import ../libp2p/nameresolving/[nameresolver, mockresolver]
import ./asyncunit
-export asyncunit
-
+export asyncunit, mockresolver
const
StreamTransportTrackerName = "stream.transport"
@@ -83,7 +81,7 @@ template rng*(): ref HmacDrbgContext =
getRng()
type
- WriteHandler* = proc(data: seq[byte]): Future[void] {.gcsafe, raises: [Defect].}
+ WriteHandler* = proc(data: seq[byte]): Future[void] {.gcsafe, raises: [].}
TestBufferStream* = ref object of BufferStream
writeHandler*: WriteHandler
@@ -113,7 +111,7 @@ proc bridgedConnections*: (Connection, Connection) =
return (connA, connB)
-proc checkExpiringInternal(cond: proc(): bool {.raises: [Defect], gcsafe.} ): Future[bool] {.async, gcsafe.} =
+proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async, gcsafe.} =
let start = Moment.now()
while true:
if Moment.now() > (start + chronos.seconds(5)):
@@ -140,4 +138,16 @@ proc unorderedCompare*[T](a, b: seq[T]): bool =
if aSorted == bSorted:
return true
- return false
\ No newline at end of file
+ return false
+
+proc default*(T: typedesc[MockResolver]): T =
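+ ## a resolver answering `localhost` lookups with 127.0.0.1 and ::1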
+ let resolver = MockResolver.new()
+ resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]
+ resolver.ipResponses[("localhost", true)] = @["::1"]
+ resolver
+
+proc setDNSAddr*(switch: Switch) {.gcsafe, async.} =
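+ ## replace the switch's first listen address with /dns4/localhost, keeping the original port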
+ proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
+ return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
+ switch.peerInfo.addressMappers.add(addressMapper)
+ await switch.peerInfo.update()
diff --git a/tests/pubsub/testgossipinternal.nim b/tests/pubsub/testgossipinternal.nim
index 2e79e23550..4f60400dba 100644
--- a/tests/pubsub/testgossipinternal.nim
+++ b/tests/pubsub/testgossipinternal.nim
@@ -1,41 +1,31 @@
-include ../../libp2p/protocols/pubsub/gossipsub
+# Nim-LibP2P
+# Copyright (c) 2023 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
{.used.}
-import std/[options, deques]
+import std/[options, deques, sequtils, enumerate, algorithm]
import stew/byteutils
import ../../libp2p/builders
import ../../libp2p/errors
import ../../libp2p/crypto/crypto
import ../../libp2p/stream/bufferstream
+import ../../libp2p/protocols/pubsub/[pubsub, gossipsub, mcache, peertable]
+import ../../libp2p/protocols/pubsub/rpc/[message, messages]
import ../../libp2p/switch
import ../../libp2p/muxers/muxer
+import ../../libp2p/protocols/pubsub/rpc/protobuf
+import utils
import ../helpers
-type
- TestGossipSub = ref object of GossipSub
-
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
-proc getPubSubPeer(p: TestGossipSub, peerId: PeerId): PubSubPeer =
- proc getConn(): Future[Connection] =
- p.switch.dial(peerId, GossipSubCodec)
-
- let pubSubPeer = PubSubPeer.new(peerId, getConn, nil, GossipSubCodec, 1024 * 1024)
- debug "created new pubsub peer", peerId
-
- p.peers[peerId] = pubSubPeer
-
- onNewPeer(p, pubSubPeer)
- pubSubPeer
-
-proc randomPeerId(): PeerId =
- try:
- PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
- except CatchableError as exc:
- raise newException(Defect, exc.msg)
-
const MsgIdSuccess = "msg id gen success"
suite "GossipSub internal":
@@ -170,7 +160,7 @@ suite "GossipSub internal":
asyncTest "`replenishFanout` Degree Lo":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -197,7 +187,7 @@ suite "GossipSub internal":
asyncTest "`dropFanoutPeers` drop expired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -227,7 +217,7 @@ suite "GossipSub internal":
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic1 = "foobar1"
@@ -264,7 +254,7 @@ suite "GossipSub internal":
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -325,7 +315,7 @@ suite "GossipSub internal":
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -365,7 +355,7 @@ suite "GossipSub internal":
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -406,7 +396,7 @@ suite "GossipSub internal":
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -447,7 +437,7 @@ suite "GossipSub internal":
asyncTest "Drop messages of topics without subscription":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
check false
let topic = "foobar"
@@ -470,7 +460,7 @@ suite "GossipSub internal":
let peer = gossipSub.getPubSubPeer(peerId)
inc seqno
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
- await gossipSub.rpcHandler(peer, RPCMsg(messages: @[msg]))
+ await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
check gossipSub.mcache.msgs.len == 0
@@ -481,7 +471,7 @@ suite "GossipSub internal":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.parameters.disconnectBadPeers = true
gossipSub.parameters.appSpecificWeight = 1.0
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
check false
let topic = "foobar"
@@ -525,7 +515,7 @@ suite "GossipSub internal":
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
- await gossipSub.rpcHandler(peer, lotOfSubs)
+ await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
check:
gossipSub.gossipsub.len == gossipSub.topicsHigh
@@ -656,13 +646,16 @@ suite "GossipSub internal":
asyncTest "handleIHave/Iwant tests":
let gossipSub = TestGossipSub.init(newStandardSwitch())
- proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
+ proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
check false
+ proc handler2(topic: string, data: seq[byte]) {.async.} = discard
let topic = "foobar"
var conns = newSeq[Connection]()
gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
+ gossipSub.subscribe(topic, handler2)
+
for i in 0..<30:
let conn = TestBufferStream.new(noop)
conns &= conn
@@ -724,3 +717,130 @@ suite "GossipSub internal":
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
+
+ proc setupTest(): Future[tuple[gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]]] {.async.} =
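+ # two started and connected gossipsub nodes; handlerA on node 0 records every payload received on "foobar"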
+ let
+ nodes = generateNodes(2, gossip = true, verifySignature = false)
+ discard await allFinished(
+ nodes[0].switch.start(),
+ nodes[1].switch.start()
+ )
+
+ await nodes[1].switch.connect(nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs)
+
+ var receivedMessages = new(HashSet[seq[byte]])
+
+ proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
+ receivedMessages[].incl(data)
+
+ proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} =
+ discard
+
+ nodes[0].subscribe("foobar", handlerA)
+ nodes[1].subscribe("foobar", handlerB)
+ await waitSubGraph(nodes, "foobar")
+
+ var gossip0: GossipSub = GossipSub(nodes[0])
+ var gossip1: GossipSub = GossipSub(nodes[1])
+
+ return (gossip0, gossip1, receivedMessages)
+
+ proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
+ await allFuturesThrowing(
+ gossip0.switch.stop(),
+ gossip1.switch.stop()
+ )
+
+ proc createMessages(gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
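+ # build two messages of the given sizes, cache them on gossip1 and record their ids as IHAVEs already sent to gossip0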
+ var iwantMessageIds = newSeq[MessageId]()
+ var sentMessages = initHashSet[seq[byte]]()
+
+ for i, size in enumerate([size1, size2]):
+ let data = newSeqWith[byte](size, i.byte)
+ sentMessages.incl(data)
+
+ let msg = Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
+ let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
+ iwantMessageIds.add(iwantMessageId)
+ gossip1.mcache.put(iwantMessageId, msg)
+
+ let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
+ peer.sentIHaves[^1].incl(iwantMessageId)
+
+ return (iwantMessageIds, sentMessages)
+
+ asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
+ # This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
+ # Expected: Both messages should be received.
+ let (gossip0, gossip1, receivedMessages) = await setupTest()
+
+ let messageSize = gossip1.maxMessageSize div 2 + 1
+ let (iwantMessageIds, sentMessages) = createMessages(gossip0, gossip1, messageSize, messageSize)
+
+ gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
+ ihave: @[ControlIHave(topicId: "foobar", messageIds: iwantMessageIds)]
+ ))))
+
+ checkExpiring: receivedMessages[] == sentMessages
+ check receivedMessages[].len == 2
+
+ await teardownTest(gossip0, gossip1)
+
+ asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
+ # This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
+ # Expected: No messages should be received.
+ let (gossip0, gossip1, receivedMessages) = await setupTest()
+
+ let messageSize = gossip1.maxMessageSize + 10
+ let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, messageSize, messageSize)
+
+ gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
+ ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
+ ))))
+
+ await sleepAsync(300.milliseconds)
+ checkExpiring: receivedMessages[].len == 0
+
+ await teardownTest(gossip0, gossip1)
+
+ asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
+ # This test checks if two messages, both below the maxSize, are correctly processed and sent.
+ # Expected: Both messages should be received.
+ let (gossip0, gossip1, receivedMessages) = await setupTest()
+ let size1 = gossip1.maxMessageSize div 2
+ let size2 = gossip1.maxMessageSize div 3
+ let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, size1, size2)
+
+ gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
+ ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
+ ))))
+
+ checkExpiring: receivedMessages[] == sentMessages
+ check receivedMessages[].len == 2
+
+ await teardownTest(gossip0, gossip1)
+
+ asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
+ # This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
+ # Expected: Only the smaller message should be received.
+ let (gossip0, gossip1, receivedMessages) = await setupTest()
+ let maxSize = gossip1.maxMessageSize
+ let size1 = maxSize div 2
+ let size2 = maxSize + 10
+ let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, size1, size2)
+
+ gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
+ ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
+ ))))
+
+ var smallestSet: HashSet[seq[byte]]
+ let seqs = toSeq(sentMessages)
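+ # set iteration order is arbitrary; lexicographic comparison finds the all-zero payload, which is also the smaller message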
+ if seqs[0] < seqs[1]:
+ smallestSet.incl(seqs[0])
+ else:
+ smallestSet.incl(seqs[1])
+
+ checkExpiring: receivedMessages[] == smallestSet
+ check receivedMessages[].len == 1
+
+ await teardownTest(gossip0, gossip1)
diff --git a/tests/pubsub/testgossipsub.nim b/tests/pubsub/testgossipsub.nim
index 396bccbabd..712829dcd6 100644
--- a/tests/pubsub/testgossipsub.nim
+++ b/tests/pubsub/testgossipsub.nim
@@ -10,8 +10,9 @@
{.used.}
import sequtils, options, tables, sets, sugar
-import chronos, stew/byteutils
+import chronos, stew/byteutils, chronos/ratelimit
import chronicles
+import metrics
import utils, ../../libp2p/[errors,
peerid,
peerinfo,
@@ -20,6 +21,7 @@ import utils, ../../libp2p/[errors,
crypto/crypto,
protocols/pubsub/pubsub,
protocols/pubsub/gossipsub,
+ protocols/pubsub/gossipsub/scoring,
protocols/pubsub/pubsubpeer,
protocols/pubsub/peertable,
protocols/pubsub/timedcache,
@@ -628,7 +630,6 @@ suite "GossipSub":
"foobar" in gossip1.gossipsub
"foobar" notin gossip2.gossipsub
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
- not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
await allFuturesThrowing(
nodes[0].switch.stop(),
@@ -637,6 +638,79 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
+ # Helper procedures to avoid repetition
+ proc setupNodes(count: int): seq[PubSub] =
+ generateNodes(count, gossip = true)
+
+ proc startNodes(nodes: seq[PubSub]) {.async.} =
+ await allFuturesThrowing(
+ nodes.mapIt(it.switch.start())
+ )
+
+ proc stopNodes(nodes: seq[PubSub]) {.async.} =
+ await allFuturesThrowing(
+ nodes.mapIt(it.switch.stop())
+ )
+
+ proc connectNodes(nodes: seq[PubSub], target: PubSub) {.async.} =
+ proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+ check topic == "foobar"
+
+ for node in nodes:
+ node.subscribe("foobar", handler)
+ await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
+
+ proc baseTestProcedure(nodes: seq[PubSub], gossip1: GossipSub, numPeersFirstMsg: int, numPeersSecondMsg: int) {.async.} =
+ proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+ check topic == "foobar"
+
+ block setup:
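+ # retry until a small publish reaches all 19 peers, proving the flood-publish set is fully connected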
+ for i in 0..<50:
+ if (await nodes[0].publish("foobar", ("Hello!" & $i).toBytes())) == 19:
+ break setup
+ await sleepAsync(10.milliseconds)
+ check false
+
+ check (await nodes[0].publish("foobar", newSeq[byte](2_500_000))) == numPeersFirstMsg
+ check (await nodes[0].publish("foobar", newSeq[byte](500_001))) == numPeersSecondMsg
+
+ # Now try with a mesh
+ gossip1.subscribe("foobar", handler)
+ checkExpiring: gossip1.mesh.peers("foobar") > 5
+
+ # use a different length so that the message is not equal to the last
+ check (await nodes[0].publish("foobar", newSeq[byte](500_000))) == numPeersSecondMsg
+
+ # Actual tests
+ asyncTest "e2e - GossipSub floodPublish limit":
+
+ let
+ nodes = setupNodes(20)
+ gossip1 = GossipSub(nodes[0])
+
+ gossip1.parameters.floodPublish = true
+ gossip1.parameters.heartbeatInterval = milliseconds(700)
+
+ await startNodes(nodes)
+ await connectNodes(nodes[1..^1], nodes[0])
+ await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)
+ await stopNodes(nodes)
+
+ asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
+
+ let
+ nodes = setupNodes(20)
+ gossip1 = GossipSub(nodes[0])
+
+ gossip1.parameters.floodPublish = true
+ gossip1.parameters.heartbeatInterval = milliseconds(700)
+ gossip1.parameters.bandwidthEstimatebps = 0
+
+ await startNodes(nodes)
+ await connectNodes(nodes[1..^1], nodes[0])
+ await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)
+ await stopNodes(nodes)
+
asyncTest "e2e - GossipSub with multiple peers":
var runs = 10
@@ -796,3 +870,196 @@ suite "GossipSub":
)
await allFuturesThrowing(nodesFut.concat())
+
+ asyncTest "e2e - iDontWant":
+ # 3 nodes: A <=> B <=> C
+ # (A & C are NOT connected). We pre-emptively send a dontwant from C to B,
+ # and check that B doesn't relay the message to C.
+ # We also check that B sends IDONTWANT to C, but not A
+ func dumbMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
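+ # every message gets the same id, so C's pre-emptive IDONTWANT matches the message A later publishes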
+ ok(newSeq[byte](10))
+ let
+ nodes = generateNodes(
+ 3,
+ gossip = true,
+ msgIdProvider = dumbMsgIdProvider
+ )
+
+ nodesFut = await allFinished(
+ nodes[0].switch.start(),
+ nodes[1].switch.start(),
+ nodes[2].switch.start(),
+ )
+
+ await nodes[0].switch.connect(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
+ await nodes[1].switch.connect(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
+
+ let bFinished = newFuture[void]()
+ proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
+ proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = bFinished.complete()
+ proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} = doAssert false
+
+ nodes[0].subscribe("foobar", handlerA)
+ nodes[1].subscribe("foobar", handlerB)
+ nodes[2].subscribe("foobar", handlerC)
+ await waitSubGraph(nodes, "foobar")
+
+ var gossip1: GossipSub = GossipSub(nodes[0])
+ var gossip2: GossipSub = GossipSub(nodes[1])
+ var gossip3: GossipSub = GossipSub(nodes[2])
+
+ check: gossip3.mesh.peers("foobar") == 1
+
+ gossip3.broadcast(gossip3.mesh["foobar"], RPCMsg(control: some(ControlMessage(
+ idontwant: @[ControlIWant(messageIds: @[newSeq[byte](10)])]
+ ))))
+ checkExpiring: gossip2.mesh.getOrDefault("foobar").anyIt(it.heDontWants[^1].len == 1)
+
+ tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
+
+ await bFinished
+
+ checkExpiring: toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 1)
+ check: toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 0)
+
+ await allFuturesThrowing(
+ nodes[0].switch.stop(),
+ nodes[1].switch.stop(),
+ nodes[2].switch.stop()
+ )
+
+ await allFuturesThrowing(nodesFut.concat())
+
+ proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
+ let nodes = generateNodes(
+ 2,
+ gossip = true,
+ overheadRateLimit = Opt.some((20, 1.millis)))
+
+ discard await allFinished(
+ nodes[0].switch.start(),
+ nodes[1].switch.start(),
+ )
+
+ await subscribeNodes(nodes)
+
+ proc handle(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
+
+ let gossip0 = GossipSub(nodes[0])
+ let gossip1 = GossipSub(nodes[1])
+
+ gossip0.subscribe("foobar", handle)
+ gossip1.subscribe("foobar", handle)
+ await waitSubGraph(nodes, "foobar")
+
+ # Avoid being disconnected by failing signature verification
+ gossip0.verifySignature = false
+ gossip1.verifySignature = false
+
+ return (nodes, gossip0, gossip1)
+
+ proc currentRateLimitHits(): float64 =
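+ # read the rate-limit-hits counter; a KeyError means the metric was never incremented, so count it as 0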
+ try:
+ libp2p_gossipsub_peers_rate_limit_hits.valueByName("libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"])
+ except KeyError:
+ 0
+
+ asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
+ let rateLimitHits = currentRateLimitHits()
+ let (nodes, gossip0, gossip1) = await initializeGossipTest()
+
+ gossip0.broadcast(gossip0.mesh["foobar"], RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](10))]))
+ await sleepAsync(300.millis)
+
+ check currentRateLimitHits() == rateLimitHits
+ check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
+
+ # Disconnect peer when rate limiting is enabled
+ gossip1.parameters.disconnectPeerAboveRateLimit = true
+ gossip0.broadcast(gossip0.mesh["foobar"], RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](12))]))
+ await sleepAsync(300.millis)
+
+ check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
+ check currentRateLimitHits() == rateLimitHits
+
+ await stopNodes(nodes)
+
+ asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
+ let rateLimitHits = currentRateLimitHits()
+
+ let (nodes, gossip0, gossip1) = await initializeGossipTest()
+
+ # Simulate sending an undecodable message
+ await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](33, 1.byte))
+ await sleepAsync(300.millis)
+
+ check currentRateLimitHits() == rateLimitHits + 1
+ check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
+
+ # Disconnect peer when rate limiting is enabled
+ gossip1.parameters.disconnectPeerAboveRateLimit = true
+ await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](35, 1.byte))
+
+ checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
+ check currentRateLimitHits() == rateLimitHits + 2
+
+ await stopNodes(nodes)
+
+ asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
+ let rateLimitHits = currentRateLimitHits()
+ let (nodes, gossip0, gossip1) = await initializeGossipTest()
+
+ let msg = RPCMsg(control: some(ControlMessage(prune: @[
+ ControlPrune(topicID: "foobar", peers: @[
+ PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))
+ ], backoff: 123'u64)
+ ])))
+ gossip0.broadcast(gossip0.mesh["foobar"], msg)
+ await sleepAsync(300.millis)
+
+ check currentRateLimitHits() == rateLimitHits + 1
+ check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
+
+ # Disconnect peer when rate limiting is enabled
+ gossip1.parameters.disconnectPeerAboveRateLimit = true
+ let msg2 = RPCMsg(control: some(ControlMessage(prune: @[
+ ControlPrune(topicID: "foobar", peers: @[
+ PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))
+ ], backoff: 123'u64)
+ ])))
+ gossip0.broadcast(gossip0.mesh["foobar"], msg2)
+
+ checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
+ check currentRateLimitHits() == rateLimitHits + 2
+
+ await stopNodes(nodes)
+
+ asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
+ let rateLimitHits = currentRateLimitHits()
+ let (nodes, gossip0, gossip1) = await initializeGossipTest()
+
+ let topic = "foobar"
+ proc execValidator(topic: string, message: messages.Message): Future[ValidationResult] {.raises: [].} =
+ let res = newFuture[ValidationResult]()
+ res.complete(ValidationResult.Reject)
+ res
+
+ gossip0.addValidator(topic, execValidator)
+ gossip1.addValidator(topic, execValidator)
+
+ let msg = RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](40))])
+
+ gossip0.broadcast(gossip0.mesh[topic], msg)
+ await sleepAsync(300.millis)
+
+ check currentRateLimitHits() == rateLimitHits + 1
+ check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
+
+ # Disconnect peer when rate limiting is enabled
+ gossip1.parameters.disconnectPeerAboveRateLimit = true
+ gossip0.broadcast(gossip0.mesh[topic], RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](35))]))
+
+ checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
+ check currentRateLimitHits() == rateLimitHits + 2
+
+ await stopNodes(nodes)
diff --git a/tests/pubsub/testgossipsub2.nim b/tests/pubsub/testgossipsub2.nim
index 8132a098b7..a076fe4aa2 100644
--- a/tests/pubsub/testgossipsub2.nim
+++ b/tests/pubsub/testgossipsub2.nim
@@ -167,36 +167,44 @@ suite "GossipSub":
asyncTest "GossipSub directPeers: always forward messages":
let
- nodes = generateNodes(2, gossip = true)
+ nodes = generateNodes(3, gossip = true)
# start switches
nodesFut = await allFinished(
nodes[0].switch.start(),
nodes[1].switch.start(),
+ nodes[2].switch.start(),
)
await GossipSub(nodes[0]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
await GossipSub(nodes[1]).addDirectPeer(nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs)
+ await GossipSub(nodes[1]).addDirectPeer(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
+ await GossipSub(nodes[2]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
check topic == "foobar"
handlerFut.complete()
+ proc noop(topic: string, data: seq[byte]) {.async, gcsafe.} =
+ check topic == "foobar"
- nodes[0].subscribe("foobar", handler)
- nodes[1].subscribe("foobar", handler)
+ nodes[0].subscribe("foobar", noop)
+ nodes[1].subscribe("foobar", noop)
+ nodes[2].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
- await handlerFut
+ await handlerFut.wait(2.seconds)
# peer shouldn't be in our mesh
check "foobar" notin GossipSub(nodes[0]).mesh
check "foobar" notin GossipSub(nodes[1]).mesh
+ check "foobar" notin GossipSub(nodes[2]).mesh
await allFuturesThrowing(
nodes[0].switch.stop(),
- nodes[1].switch.stop()
+ nodes[1].switch.stop(),
+ nodes[2].switch.stop()
)
await allFuturesThrowing(nodesFut.concat())
diff --git a/tests/pubsub/testmessage.nim b/tests/pubsub/testmessage.nim
index 7bc4b267a3..589920b404 100644
--- a/tests/pubsub/testmessage.nim
+++ b/tests/pubsub/testmessage.nim
@@ -2,10 +2,10 @@ import unittest2
{.used.}
-import options
+import options, strutils
import stew/byteutils
import ../../libp2p/[peerid, peerinfo,
- crypto/crypto,
+ crypto/crypto as crypto,
protocols/pubsub/errors,
protocols/pubsub/rpc/message,
protocols/pubsub/rpc/messages]
@@ -28,7 +28,7 @@ suite "Message":
"""08011240B9EA7F0357B5C1247E4FCB5AD09C46818ECB07318CA84711875F4C6C
E6B946186A4EB44E0D714B2A2D48263D75CF52D30BEF9D9AE2A9FEB7DAF1775F
E731065A"""
- seckey = PrivateKey.init(fromHex(stripSpaces(pkHex)))
+ seckey = PrivateKey.init(crypto.fromHex(stripSpaces(pkHex)))
.expect("valid private key bytes")
peer = PeerInfo.new(seckey)
msg = Message.init(some(peer), @[], "topic", some(seqno), sign = true)
@@ -46,7 +46,7 @@ suite "Message":
"""08011240B9EA7F0357B5C1247E4FCB5AD09C46818ECB07318CA84711875F4C6C
E6B946186A4EB44E0D714B2A2D48263D75CF52D30BEF9D9AE2A9FEB7DAF1775F
E731065A"""
- seckey = PrivateKey.init(fromHex(stripSpaces(pkHex)))
+ seckey = PrivateKey.init(crypto.fromHex(stripSpaces(pkHex)))
.expect("valid private key bytes")
peer = PeerInfo.new(seckey)
@@ -64,7 +64,7 @@ suite "Message":
"""08011240B9EA7F0357B5C1247E4FCB5AD09C46818ECB07318CA84711875F4C6C
E6B946186A4EB44E0D714B2A2D48263D75CF52D30BEF9D9AE2A9FEB7DAF1775F
E731065A"""
- seckey = PrivateKey.init(fromHex(stripSpaces(pkHex)))
+ seckey = PrivateKey.init(crypto.fromHex(stripSpaces(pkHex)))
.expect("valid private key bytes")
peer = PeerInfo.new(seckey)
msg = Message.init(some(peer), @[], "topic", uint64.none, sign = true)
@@ -73,3 +73,55 @@ suite "Message":
check:
msgIdResult.isErr
msgIdResult.error == ValidationResult.Reject
+
+ test "byteSize for RPCMsg":
+ var msg = Message(
+ fromPeer: PeerId(data: @['a'.byte, 'b'.byte]), # 2 bytes
+ data: @[1'u8, 2, 3], # 3 bytes
+ seqno: @[4'u8, 5], # 2 bytes
+ signature: @['c'.byte, 'd'.byte], # 2 bytes
+ key: @[6'u8, 7], # 2 bytes
+ topicIds: @["abc", "defgh"] # 3 + 5 = 8 bytes
+ )
+
+ var peerInfo = PeerInfoMsg(
+ peerId: PeerId(data: @['e'.byte]), # 1 byte
+ signedPeerRecord: @['f'.byte, 'g'.byte] # 2 bytes
+ )
+
+ var controlIHave = ControlIHave(
+ topicId: "ijk", # 3 bytes
+ messageIds: @[ @['l'.byte], @['m'.byte, 'n'.byte] ] # 1 + 2 = 3 bytes
+ )
+
+ var controlIWant = ControlIWant(
+ messageIds: @[ @['o'.byte, 'p'.byte], @['q'.byte] ] # 2 + 1 = 3 bytes
+ )
+
+ var controlGraft = ControlGraft(
+ topicId: "rst" # 3 bytes
+ )
+
+ var controlPrune = ControlPrune(
+ topicId: "uvw", # 3 bytes
+ peers: @[peerInfo, peerInfo], # (1 + 2) * 2 = 6 bytes
+ backoff: 12345678 # 8 bytes for uint64
+ )
+
+ var control = ControlMessage(
+ ihave: @[controlIHave, controlIHave], # (3 + 3) * 2 = 12 bytes
+ iwant: @[controlIWant], # 3 bytes
+ graft: @[controlGraft], # 3 bytes
+ prune: @[controlPrune], # 3 + 6 + 8 = 17 bytes
+ idontwant: @[controlIWant] # 3 bytes
+ )
+
+ var rpcMsg = RPCMsg(
+ subscriptions: @[SubOpts(subscribe: true, topic: "a".repeat(12)), SubOpts(subscribe: false, topic: "b".repeat(14))], # 1 + 12 + 1 + 14 = 28 bytes
+ messages: @[msg, msg], # 19 * 2 = 38 bytes
+ ping: @[1'u8, 2], # 2 bytes
+ pong: @[3'u8, 4], # 2 bytes
+ control: some(control) # 12 + 3 + 3 + 17 + 3 = 38 bytes
+ )
+
+ check byteSize(rpcMsg) == 28 + 38 + 2 + 2 + 38 # Total: 108 bytes
diff --git a/tests/pubsub/utils.nim b/tests/pubsub/utils.nim
index 6ac49b9b8b..b1b9d21449 100644
--- a/tests/pubsub/utils.nim
+++ b/tests/pubsub/utils.nim
@@ -5,20 +5,43 @@ const
libp2p_pubsub_anonymize {.booldefine.} = false
import hashes, random, tables, sets, sequtils
-import chronos, stew/[byteutils, results]
+import chronos, stew/[byteutils, results], chronos/ratelimit
import ../../libp2p/[builders,
protocols/pubsub/errors,
protocols/pubsub/pubsub,
+ protocols/pubsub/pubsubpeer,
protocols/pubsub/gossipsub,
protocols/pubsub/floodsub,
protocols/pubsub/rpc/messages,
protocols/secure/secure]
+import ../helpers
import chronicles
export builders
randomize()
+type
+ TestGossipSub* = ref object of GossipSub
+
+proc getPubSubPeer*(p: TestGossipSub, peerId: PeerId): PubSubPeer =
+ proc getConn(): Future[Connection] =
+ p.switch.dial(peerId, GossipSubCodec)
+
+ let pubSubPeer = PubSubPeer.new(peerId, getConn, nil, GossipSubCodec, 1024 * 1024)
+ debug "created new pubsub peer", peerId
+
+ p.peers[peerId] = pubSubPeer
+
+ onNewPeer(p, pubSubPeer)
+ pubSubPeer
+
+proc randomPeerId*(): PeerId =
+ try:
+ PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
+ except CatchableError as exc:
+ raise newException(Defect, exc.msg)
+
func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
let mid =
if m.seqno.len > 0 and m.fromPeer.data.len > 0:
@@ -44,7 +67,8 @@ proc generateNodes*(
sendSignedPeerRecord = false,
unsubscribeBackoff = 1.seconds,
maxMessageSize: int = 1024 * 1024,
- enablePX: bool = false): seq[PubSub] =
+ enablePX: bool = false,
+ overheadRateLimit: Opt[tuple[bytes: int, interval: Duration]] = Opt.none(tuple[bytes: int, interval: Duration])): seq[PubSub] =
for i in 0..<count:
diff --git a/tests/testautonatservice.nim b/tests/testautonatservice.nim
--- a/tests/testautonatservice.nim
+++ b/tests/testautonatservice.nim
- proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
awaiter.complete()
@@ -118,7 +122,7 @@ suite "Autonat Service":
let autonatClientStub = AutonatClientStub.new(expectedDials = 6)
autonatClientStub.answer = NotReachable
- let autonatService = AutonatService.new(autonatClientStub, newRng(), some(1.seconds))
+ let autonatService = AutonatService.new(autonatClientStub, newRng(), Opt.some(1.seconds))
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
@@ -127,7 +131,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
- proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Reachable
@@ -160,7 +164,7 @@ suite "Autonat Service":
asyncTest "Peer must be reachable when one connected peer has autonat disabled":
- let autonatService = AutonatService.new(AutonatClient.new(), newRng(), some(1.seconds), maxQueueSize = 2)
+ let autonatService = AutonatService.new(AutonatClient.new(), newRng(), Opt.some(1.seconds), maxQueueSize = 2)
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch(withAutonat = false)
@@ -169,7 +173,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
- proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -200,7 +204,7 @@ suite "Autonat Service":
let autonatClientStub = AutonatClientStub.new(expectedDials = 6)
autonatClientStub.answer = NotReachable
- let autonatService = AutonatService.new(autonatClientStub, newRng(), some(1.seconds), maxQueueSize = 3)
+ let autonatService = AutonatService.new(autonatClientStub, newRng(), Opt.some(1.seconds), maxQueueSize = 3)
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
@@ -209,7 +213,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
- proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Unknown
@@ -243,7 +247,7 @@ suite "Autonat Service":
asyncTest "Calling setup and stop twice must work":
let switch = createSwitch()
- let autonatService = AutonatService.new(AutonatClientStub.new(expectedDials = 0), newRng(), some(1.seconds))
+ let autonatService = AutonatService.new(AutonatClientStub.new(expectedDials = 0), newRng(), Opt.some(1.seconds))
check (await autonatService.setup(switch)) == true
check (await autonatService.setup(switch)) == false
@@ -254,14 +258,16 @@ suite "Autonat Service":
await allFuturesThrowing(switch.stop())
asyncTest "Must bypass maxConnectionsPerPeer limit":
- let autonatService = AutonatService.new(AutonatClient.new(), newRng(), some(1.seconds), maxQueueSize = 1)
+ let autonatService = AutonatService.new(AutonatClient.new(), newRng(), Opt.some(1.seconds), maxQueueSize = 1)
let switch1 = createSwitch(autonatService, maxConnsPerPeer = 0)
- let switch2 = createSwitch(maxConnsPerPeer = 0)
+ await switch1.setDNSAddr()
+
+ let switch2 = createSwitch(maxConnsPerPeer = 0, nameResolver = MockResolver.default())
let awaiter = newFuture[void]()
- proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -284,9 +290,9 @@ suite "Autonat Service":
switch1.stop(), switch2.stop())
asyncTest "Must work when peers ask each other at the same time with max 1 conn per peer":
- let autonatService1 = AutonatService.new(AutonatClient.new(), newRng(), some(500.millis), maxQueueSize = 3)
- let autonatService2 = AutonatService.new(AutonatClient.new(), newRng(), some(500.millis), maxQueueSize = 3)
- let autonatService3 = AutonatService.new(AutonatClient.new(), newRng(), some(500.millis), maxQueueSize = 3)
+ let autonatService1 = AutonatService.new(AutonatClient.new(), newRng(), Opt.some(500.millis), maxQueueSize = 3)
+ let autonatService2 = AutonatService.new(AutonatClient.new(), newRng(), Opt.some(500.millis), maxQueueSize = 3)
+ let autonatService3 = AutonatService.new(AutonatClient.new(), newRng(), Opt.some(500.millis), maxQueueSize = 3)
let switch1 = createSwitch(autonatService1, maxConnsPerPeer = 0)
let switch2 = createSwitch(autonatService2, maxConnsPerPeer = 0)
@@ -296,12 +302,12 @@ suite "Autonat Service":
let awaiter2 = newFuture[void]()
let awaiter3 = newFuture[void]()
- proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
- proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter2.finished:
awaiter2.complete()
@@ -331,15 +337,15 @@ suite "Autonat Service":
switch1.stop(), switch2.stop(), switch3.stop())
asyncTest "Must work for one peer when two peers ask each other at the same time with max 1 conn per peer":
- let autonatService1 = AutonatService.new(AutonatClient.new(), newRng(), some(500.millis), maxQueueSize = 3)
- let autonatService2 = AutonatService.new(AutonatClient.new(), newRng(), some(500.millis), maxQueueSize = 3)
+ let autonatService1 = AutonatService.new(AutonatClient.new(), newRng(), Opt.some(500.millis), maxQueueSize = 3)
+ let autonatService2 = AutonatService.new(AutonatClient.new(), newRng(), Opt.some(500.millis), maxQueueSize = 3)
let switch1 = createSwitch(autonatService1, maxConnsPerPeer = 0)
let switch2 = createSwitch(autonatService2, maxConnsPerPeer = 0)
let awaiter1 = newFuture[void]()
- proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
@@ -372,7 +378,7 @@ suite "Autonat Service":
switch1.stop(), switch2.stop())
asyncTest "Must work with low maxConnections":
- let autonatService = AutonatService.new(AutonatClient.new(), newRng(), some(1.seconds), maxQueueSize = 1)
+ let autonatService = AutonatService.new(AutonatClient.new(), newRng(), Opt.some(1.seconds), maxQueueSize = 1)
let switch1 = createSwitch(autonatService, maxConns = 4)
let switch2 = createSwitch()
@@ -382,7 +388,7 @@ suite "Autonat Service":
var awaiter = newFuture[void]()
- proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -407,7 +413,8 @@ suite "Autonat Service":
# switch1 is now full, should stick to last observation
awaiter = newFuture[void]()
await autonatService.run(switch1)
- await awaiter
+
+ await sleepAsync(200.millis)
check autonatService.networkReachability == NetworkReachability.Reachable
check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 1
@@ -421,7 +428,7 @@ suite "Autonat Service":
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
- proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
+ proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
fail()
check autonatService.networkReachability == NetworkReachability.Unknown
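
The Option → Opt changes above track nim-libp2p's move from std/options to the lighter Opt type in stew/results, whose constructors are namespaced under Opt. A minimal standalone sketch of the call-site differences (not part of the diff; assumes only the two imports shown):

import std/options
import stew/results   # provides Opt[T]

let a: Option[float] = some(0.5)    # std/options: free-standing some/none
let b: Opt[float] = Opt.some(0.5)   # results: constructors live on Opt

# Both expose isSome/get, so the handler bodies above stay unchanged:
assert a.isSome() and a.get() == 0.5
assert b.isSome() and b.get() == 0.5

let c = Opt.none(float)             # vs std/options' none(float)
assert c.isNone()
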
diff --git a/tests/testbufferstream.nim b/tests/testbufferstream.nim
index a0f83f32d6..905459de89 100644
--- a/tests/testbufferstream.nim
+++ b/tests/testbufferstream.nim
@@ -237,8 +237,6 @@ suite "BufferStream":
await stream.pushData("123".toBytes())
let push = stream.pushData("123".toBytes())
- when (NimMajor, NimMinor) < (1, 4):
- type AssertionDefect = AssertionError
expect AssertionDefect:
await stream.pushData("123".toBytes())
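
The shim deleted above dates from Nim versions before 1.4, where assertion failures raised AssertionError and the AssertionDefect name did not exist; with the version-1-2 CI lane gone, the alias is dead code. For reference, the removed pattern as a standalone sketch:

when (NimMajor, NimMinor) < (1, 4):
  type AssertionDefect = AssertionError  # pre-1.4 spelling of the same thing

proc boom() =
  doAssert false

try:
  boom()
except AssertionDefect:  # catchable by default (i.e. without --panics:on)
  echo "assertion surfaced as AssertionDefect"
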
diff --git a/tests/testcrypto.nim b/tests/testcrypto.nim
index 082ac3f1ab..e84a1ff4d8 100644
--- a/tests/testcrypto.nim
+++ b/tests/testcrypto.nim
@@ -11,6 +11,7 @@
## Test vectors were made using the Go implementation
## https://github.com/libp2p/go-libp2p-crypto/blob/master/key.go
+from std/strutils import toUpper
import unittest2
import bearssl/hash
import nimcrypto/utils
@@ -382,6 +383,31 @@ suite "Key interface test suite":
toHex(checkseckey) == stripSpaces(PrivateKeys[i])
toHex(checkpubkey) == stripSpaces(PublicKeys[i])
+ test "Spec test vectors":
+ # https://github.com/libp2p/specs/pull/537
+ const keys = [
+ (private: "08031279307702010104203E5B1FE9712E6C314942A750BD67485DE3C1EFE85B1BFB520AE8F9AE3DFA4A4CA00A06082A8648CE3D030107A14403420004DE3D300FA36AE0E8F5D530899D83ABAB44ABF3161F162A4BC901D8E6ECDA020E8B6D5F8DA30525E71D6851510C098E5C47C646A597FB4DCEC034E9F77C409E62",
+ public: "0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004de3d300fa36ae0e8f5d530899d83abab44abf3161f162a4bc901d8e6ecda020e8b6d5f8da30525e71d6851510c098e5c47c646a597fb4dcec034e9f77c409e62"),
+ (private: "080112407e0830617c4a7de83925dfb2694556b12936c477a0e1feb2e148ec9da60fee7d1ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e",
+ public: "080112201ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e"),
+ (private: "0802122053DADF1D5A164D6B4ACDB15E24AA4C5B1D3461BDBD42ABEDB0A4404D56CED8FB",
+ public: "08021221037777e994e452c21604f91de093ce415f5432f701dd8cd1a7a6fea0e630bfca99"),
+ (private: "080012ae123082092a0201000282020100e1beab071d08200bde24eef00d049449b07770ff9910257b2d7d5dda242ce8f0e2f12e1af4b32d9efd2c090f66b0f29986dbb645dae9880089704a94e5066d594162ae6ee8892e6ec70701db0a6c445c04778eb3de1293aa1a23c3825b85c6620a2bc3f82f9b0c309bc0ab3aeb1873282bebd3da03c33e76c21e9beb172fd44c9e43be32e2c99827033cf8d0f0c606f4579326c930eb4e854395ad941256542c793902185153c474bed109d6ff5141ebf9cd256cf58893a37f83729f97e7cb435ec679d2e33901d27bb35aa0d7e20561da08885ef0abbf8e2fb48d6a5487047a9ecb1ad41fa7ed84f6e3e8ecd5d98b3982d2a901b4454991766da295ab78822add5612a2df83bcee814cf50973e80d7ef38111b1bd87da2ae92438a2c8cbcc70b31ee319939a3b9c761dbc13b5c086d6b64bf7ae7dacc14622375d92a8ff9af7eb962162bbddebf90acb32adb5e4e4029f1c96019949ecfbfeffd7ac1e3fbcc6b6168c34be3d5a2e5999fcbb39bba7adbca78eab09b9bc39f7fa4b93411f4cc175e70c0a083e96bfaefb04a9580b4753c1738a6a760ae1afd851a1a4bdad231cf56e9284d832483df215a46c1c21bdf0c6cfe951c18f1ee4078c79c13d63edb6e14feaeffabc90ad317e4875fe648101b0864097e998f0ca3025ef9638cd2b0caecd3770ab54a1d9c6ca959b0f5dcbc90caeefc4135baca6fd475224269bbe1b02030100010282020100a472ffa858efd8588ce59ee264b957452f3673acdf5631d7bfd5ba0ef59779c231b0bc838a8b14cae367b6d9ef572c03c7883b0a3c652f5c24c316b1ccfd979f13d0cd7da20c7d34d9ec32dfdc81ee7292167e706d705efde5b8f3edfcba41409e642f8897357df5d320d21c43b33600a7ae4e505db957c1afbc189d73f0b5d972d9aaaeeb232ca20eebd5de6fe7f29d01470354413cc9a0af1154b7af7c1029adcd67c74b4798afeb69e09f2cb387305e73a1b5f450202d54f0ef096fe1bde340219a1194d1ac9026e90b366cce0c59b239d10e4888f52ca1780824d39ae01a6b9f4dd6059191a7f12b2a3d8db3c2868cd4e5a5862b8b625a4197d52c6ac77710116ebd3ced81c4d91ad5fdfbed68312ebce7eea45c1833ca3acf7da2052820eacf5c6b07d086dabeb893391c71417fd8a4b1829ae2cf60d1749d0e25da19530d889461c21da3492a8dc6ccac7de83ac1c2185262c7473c8cc42f547cc9864b02a8073b6aa54a037d8c0de3914784e6205e83d97918b944f11b877b12084c0dd1d36592f8a4f8b8da5bb404c3d2c079b22b6ceabfbcb637c0dbe0201f0909d533f8bf308ada47aee641a012a494d31b54c974e58b87f140258258bb82f31692659db7aa07e17a5b2a0832c24e122d3a8babcc9ee74cbb07d3058bb85b15f6f6b2674aba9fd34367be9782d444335fbed31e3c4086c652597c27104938b47fa10282010100e9fdf843c1550070ca711cb8ff28411466198f0e212511c3186623890c0071bf6561219682fe7dbdfd81176eba7c4faba21614a20721e0fcd63768e6d925688ecc90992059ac89256e0524de90bf3d8a052ce6a9f6adafa712f3107a016e20c80255c9e37d8206d1bc327e06e66eb24288da866b55904fd8b59e6b2ab31bc5eab47e597093c63fab7872102d57b4c589c66077f534a61f5f65127459a33c91f6db61fc431b1ae90be92b4149a3255291baf94304e3efb77b1107b5a3bda911359c40a53c347ff9100baf8f36dc5cd991066b5bdc28b39ed644f404afe9213f4d31c9d4e40f3a5f5e3c39bebeb244e84137544e1a1839c1c8aaebf0c78a7fad590282010100f6fa1f1e6b803742d5490b7441152f500970f46feb0b73a6e4baba2aaf3c0e245ed852fc31d86a8e46eb48e90fac409989dfee45238f97e8f1f8e83a136488c1b04b8a7fb695f37b8616307ff8a8d63e8cfa0b4fb9b9167ffaebabf111aa5a4344afbabd002ae8961c38c02da76a9149abdde93eb389eb32595c29ba30d8283a7885218a5a9d33f7f01dbdf85f3aad016c071395491338ec318d39220e1c7bd69d3d6b520a13a30d745c102b827ad9984b0dd6aed73916ffa82a06c1c111e7047dcd2668f988a0570a71474992eecf416e068f029ec323d5d635fd24694fc9bf96973c255d26c772a95bf8b7f876547a5beabf86f06cd21b67994f944e7a5493028201010095b02fd30069e547426a8bea58e8a2816f33688dac6c6f6974415af8402244a22133baedf34ce499d7036f3f19b38eb00897c18949b0c5a25953c71aeeccfc8f6594173157cc854bd98f16dffe8f28ca13b77eb43a2730585c49fc3f608cd811bb54b03b84bddaa8ef910988567f783012266199667a546a18fd88271fbf63a45ae4fd4884706da8befb9117c0a4d73de5172f8640b1091ed8a4aea3ed4641463f5ff6a5e3401ad7d0c92811f87956d1fd5f9a1d15c7f3839a08698d9f35f9d966
e5000f7cb2655d7b6c4adcd8a9d950ea5f61bb7c9a33c17508f9baa313eecfee4ae493249ebe05a5d7770bbd3551b2eeb752e3649e0636de08e3d672e66cb90282010100ad93e4c31072b063fc5ab5fe22afacece775c795d0efdf7c704cfc027bde0d626a7646fc905bb5a80117e3ca49059af14e0160089f9190065be9bfecf12c3b2145b211c8e89e42dd91c38e9aa23ca73697063564f6f6aa6590088a738722df056004d18d7bccac62b3bafef6172fc2a4b071ea37f31eff7a076bcab7dd144e51a9da8754219352aef2c73478971539fa41de4759285ea626fa3c72e7085be47d554d915bbb5149cb6ef835351f231043049cd941506a034bf2f8767f3e1e42ead92f91cb3d75549b57ef7d56ac39c2d80d67f6a2b4ca192974bfc5060e2dd171217971002193dba12e7e4133ab201f07500a90495a38610279b13a48d54f0c99028201003e3a1ac0c2b67d54ed5c4bbe04a7db99103659d33a4f9d35809e1f60c282e5988dddc964527f3b05e6cc890eab3dcb571d66debf3a5527704c87264b3954d7265f4e8d2c637dd89b491b9cf23f264801f804b90454d65af0c4c830d1aef76f597ef61b26ca857ecce9cb78d4f6c2218c00d2975d46c2b013fbf59b750c3b92d8d3ed9e6d1fd0ef1ec091a5c286a3fe2dead292f40f380065731e2079ebb9f2a7ef2c415ecbb488da98f3a12609ca1b6ec8c734032c8bd513292ff842c375d4acd1b02dfb206b24cd815f8e2f9d4af8e7dea0370b19c1b23cc531d78b40e06e1119ee2e08f6f31c6e2e8444c568d13c5d451a291ae0c9f1d4f27d23b3a00d60ad",
+ public: "080012a60430820222300d06092a864886f70d01010105000382020f003082020a0282020100e1beab071d08200bde24eef00d049449b07770ff9910257b2d7d5dda242ce8f0e2f12e1af4b32d9efd2c090f66b0f29986dbb645dae9880089704a94e5066d594162ae6ee8892e6ec70701db0a6c445c04778eb3de1293aa1a23c3825b85c6620a2bc3f82f9b0c309bc0ab3aeb1873282bebd3da03c33e76c21e9beb172fd44c9e43be32e2c99827033cf8d0f0c606f4579326c930eb4e854395ad941256542c793902185153c474bed109d6ff5141ebf9cd256cf58893a37f83729f97e7cb435ec679d2e33901d27bb35aa0d7e20561da08885ef0abbf8e2fb48d6a5487047a9ecb1ad41fa7ed84f6e3e8ecd5d98b3982d2a901b4454991766da295ab78822add5612a2df83bcee814cf50973e80d7ef38111b1bd87da2ae92438a2c8cbcc70b31ee319939a3b9c761dbc13b5c086d6b64bf7ae7dacc14622375d92a8ff9af7eb962162bbddebf90acb32adb5e4e4029f1c96019949ecfbfeffd7ac1e3fbcc6b6168c34be3d5a2e5999fcbb39bba7adbca78eab09b9bc39f7fa4b93411f4cc175e70c0a083e96bfaefb04a9580b4753c1738a6a760ae1afd851a1a4bdad231cf56e9284d832483df215a46c1c21bdf0c6cfe951c18f1ee4078c79c13d63edb6e14feaeffabc90ad317e4875fe648101b0864097e998f0ca3025ef9638cd2b0caecd3770ab54a1d9c6ca959b0f5dcbc90caeefc4135baca6fd475224269bbe1b0203010001"),
+
+ ]
+ for (private, public) in keys:
+ var seckey = PrivateKey.init(fromHex(private)).expect("private key")
+ var pubkey = PublicKey.init(fromHex(public)).expect("public key")
+ var calckey = seckey.getPublicKey().expect("public key")
+ check:
+ pubkey == calckey
+ var checkseckey = seckey.getBytes().expect("private key")
+ var checkpubkey = pubkey.getBytes().expect("public key")
+ check:
+ toHex(checkseckey) == stripSpaces(private).toUpper()
+ toHex(checkpubkey) == stripSpaces(public).toUpper()
+
test "Generate/Sign/Serialize/Deserialize/Verify test":
var msg = "message to sign"
var bmsg = cast[seq[byte]](msg)
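
The toUpper on the expected side of the new checks compensates for case: nimcrypto's toHex emits uppercase hex, while the spec vectors above mix cases. A standalone round-trip sketch under that assumption:

from std/strutils import toUpper
import nimcrypto/utils  # fromHex / toHex

let vector = "08021221037777e994e452c216"   # lowercase, spec-style
let bytes = fromHex(vector)
assert toHex(bytes) == vector.toUpper()     # toHex output is uppercase
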
diff --git a/tests/testdcutr.nim b/tests/testdcutr.nim
index 8cb7e946c4..83822e872e 100644
--- a/tests/testdcutr.nim
+++ b/tests/testdcutr.nim
@@ -90,7 +90,12 @@ suite "Dcutr":
asyncTest "Client connect timeout":
- proc connectTimeoutProc(): Future[void] {.async.} =
+ proc connectTimeoutProc(self: SwitchStub,
+ peerId: PeerId,
+ addrs: seq[MultiAddress],
+ forceDial = false,
+ reuseConnection = true,
+ upgradeDir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc)
@@ -104,7 +109,12 @@ suite "Dcutr":
asyncTest "All client connect attempts fail":
- proc connectErrorProc(): Future[void] {.async.} =
+ proc connectErrorProc(self: SwitchStub,
+ peerId: PeerId,
+ addrs: seq[MultiAddress],
+ forceDial = false,
+ reuseConnection = true,
+ upgradeDir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectErrorProc)
@@ -116,7 +126,7 @@ suite "Dcutr":
except DcutrError as err:
check err.parent of AllFuturesFailedError
- proc ductrServerTest(connectStub: proc (): Future[void] {.async.}) {.async.} =
+ proc ductrServerTest(connectStub: connectStubType) {.async.} =
let behindNATSwitch = newStandardSwitch()
let publicSwitch = SwitchStub.new(newStandardSwitch())
@@ -144,14 +154,24 @@ suite "Dcutr":
asyncTest "DCUtR server timeout when establishing a new connection":
- proc connectProc(): Future[void] {.async.} =
+ proc connectProc(self: SwitchStub,
+ peerId: PeerId,
+ addrs: seq[MultiAddress],
+ forceDial = false,
+ reuseConnection = true,
+ upgradeDir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
await ductrServerTest(connectProc)
asyncTest "DCUtR server error when establishing a new connection":
- proc connectProc(): Future[void] {.async.} =
+ proc connectProc(self: SwitchStub,
+ peerId: PeerId,
+ addrs: seq[MultiAddress],
+ forceDial = false,
+ reuseConnection = true,
+ upgradeDir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
await ductrServerTest(connectProc)
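
All the stubs above now share one signature through connectStubType, whose definition lives in tests/stubs/switchstub.nim and is not part of this diff. The assumed shape, inferred from the stub bodies (a sketch, not the authoritative definition):

# SwitchStub, PeerId, MultiAddress and Direction come from the test stubs
# and libp2p modules already imported by this test file.
type connectStubType* = proc(
    self: SwitchStub,
    peerId: PeerId,
    addrs: seq[MultiAddress],
    forceDial = false,
    reuseConnection = true,
    upgradeDir = Direction.Out): Future[void] {.gcsafe, async.}
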
diff --git a/tests/testhpservice.nim b/tests/testhpservice.nim
index 8ff6efcf20..4f6c7fb5c5 100644
--- a/tests/testhpservice.nim
+++ b/tests/testhpservice.nim
@@ -9,10 +9,7 @@
{.used.}
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import chronos
@@ -21,19 +18,15 @@ import ./helpers
import ./stubs/switchstub
import ../libp2p/[builders,
switch,
+ wire,
services/hpservice,
services/autorelayservice]
import ../libp2p/protocols/connectivity/relay/[relay, client]
import ../libp2p/protocols/connectivity/autonat/[service]
-import ../libp2p/nameresolving/nameresolver
-import ../libp2p/nameresolving/mockresolver
-
+import ../libp2p/nameresolving/[nameresolver, mockresolver]
import stubs/autonatclientstub
-proc isPublicAddrIPAddrMock(ta: TransportAddress): bool =
- return true
-
-proc createSwitch(r: Relay = nil, hpService: Service = nil, nameResolver: NameResolver = nil): Switch {.raises: [LPError, Defect].} =
+proc createSwitch(r: Relay = nil, hpService: Service = nil, nameResolver: NameResolver = nil): Switch {.raises: [LPError].} =
var builder = SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
@@ -71,19 +64,25 @@ suite "Hole Punching":
let privatePeerRelayAddr = newFuture[seq[MultiAddress]]()
let publicPeerSwitch = createSwitch(RelayClient.new())
+
+ proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
+ return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
+ publicPeerSwitch.peerInfo.addressMappers.add(addressMapper)
+ await publicPeerSwitch.peerInfo.update()
+
proc checkMA(address: seq[MultiAddress]) =
if not privatePeerRelayAddr.completed():
privatePeerRelayAddr.complete(address)
let autoRelayService = AutoRelayService.new(1, relayClient, checkMA, newRng())
- let hpservice = HPService.new(autonatService, autoRelayService, isPublicAddrIPAddrMock)
+ let hpservice = HPService.new(autonatService, autoRelayService)
- let privatePeerSwitch = createSwitch(relayClient, hpservice)
+ let privatePeerSwitch = createSwitch(relayClient, hpservice, nameresolver = MockResolver.default())
let peerSwitch = createSwitch()
let switchRelay = createSwitch(Relay.new())
- await allFutures(switchRelay.start(), privatePeerSwitch.start(), publicPeerSwitch.start(), peerSwitch.start())
+ await allFuturesThrowing(switchRelay.start(), privatePeerSwitch.start(), publicPeerSwitch.start(), peerSwitch.start())
await privatePeerSwitch.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs)
await privatePeerSwitch.connect(peerSwitch.peerInfo.peerId, peerSwitch.peerInfo.addrs) # for autonat
@@ -106,16 +105,8 @@ suite "Hole Punching":
let relayClient = RelayClient.new()
let privatePeerRelayAddr = newFuture[seq[MultiAddress]]()
- let resolver = MockResolver.new()
- resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]
- resolver.ipResponses[("localhost", true)] = @["::1"]
-
- let publicPeerSwitch = createSwitch(RelayClient.new(), nameResolver = resolver)
-
- proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
- return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
- publicPeerSwitch.peerInfo.addressMappers.add(addressMapper)
- await publicPeerSwitch.peerInfo.update()
+ let publicPeerSwitch = createSwitch(RelayClient.new())
+ await publicPeerSwitch.setDNSAddr()
proc checkMA(address: seq[MultiAddress]) =
if not privatePeerRelayAddr.completed():
@@ -123,13 +114,13 @@ suite "Hole Punching":
let autoRelayService = AutoRelayService.new(1, relayClient, checkMA, newRng())
- let hpservice = HPService.new(autonatService, autoRelayService, isPublicAddrIPAddrMock)
+ let hpservice = HPService.new(autonatService, autoRelayService)
- let privatePeerSwitch = createSwitch(relayClient, hpservice, nameResolver = resolver)
+ let privatePeerSwitch = createSwitch(relayClient, hpservice, nameResolver = MockResolver.default())
let peerSwitch = createSwitch()
let switchRelay = createSwitch(Relay.new())
- await allFutures(switchRelay.start(), privatePeerSwitch.start(), publicPeerSwitch.start(), peerSwitch.start())
+ await allFuturesThrowing(switchRelay.start(), privatePeerSwitch.start(), publicPeerSwitch.start(), peerSwitch.start())
await privatePeerSwitch.connect(switchRelay.peerInfo.peerId, switchRelay.peerInfo.addrs)
await privatePeerSwitch.connect(peerSwitch.peerInfo.peerId, peerSwitch.peerInfo.addrs) # for autonat
@@ -143,9 +134,7 @@ suite "Hole Punching":
await allFuturesThrowing(
privatePeerSwitch.stop(), publicPeerSwitch.stop(), switchRelay.stop(), peerSwitch.stop())
- proc holePunchingTest(connectStub: proc (): Future[void] {.async.},
- isPublicIPAddrProc: IsPublicIPAddrProc,
- answer: Answer) {.async.} =
+ proc holePunchingTest(initiatorConnectStub: connectStubType, rcvConnectStub: connectStubType, answer: Answer) {.async.} =
# There's no check in this test because it can't test hole punching locally. It exists just to make sure the rest of
# the code works properly.
@@ -168,11 +157,12 @@ suite "Hole Punching":
let autoRelayService1 = AutoRelayService.new(1, relayClient1, checkMA, newRng())
let autoRelayService2 = AutoRelayService.new(1, relayClient2, nil, newRng())
- let hpservice1 = HPService.new(autonatService1, autoRelayService1, isPublicIPAddrProc)
+ let hpservice1 = HPService.new(autonatService1, autoRelayService1)
let hpservice2 = HPService.new(autonatService2, autoRelayService2)
- let privatePeerSwitch1 = SwitchStub.new(createSwitch(relayClient1, hpservice1))
- let privatePeerSwitch2 = createSwitch(relayClient2, hpservice2)
+ let privatePeerSwitch1 = SwitchStub.new(createSwitch(relayClient1, hpservice1, nameresolver = MockResolver.default()))
+ let privatePeerSwitch2 = SwitchStub.new(createSwitch(relayClient2, hpservice2))
+ await privatePeerSwitch2.setDNSAddr()
let switchRelay = createSwitch(Relay.new())
let switchAux = createSwitch()
let switchAux2 = createSwitch()
@@ -181,7 +171,7 @@ suite "Hole Punching":
var awaiter = newFuture[void]()
- await allFutures(
+ await allFuturesThrowing(
switchRelay.start(), privatePeerSwitch1.start(), privatePeerSwitch2.start(),
switchAux.start(), switchAux2.start(), switchAux3.start(), switchAux4.start()
)
@@ -199,21 +189,43 @@ suite "Hole Punching":
await privatePeerSwitch2.connect(switchAux3.peerInfo.peerId, switchAux3.peerInfo.addrs)
await privatePeerSwitch2.connect(switchAux4.peerInfo.peerId, switchAux4.peerInfo.addrs)
- privatePeerSwitch1.connectStub = connectStub
+ privatePeerSwitch1.connectStub = initiatorConnectStub
await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1))
+ privatePeerSwitch2.connectStub = rcvConnectStub
- await sleepAsync(200.millis)
+ checkExpiring:
+ # We can't hole punch when both peers are on the same machine. This means the simultaneous dialing results
+ # in two connection attempts instead of one. The server dial will fail because it acts as the
+ # TCP simultaneous-incoming upgrader in the dialer, which only works in the simultaneous open case, but the client
+ # dial will succeed.
+ privatePeerSwitch1.connManager.connCount(privatePeerSwitch2.peerInfo.peerId) == 1 and
+ not isRelayed(privatePeerSwitch1.connManager.selectMuxer(privatePeerSwitch2.peerInfo.peerId).connection)
await allFuturesThrowing(
privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(),
switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop())
asyncTest "Hole punching when peers addresses are private":
- await holePunchingTest(nil, isGlobal, NotReachable)
+ proc connectStub(self: SwitchStub,
+ peerId: PeerId,
+ addrs: seq[MultiAddress],
+ forceDial = false,
+ reuseConnection = true,
+ upgradeDir = Direction.Out): Future[void] {.async.} =
+ self.connectStub = nil # this stub should be called only once
+ await sleepAsync(100.millis) # avoid simultaneous dialing, which causes an address-in-use error
+ await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
+ await holePunchingTest(nil, connectStub, NotReachable)
asyncTest "Hole punching when there is an error during unilateral direct connection":
- proc connectStub(): Future[void] {.async.} =
+ proc connectStub(self: SwitchStub,
+ peerId: PeerId,
+ addrs: seq[MultiAddress],
+ forceDial = false,
+ reuseConnection = true,
+ upgradeDir = Direction.Out): Future[void] {.async.} =
+ self.connectStub = nil # this stub should be called only once
raise newException(CatchableError, "error")
- await holePunchingTest(connectStub, isPublicAddrIPAddrMock, Reachable)
+ await holePunchingTest(connectStub, nil, Reachable)
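
setDNSAddr and MockResolver.default() absorb the resolver wiring this diff deletes; both live in the test support modules rather than in this file. Plausible shapes, reconstructed from the removed lines (assumptions, not the authoritative definitions):

proc setDNSAddr(switch: Switch) {.async.} =
  # Advertise a /dns4/localhost/... address in place of the raw listen IP,
  # as the deleted addressMapper did.
  proc addressMapper(listenAddrs: seq[MultiAddress]):
      Future[seq[MultiAddress]] {.gcsafe, async.} =
    return @[MultiAddress.init("/dns4/localhost/").tryGet() &
             listenAddrs[0][1].tryGet()]
  switch.peerInfo.addressMappers.add(addressMapper)
  await switch.peerInfo.update()

proc default(T: typedesc[MockResolver]): MockResolver =
  # A resolver pre-seeded with the "localhost" answers the old code set up.
  result = MockResolver.new()
  result.ipResponses[("localhost", false)] = @["127.0.0.1"]
  result.ipResponses[("localhost", true)] = @["::1"]
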
diff --git a/tests/testinterop.nim b/tests/testinterop.nim
index b53b5afddb..b2b0e2e8d1 100644
--- a/tests/testinterop.nim
+++ b/tests/testinterop.nim
@@ -6,7 +6,7 @@ proc switchMplexCreator(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov: TransportProvider = proc(upgr: Upgrade): Transport = TcpTransport.new({}, upgr),
relay: Relay = Relay.new(circuitRelayV1 = true)):
- Switch {.raises: [Defect, LPError].} =
+ Switch {.raises: [LPError].} =
SwitchBuilder.new()
.withSignedPeerRecord(false)
@@ -28,7 +28,7 @@ proc switchYamuxCreator(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov: TransportProvider = proc(upgr: Upgrade): Transport = TcpTransport.new({}, upgr),
relay: Relay = Relay.new(circuitRelayV1 = true)):
- Switch {.raises: [Defect, LPError].} =
+ Switch {.raises: [LPError].} =
SwitchBuilder.new()
.withSignedPeerRecord(false)
diff --git a/tests/testmultiaddress.nim b/tests/testmultiaddress.nim
index b7bcebc6de..025909e48b 100644
--- a/tests/testmultiaddress.nim
+++ b/tests/testmultiaddress.nim
@@ -9,10 +9,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import std/sequtils
@@ -67,6 +64,8 @@ const
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
"/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
"/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
+ "/ip4/127.0.0.1/tcp/8000/wss/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
+ "/ip4/127.0.0.1/tcp/8000/tls/ws/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
"/unix/a/b/c/d/e",
"/unix/stdio",
"/ip4/1.2.3.4/tcp/80/unix/a/b/c/d/e/f",
diff --git a/tests/testmultistream.nim b/tests/testmultistream.nim
index f9913e585f..c029a237c2 100644
--- a/tests/testmultistream.nim
+++ b/tests/testmultistream.nim
@@ -21,10 +21,7 @@ import ../libp2p/multistream,
../libp2p/upgrademngrs/upgrade
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import ./helpers
@@ -79,7 +76,7 @@ proc newTestSelectStream(): TestSelectStream =
## Mock stream for the `ls` handling test
type
- LsHandler = proc(procs: seq[byte]): Future[void] {.gcsafe, raises: [Defect].}
+ LsHandler = proc(procs: seq[byte]): Future[void] {.gcsafe, raises: [].}
TestLsStream = ref object of Connection
step*: int
@@ -131,7 +128,7 @@ proc newTestLsStream(ls: LsHandler): TestLsStream {.gcsafe.} =
## Mock stream for the `na` handling test
type
- NaHandler = proc(procs: string): Future[void] {.gcsafe, raises: [Defect].}
+ NaHandler = proc(procs: string): Future[void] {.gcsafe, raises: [].}
TestNaStream = ref object of Connection
step*: int
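
Dropping Defect from the raises lists above is safe because, since Nim 1.4, the effect system no longer tracks Defects: raises: [] and raises: [Defect] accept the same bodies. A standalone sketch:

{.push raises: [].}

proc safeDouble(x: int): int =
  # doAssert can raise AssertionDefect, but Defects are exempt from raises
  # tracking on Nim >= 1.4, so this compiles under raises: [].
  doAssert x < high(int) div 2
  x * 2

{.pop.}

echo safeDouble(21)
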
diff --git a/tests/testnoise.nim b/tests/testnoise.nim
index 074d850441..f598b42029 100644
--- a/tests/testnoise.nim
+++ b/tests/testnoise.nim
@@ -38,10 +38,7 @@ const
type
TestProto = ref object of LPProtocol
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
method init(p: TestProto) {.gcsafe.} =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
@@ -75,7 +72,7 @@ proc createSwitch(ma: MultiAddress; outgoing: bool, secio: bool = false): (Switc
[Secure(Noise.new(rng, privateKey, outgoing = outgoing))]
connManager = ConnManager.new()
ms = MultistreamSelect.new()
- muxedUpgrade = MuxedUpgrade.new(muxers, secureManagers, connManager, ms)
+ muxedUpgrade = MuxedUpgrade.new(muxers, secureManagers, ms)
transports = @[Transport(TcpTransport.new(upgrade = muxedUpgrade))]
let switch = newSwitch(
diff --git a/tests/testrelayv1.nim b/tests/testrelayv1.nim
index cda512e994..b461f2d157 100644
--- a/tests/testrelayv1.nim
+++ b/tests/testrelayv1.nim
@@ -48,20 +48,20 @@ suite "Circuit Relay":
r {.threadvar.}: Relay
conn {.threadvar.}: Connection
msg {.threadvar.}: ProtoBuffer
- rcv {.threadvar.}: Option[RelayMessage]
+ rcv {.threadvar.}: Opt[RelayMessage]
proc createMsg(
- msgType: Option[RelayType] = RelayType.none,
- status: Option[StatusV1] = StatusV1.none,
- src: Option[RelayPeer] = RelayPeer.none,
- dst: Option[RelayPeer] = RelayPeer.none): ProtoBuffer =
+ msgType: Opt[RelayType] = Opt.none(RelayType),
+ status: Opt[StatusV1] = Opt.none(StatusV1),
+ src: Opt[RelayPeer] = Opt.none(RelayPeer),
+ dst: Opt[RelayPeer] = Opt.none(RelayPeer)): ProtoBuffer =
encode(RelayMessage(msgType: msgType, srcPeer: src, dstPeer: dst, status: status))
- proc checkMsg(msg: Option[RelayMessage],
- msgType: Option[RelayType] = none[RelayType](),
- status: Option[StatusV1] = none[StatusV1](),
- src: Option[RelayPeer] = none[RelayPeer](),
- dst: Option[RelayPeer] = none[RelayPeer]()) =
+ proc checkMsg(msg: Opt[RelayMessage],
+ msgType: Opt[RelayType] = Opt.none(RelayType),
+ status: Opt[StatusV1] = Opt.none(StatusV1),
+ src: Opt[RelayPeer] = Opt.none(RelayPeer),
+ dst: Opt[RelayPeer] = Opt.none(RelayPeer)) =
check: msg.isSome
let m = msg.get()
check: m.msgType == msgType
@@ -119,116 +119,116 @@ suite "Circuit Relay":
await srelay.start()
asyncTest "Handle CanHop":
- msg = createMsg(some(CanHop))
+ msg = createMsg(Opt.some(CanHop))
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(StatusV1.Success))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(StatusV1.Success))
conn = await src.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayV1Codec)
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopCantSpeakRelay))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopCantSpeakRelay))
await conn.close()
asyncTest "Malformed":
conn = await srelay.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Status))
+ msg = createMsg(Opt.some(RelayType.Status))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
await conn.close()
- rcv.checkMsg(some(RelayType.Status), some(StatusV1.MalformedMessage))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(StatusV1.MalformedMessage))
asyncTest "Handle Stop Error":
conn = await srelay.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Stop),
- none(StatusV1),
- none(RelayPeer),
- some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
+ msg = createMsg(Opt.some(RelayType.Stop),
+ Opt.none(StatusV1),
+ Opt.none(RelayPeer),
+ Opt.some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(StopSrcMultiaddrInvalid))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(StopSrcMultiaddrInvalid))
conn = await srelay.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Stop),
- none(StatusV1),
- some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
- none(RelayPeer))
+ msg = createMsg(Opt.some(RelayType.Stop),
+ Opt.none(StatusV1),
+ Opt.some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+ Opt.none(RelayPeer))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(StopDstMultiaddrInvalid))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(StopDstMultiaddrInvalid))
conn = await srelay.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Stop),
- none(StatusV1),
- some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)),
- some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)))
+ msg = createMsg(Opt.some(RelayType.Stop),
+ Opt.none(StatusV1),
+ Opt.some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)),
+ Opt.some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
await conn.close()
- rcv.checkMsg(some(RelayType.Status), some(StopDstMultiaddrInvalid))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(StopDstMultiaddrInvalid))
asyncTest "Handle Hop Error":
conn = await src.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Hop))
+ msg = createMsg(Opt.some(RelayType.Hop))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopCantSpeakRelay))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopCantSpeakRelay))
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Hop),
- none(StatusV1),
- none(RelayPeer),
- some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
+ msg = createMsg(Opt.some(RelayType.Hop),
+ Opt.none(StatusV1),
+ Opt.none(RelayPeer),
+ Opt.some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopSrcMultiaddrInvalid))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopSrcMultiaddrInvalid))
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Hop),
- none(StatusV1),
- some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)),
- some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
+ msg = createMsg(Opt.some(RelayType.Hop),
+ Opt.none(StatusV1),
+ Opt.some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)),
+ Opt.some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopSrcMultiaddrInvalid))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopSrcMultiaddrInvalid))
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Hop),
- none(StatusV1),
- some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
- none(RelayPeer))
+ msg = createMsg(Opt.some(RelayType.Hop),
+ Opt.none(StatusV1),
+ Opt.some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+ Opt.none(RelayPeer))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopDstMultiaddrInvalid))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopDstMultiaddrInvalid))
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Hop),
- none(StatusV1),
- some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
- some(RelayPeer(peerId: srelay.peerInfo.peerId, addrs: srelay.peerInfo.addrs)))
+ msg = createMsg(Opt.some(RelayType.Hop),
+ Opt.none(StatusV1),
+ Opt.some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+ Opt.some(RelayPeer(peerId: srelay.peerInfo.peerId, addrs: srelay.peerInfo.addrs)))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopCantRelayToSelf))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopCantRelayToSelf))
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Hop),
- none(StatusV1),
- some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
- some(RelayPeer(peerId: srelay.peerInfo.peerId, addrs: srelay.peerInfo.addrs)))
+ msg = createMsg(Opt.some(RelayType.Hop),
+ Opt.none(StatusV1),
+ Opt.some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+ Opt.some(RelayPeer(peerId: srelay.peerInfo.peerId, addrs: srelay.peerInfo.addrs)))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopCantRelayToSelf))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopCantRelayToSelf))
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Hop),
- none(StatusV1),
- some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
- some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
+ msg = createMsg(Opt.some(RelayType.Hop),
+ Opt.none(StatusV1),
+ Opt.some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+ Opt.some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopNoConnToDst))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopNoConnToDst))
await srelay.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
@@ -237,7 +237,7 @@ suite "Circuit Relay":
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopCantSpeakRelay))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopCantSpeakRelay))
r.maxCircuit = tmp
await conn.close()
@@ -246,7 +246,7 @@ suite "Circuit Relay":
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopCantSpeakRelay))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopCantSpeakRelay))
r.maxCircuitPerPeer = tmp
await conn.close()
@@ -255,13 +255,13 @@ suite "Circuit Relay":
await srelay.connect(dst2.peerInfo.peerId, dst2.peerInfo.addrs)
conn = await src.dial(srelay.peerInfo.peerId, srelay.peerInfo.addrs, RelayV1Codec)
- msg = createMsg(some(RelayType.Hop),
- none(StatusV1),
- some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
- some(RelayPeer(peerId: dst2.peerInfo.peerId, addrs: dst2.peerInfo.addrs)))
+ msg = createMsg(Opt.some(RelayType.Hop),
+ Opt.none(StatusV1),
+ Opt.some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+ Opt.some(RelayPeer(peerId: dst2.peerInfo.peerId, addrs: dst2.peerInfo.addrs)))
await conn.writeLp(msg.buffer)
rcv = RelayMessage.decode(await conn.readLp(1024))
- rcv.checkMsg(some(RelayType.Status), some(HopCantDialDst))
+ rcv.checkMsg(Opt.some(RelayType.Status), Opt.some(HopCantDialDst))
await allFutures(dst2.stop())
asyncTest "Dial Peer":
diff --git a/tests/testrelayv2.nim b/tests/testrelayv2.nim
index 41267aaa4f..6802e8d0ef 100644
--- a/tests/testrelayv2.nim
+++ b/tests/testrelayv2.nim
@@ -81,7 +81,7 @@ suite "Circuit Relay V2":
let msg = HopMessage.decode(await conn.readLp(RelayMsgSize)).get()
check:
msg.msgType == HopMessageType.Status
- msg.status == some(StatusV2.ReservationRefused)
+ msg.status == Opt.some(StatusV2.ReservationRefused)
asyncTest "Too many reservations + Reconnect":
expect(ReservationError):
diff --git a/tests/testrendezvous.nim b/tests/testrendezvous.nim
index 14adc00cf8..6c7d248519 100644
--- a/tests/testrendezvous.nim
+++ b/tests/testrendezvous.nim
@@ -14,6 +14,7 @@ import chronos
import ../libp2p/[protocols/rendezvous,
switch,
builders,]
+import ../libp2p/discovery/[rendezvousinterface, discoverymngr]
import ./helpers
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
diff --git a/tests/testrendezvousinterface.nim b/tests/testrendezvousinterface.nim
new file mode 100644
index 0000000000..b612094ef3
--- /dev/null
+++ b/tests/testrendezvousinterface.nim
@@ -0,0 +1,73 @@
+{.used.}
+
+# Nim-Libp2p
+# Copyright (c) 2023 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+# * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+import sequtils, strutils
+import chronos
+import ../libp2p/[protocols/rendezvous,
+ switch,
+ builders,]
+import ../libp2p/discovery/[rendezvousinterface, discoverymngr]
+import ./helpers
+
+proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
+ SwitchBuilder.new()
+ .withRng(newRng())
+ .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
+ .withTcpTransport()
+ .withMplex()
+ .withNoise()
+ .withRendezVous(rdv)
+ .build()
+
+type
+ MockRendezVous = ref object of RendezVous
+ numAdvertiseNs1: int
+ numAdvertiseNs2: int
+
+ MockErrorRendezVous = ref object of MockRendezVous
+
+method advertise*(self: MockRendezVous, namespace: string, ttl: Duration) {.async.} =
+ if namespace == "ns1":
+ self.numAdvertiseNs1 += 1
+ elif namespace == "ns2":
+ self.numAdvertiseNs2 += 1
+ # Forward the call to the actual implementation
+ await procCall RendezVous(self).advertise(namespace, ttl)
+
+method advertise*(self: MockErrorRendezVous, namespace: string, ttl: Duration) {.async.} =
+ await procCall MockRendezVous(self).advertise(namespace, ttl)
+ raise newException(CatchableError, "MockErrorRendezVous.advertise")
+
+suite "RendezVous Interface":
+ teardown:
+ checkTrackers()
+
+ proc baseTimeToAdvertiseTest(rdv: MockRendezVous) {.async.} =
+ let
+ tta = 100.milliseconds
+ ttl = 2.hours
+ client = createSwitch(rdv)
+ dm = DiscoveryManager()
+
+ await client.start()
+ dm.add(RendezVousInterface.new(rdv = rdv, tta = tta, ttl = ttl))
+ dm.advertise(RdvNamespace("ns1"))
+ dm.advertise(RdvNamespace("ns2"))
+
+ checkExpiring: rdv.numAdvertiseNs1 >= 5
+ checkExpiring: rdv.numAdvertiseNs2 >= 5
+ await client.stop()
+
+ asyncTest "Check timeToAdvertise interval":
+ await baseTimeToAdvertiseTest(MockRendezVous.new(newRng()))
+
+ asyncTest "Check timeToAdvertise interval when there is an error":
+ await baseTimeToAdvertiseTest(MockErrorRendezVous.new(newRng()))
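
The mock advertise methods above reach the overridden base behaviour through procCall, Nim's equivalent of a super call for methods. The pattern in isolation:

type
  Base = ref object of RootObj
  Counting = ref object of Base
    calls: int

method greet(self: Base): string {.base.} =
  "hello"

method greet(self: Counting): string =
  inc self.calls
  # procCall bypasses dynamic dispatch and runs Base's implementation,
  # just as MockRendezVous.advertise forwards to RendezVous.advertise.
  procCall Base(self).greet()

let c = Counting()
assert c.greet() == "hello"
assert c.calls == 1
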
diff --git a/tests/testtcptransport.nim b/tests/testtcptransport.nim
index 51829dbd44..edcc17c19d 100644
--- a/tests/testtcptransport.nim
+++ b/tests/testtcptransport.nim
@@ -174,6 +174,23 @@ suite "TCP transport":
proc transProvider(): Transport = TcpTransport.new(upgrade = Upgrade())
+ asyncTest "Custom timeout":
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade(), connectionsTimeout=1.milliseconds)
+ asyncSpawn transport.start(ma)
+
+ proc acceptHandler() {.async, gcsafe.} =
+ let conn = await transport.accept()
+ await conn.join()
+
+ let handlerWait = acceptHandler()
+
+ let streamTransport = await connect(transport.addrs[0])
+ await handlerWait.wait(1.seconds) # when there are no issues, this returns well before the timeout
+ await streamTransport.closeWait()
+ await transport.stop()
+
+
commonTransportTest(
transProvider,
"/ip4/0.0.0.0/tcp/0")
diff --git a/tests/testtortransport.nim b/tests/testtortransport.nim
index 2136064798..5c2cfe8bd3 100644
--- a/tests/testtortransport.nim
+++ b/tests/testtortransport.nim
@@ -9,10 +9,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
- {.push raises: [Defect].}
-else:
- {.push raises: [].}
+{.push raises: [].}
import tables
import chronos, stew/[byteutils]
@@ -135,9 +132,6 @@ suite "Tor transport":
await serverSwitch.stop()
test "It's not possible to add another transport in TorSwitch":
- when (NimMajor, NimMinor, NimPatch) < (1, 4, 0):
- type AssertionDefect = AssertionError
-
let torSwitch = TorSwitch.new(torServer = torServer, rng= rng, flags = {ReuseAddr})
expect(AssertionDefect):
torSwitch.addTransport(TcpTransport.new(upgrade = Upgrade()))
diff --git a/tests/testutility.nim b/tests/testutility.nim
index e6a499fa39..3d655a90ae 100644
--- a/tests/testutility.nim
+++ b/tests/testutility.nim
@@ -9,7 +9,6 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-import strformat
import ./helpers
import ../libp2p/utility
diff --git a/tests/testwstransport.nim b/tests/testwstransport.nim
index b53f273088..d56fb36ba7 100644
--- a/tests/testwstransport.nim
+++ b/tests/testwstransport.nim
@@ -86,7 +86,9 @@ suite "WebSocket transport":
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0/wss").tryGet()]
let transport1 = WsTransport.new(Upgrade(), TLSPrivateKey.init(SecureKey), TLSCertificate.init(SecureCert), {TLSFlags.NoVerifyHost})
+ const correctPattern = mapAnd(TCP, mapEq("wss"))
await transport1.start(ma)
+ check correctPattern.match(transport1.addrs[0])
proc acceptHandler() {.async, gcsafe.} =
while true:
let conn = await transport1.accept()
@@ -108,3 +110,21 @@ suite "WebSocket transport":
await handlerWait.cancelAndWait()
await transport1.stop()
+
+ asyncTest "handles tls/ws":
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0/tls/ws").tryGet()]
+ let transport1 = wsSecureTranspProvider()
+ const correctPattern = mapAnd(TCP, mapEq("tls"), mapEq("ws"))
+ await transport1.start(ma)
+ check transport1.handles(transport1.addrs[0])
+ check correctPattern.match(transport1.addrs[0])
+
+ # Would raise somewhere if this wasn't handled:
+ let
+ inboundConn = transport1.accept()
+ outboundConn = await transport1.dial(transport1.addrs[0])
+ closing = outboundConn.close()
+ await (await inboundConn).close()
+ await closing
+
+ await transport1.stop()
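
For reference, mapAnd and mapEq compose the MultiAddress patterns used in the checks above: mapEq matches a single protocol segment by name, and mapAnd requires each sub-pattern to match in order. A standalone sketch (import path adjusted for use outside the test tree):

import libp2p/multiaddress

# TCP is a predefined pattern; mapEq("tls")/mapEq("ws") match one segment each.
const tlsWs = mapAnd(TCP, mapEq("tls"), mapEq("ws"))

let secure = MultiAddress.init("/ip4/127.0.0.1/tcp/8000/tls/ws").tryGet()
let plain = MultiAddress.init("/ip4/127.0.0.1/tcp/8000/ws").tryGet()

assert tlsWs.match(secure)      # tcp -> tls -> ws: matches
assert not tlsWs.match(plain)   # the plain address lacks the tls segment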