diff --git a/.flake8 b/.flake8
index 16d88aea3..0170d024a 100644
--- a/.flake8
+++ b/.flake8
@@ -6,7 +6,6 @@ exclude =
     # have to skip because the file should be fixed
    setup.py,
     # lots unused import because of fixtures
-    indy_client/test,
     indy_node/test,
     # config file
     docker-files/indy_config.py
diff --git a/.travis.yml b/.travis.yml
index c46e56af3..4a15e4dd1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,7 +18,6 @@ before_install:
 
 install:
   - pip install -U -e .
-  - pip install -U indy-client
 
 script:
   - python -m indy_node.test
diff --git a/Jenkinsfile b/Jenkinsfile
index bcc491aff..12a538405 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -27,29 +27,6 @@ def nodeTestUbuntu = {
     }
 }
 
-def clientTestUbuntu = {
-    try {
-        echo 'Ubuntu Test: Checkout csm'
-        checkout scm
-
-        echo 'Ubuntu Test: Build docker image'
-        def testEnv = dockerHelpers.build(name)
-
-        testEnv.inside('--network host') {
-            echo 'Ubuntu Test: Install dependencies'
-            testHelpers.install()
-
-            echo 'Ubuntu Test: Test'
-            testHelpers.testRunner([resFile: "test-result-client.${NODE_NAME}.txt", testDir: 'indy_client'])
-            //testHelpers.testJUnit(resFile: "test-result-client.${NODE_NAME}.xml")
-        }
-    }
-    finally {
-        echo 'Ubuntu Test: Cleanup'
-        step([$class: 'WsCleanup'])
-    }
-}
-
 def commonTestUbuntu = {
     try {
         echo 'Ubuntu Test: Checkout csm'
@@ -90,4 +67,4 @@ def buildDebUbuntu = { repoName, releaseVersion, sourcePath ->
 options = new TestAndPublishOptions()
 options.enable([StagesEnum.PACK_RELEASE_COPY, StagesEnum.PACK_RELEASE_COPY_ST])
 options.setCopyWithDeps(true)
-testAndPublish(name, [ubuntu: [node: nodeTestUbuntu, client: clientTestUbuntu, common: commonTestUbuntu]], true, options, [ubuntu: buildDebUbuntu])
+testAndPublish(name, [ubuntu: [node: nodeTestUbuntu, common: commonTestUbuntu]], true, options, [ubuntu: buildDebUbuntu])
diff --git a/README.md b/README.md
index 5235576da..4793161b5 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,6 @@
 * [How to Contribute](#how-to-contribute)
 * [How to Install a Test Network](#how-to-install-a-test-network)
 * [How to Start Working with the Code](#how-to-start-working-with-the-code)
-* [How to Start Indy Client CLI](#how-to-start-indy-client-cli)
 * [Continuous integration and delivery](https://github.com/hyperledger/indy-node/blob/master/docs/ci-cd.md)
 * [How to send a PR](#how-to-send-a-pr)
 * [Docs and links](#docs-and-links)
@@ -49,11 +48,8 @@ Indy Node repo consists of the following parts:
 - indy-node:
     - [indy-plenum](https://github.com/hyperledger/indy-plenum)-based implementation of distributed ledger
     - Extends plenum's base pool functionality with specific transactions support (CLAIM_DEF, SCHEMA, POOL_UPGRADE, etc.)
-- indy-client
-    - Contains client and CLI code
-    - Will be deprecated soon in favor of [indy-sdk](https://github.com/hyperledger/indy-sdk), so please use indy-sdk for your own applications dealing with Indy ecosystem.
 - indy-common
-    - Common code for both indy-node and indy-client parts
+    - Common code for indy-node
 - scripts
     - Some scripts that can be run for installed Node (in particular, scripts to start Nodes, generate keys, prepare test Network, etc.)
 - doc
diff --git a/build-scripts/ubuntu-1604/postinst_node b/build-scripts/ubuntu-1604/postinst_node
index 077c78052..a13ddcad6 100755
--- a/build-scripts/ubuntu-1604/postinst_node
+++ b/build-scripts/ubuntu-1604/postinst_node
@@ -6,10 +6,6 @@ GENERAL_CONFIG_DIR="/etc/indy"
 GENERAL_DATA_DIR="/var/lib/indy"
 GENERAL_LOG_DIR="/var/log/indy"
 
-CLI_BASE_DIR="/home/indy/.indy-cli"
-CLI_NETWORKS_DIR="$CLI_BASE_DIR/networks"
-CLI_WALLETS_DIR="$CLI_BASE_DIR/wallets"
-
 INSTALL_DIR='/usr/local/lib/python3.5/dist-packages'
 
 NOFILES_SOFT_LIMIT=65536
@@ -18,11 +14,6 @@ NOFILES_HARD_LIMIT=131072
 
 CLIENT_CONNECTIONS_LIMIT=500
 
-# workaround when .indy become regular file
-if [ -f $CLI_BASE_DIR ]; then
-    rm $CLI_BASE_DIR
-fi
-
 # create general indy config folder if does not exist
 mkdir -p $GENERAL_CONFIG_DIR
 # create general indy data folder if does not exist
@@ -43,13 +34,6 @@ chmod -R ug+rwx $GENERAL_CONFIG_DIR
 chmod -R ug+rwx $GENERAL_DATA_DIR
 chmod -R ug+rwx $GENERAL_LOG_DIR
 
-# create indy cli folder if does not exist
-mkdir -p $CLI_BASE_DIR
-mkdir -p $CLI_NETWORKS_DIR
-mkdir -p $CLI_WALLETS_DIR
-chown -R indy:indy $CLI_BASE_DIR
-chmod -R ug+rwx $CLI_BASE_DIR
-
 # init_indy_node script
 cat < /usr/local/bin/init_indy_node
diff --git a/environment/cloudformation/training/GettingStartedGuideCluster.json b/environment/cloudformation/training/GettingStartedGuideCluster.json
index da3e7885d..61492c478 100644
--- a/environment/cloudformation/training/GettingStartedGuideCluster.json
+++ b/environment/cloudformation/training/GettingStartedGuideCluster.json
@@ -3238,12 +3238,6 @@
             "IPaddress": "10.0.0.201"
         }
     },
-//    "ClientCodePyPi": {
-//        "Ubuntu1604": {
-//            "stable": "indy-client",
-//            "master": "indy-client-dev"
-//        }
-//    },
     "RegionBasedData": {
         "ap-northeast-1": {
             "Ubuntu1604": "ami-aae114cc"
diff --git a/environment/openshift/scripts/agent/start.sh b/environment/openshift/scripts/agent/start.sh
deleted file mode 100644
index 8c83977b1..000000000
--- a/environment/openshift/scripts/agent/start.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-SCRIPT_DIR=$(dirname $0)
-
-$SCRIPT_DIR/initialize.sh
-
-agentName="$(echo "$AGENT_NAME" | tr '[:upper:]' '[:lower:]')"
-
-# Examples:
-# /usr/bin/env python3 /usr/local/lib/python3.5/dist-packages/indy_client/test/agent/faber.py --port 5555 > faber.log
-# /usr/bin/env python3 /usr/local/lib/python3.5/dist-packages/indy_client/test/agent/acme.py --port 6666 > acme.log
-# /usr/bin/env python3 /usr/local/lib/python3.5/dist-packages/indy_client/test/agent/thrift.py --port 7777 > thrift.log
-
-echo "Starting ${AGENT_NAME} agent node ..."
-echo "/usr/bin/env python3 /usr/local/lib/python3.5/dist-packages/indy_client/test/agent/${agentName}.py --port ${AGENT_PORT} > ${agentName}.log"
-echo
-exec /usr/bin/env python3 /usr/local/lib/python3.5/dist-packages/indy_client/test/agent/${agentName}.py --port ${AGENT_PORT} > ${agentName}.log
\ No newline at end of file
diff --git a/environment/openshift/scripts/client/start.sh b/environment/openshift/scripts/client/start.sh
deleted file mode 100644
index ef22940c4..000000000
--- a/environment/openshift/scripts/client/start.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-SCRIPT_DIR=$(dirname $0)
-
-$SCRIPT_DIR/initialize.sh
-
-
-
-echo "This client is deprecated! Please, use the new libindy-based CLI: https://github.com/hyperledger/indy-sdk/tree/master/cli"
-echo "Starting indy client node ..."
-echo "The indy cli will not keep the pod running, so instead we'll sleep for infinity."
-echo "To use the indy cli, rsh into the pod and run the cli in the session."
-echo
-sleep infinity
-# indy
\ No newline at end of file
diff --git a/environment/vagrant/sandbox/DevelopmentEnvironment/AWS/scriptlets/common/pre/linux/setup b/environment/vagrant/sandbox/DevelopmentEnvironment/AWS/scriptlets/common/pre/linux/setup
index 5ddde9136..b5b1b94df 100644
--- a/environment/vagrant/sandbox/DevelopmentEnvironment/AWS/scriptlets/common/pre/linux/setup
+++ b/environment/vagrant/sandbox/DevelopmentEnvironment/AWS/scriptlets/common/pre/linux/setup
@@ -107,4 +107,3 @@ make install
 #cd /home/$USER
 #echo "Setup and activate a Python virtual environment..."
 #virtualenv -p python${PYTHON_MAJOR_VERSION}.${PYTHON_MINOR_VERSION} indy-client
-#source ./indy-client/bin/activate
diff --git a/indy_common/config.py b/indy_common/config.py
index 89e03cefc..874c3c46b 100644
--- a/indy_common/config.py
+++ b/indy_common/config.py
@@ -21,9 +21,6 @@
 
 GENERAL_CONFIG_DIR = '/etc/indy/'
 
-CLI_BASE_DIR = '~/.indy-cli/'
-CLI_NETWORK_DIR = os.path.join(CLI_BASE_DIR, 'networks')
-
 GENERAL_CONFIG_FILE = 'indy_config.py'
 NETWORK_CONFIG_FILE = 'indy_config.py'
 USER_CONFIG_FILE = 'indy_config.py'
diff --git a/indy_common/plugin_helper.py b/indy_common/plugin_helper.py
deleted file mode 100644
index cdaa94fb7..000000000
--- a/indy_common/plugin_helper.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import os
-
-
-def writeAnonCredPlugin(plugins_path, reloadTestModules: bool=False):
-    if not os.path.exists(plugins_path):
-        os.makedirs(plugins_path)
-
-    initFile = os.path.join(plugins_path, "__init__.py")
-    if not os.path.exists(initFile):
-        with open(initFile, "a"):
-            pass
-
-    anonPluginFilePath = os.path.join(plugins_path, "anoncreds.py")
-    if not os.path.exists(initFile):
-        anonPluginContent = "" \
-            "import importlib\n" \
-            "\n" \
-            "import anoncreds.protocol.issuer\n" \
-            "import anoncreds.protocol.verifier\n" \
-            "import anoncreds.protocol.prover\n" \
-            "\n" \
-            "import indy_client.anon_creds.issuer\n" \
-            "import indy_client.anon_creds.verifier\n" \
-            "import indy_client.anon_creds.prover\n" \
-            "\n" \
-            "Name = \"Anon creds\"\n" \
-            "Version = 1.1\n" \
-            "IndyVersion = 1.1\n" \
-            "\n" \
-            "indy_client.anon_creds.issuer.Credential = anoncreds.protocol.types.Credential\n" \
-            "indy_client.anon_creds.issuer.AttribType = anoncreds.protocol.types.AttribType\n" \
-            "indy_client.anon_creds.issuer.AttribDef = anoncreds.protocol.types.AttribDef\n" \
-            "indy_client.anon_creds.issuer.Attribs = anoncreds.protocol.types.Attribs\n" \
-            "indy_client.anon_creds.issuer.AttrRepo = anoncreds.protocol.attribute_repo.AttrRepo\n" \
-            "indy_client.anon_creds.issuer.InMemoryAttrRepo = anoncreds.protocol.attribute_repo.InMemoryAttrRepo\n" \
-            "indy_client.anon_creds.issuer.Issuer = anoncreds.protocol.issuer.Issuer\n" \
-            "indy_client.anon_creds.prover.Prover = anoncreds.protocol.prover.Prover\n" \
-            "indy_client.anon_creds.verifier.Verifier = anoncreds.protocol.verifier.Verifier\n" \
-            "indy_client.anon_creds.proof_builder.ProofBuilder = anoncreds.protocol.proof_builder.ProofBuilder\n" \
-            "indy_client.anon_creds.proof_builder.Proof = anoncreds.protocol.types.Proof\n" \
-            "indy_client.anon_creds.cred_def.CredDef = anoncreds.protocol.credential_definition.CredentialDefinition\n" \
-
-        modules_to_reload = ["indy_client.cli.cli"]
-        test_modules_to_reload = [
-            "indy_client.test.helper", "indy_client.test.cli.helper",
-            "indy_client.test.anon_creds.conftest",
-            "indy_client.test.anon_creds.test_anon_creds",
-            # "indy_client.test.anon_creds.anon_creds_demo"
-        ]
-
-        if reloadTestModules:
-            modules_to_reload.extend(test_modules_to_reload)
-
-        reload_module_code = \
-            "reload_modules = " + str(modules_to_reload) + "\n" \
-            "for m in reload_modules:\n" \
-            "   try:\n" \
-            "       module_obj = importlib.import_module(m)\n" \
-            "       importlib.reload(module_obj)\n" \
-            "   except AttributeError as ae:\n" \
-            "       print(\"Plugin loading failed: module {}, detail: {}\".format(m, str(ae)))\n" \
-            "\n"
-
-        anonPluginContent += reload_module_code
-        with open(anonPluginFilePath, "w") as f:
-            f.write(anonPluginContent)
diff --git a/indy_common/test/conftest.py b/indy_common/test/conftest.py
index 0824b05c1..1b608108b 100644
--- a/indy_common/test/conftest.py
+++ b/indy_common/test/conftest.py
@@ -18,7 +18,6 @@
 from plenum.test.conftest import GENERAL_CONFIG_DIR, \
     txnPoolNodesLooper, overriddenConfigValues  # noqa
 
-
 logger = getlogger()
 
 
@@ -74,24 +73,22 @@ def general_conf_tdir_for_func(tdir_for_func):
     return general_config_dir
 
 
-def _tconf(general_config, client_temp_dir):
+def _tconf(general_config):
     config = getConfig(general_config_dir=general_config)
     for k, v in overriddenConfigValues.items():
         setattr(config, k, v)
     config.MinSepBetweenNodeUpgrades = 5
-    config.CLI_BASE_DIR = client_temp_dir
-    config.CLI_NETWORK_DIR = os.path.join(config.CLI_BASE_DIR, 'networks')
     return config
 
 
 @pytest.fixture(scope="module")
-def tconf(general_conf_tdir, client_tdir):
-    return _tconf(general_conf_tdir, client_tdir)
+def tconf(general_conf_tdir):
+    return _tconf(general_conf_tdir)
 
 
 @pytest.fixture()
-def tconf_for_func(general_conf_tdir_for_func, client_tdir):
-    return _tconf(general_conf_tdir_for_func, client_tdir)
+def tconf_for_func(general_conf_tdir_for_func):
+    return _tconf(general_conf_tdir_for_func)
 
 
 @pytest.fixture(scope="module")
diff --git a/indy_node/test/attrib_txn/test_nym_attrib.py b/indy_node/test/attrib_txn/test_nym_attrib.py
index 1b3f33748..90c70aa20 100644
--- a/indy_node/test/attrib_txn/test_nym_attrib.py
+++ b/indy_node/test/attrib_txn/test_nym_attrib.py
@@ -162,7 +162,7 @@ def test_user_add_attrs_for_herself_and_get_it(
 
 
 @pytest.mark.skip(reason="INDY-896 ATTR cannot be added without dest")
-def test_attr_with_no_dest_added(nodeSet, tdirWithClientPoolTxns, looper, attributeData):
+def test_attr_with_no_dest_added(nodeSet, looper, attributeData):
     pass
     # user_wallet = Wallet()
     # signer = DidSigner()
@@ -199,8 +199,7 @@ def testGetTxnsNoSeqNo():
 
 @pytest.mark.skip(reason="SOV-560. Come back to it later since "
                          "requestPendingTxns move to wallet")
-def testGetTxnsSeqNo(nodeSet, tdirWithClientPoolTxns,
-                     trustAnchorWallet, looper):
+def testGetTxnsSeqNo(nodeSet, trustAnchorWallet, looper):
     pass
     """
     Test GET_TXNS from client and provide seqNo to fetch from
diff --git a/indy_node/test/conftest.py b/indy_node/test/conftest.py
index b9d30e7a4..33bde3a0e 100644
--- a/indy_node/test/conftest.py
+++ b/indy_node/test/conftest.py
@@ -18,10 +18,10 @@
 strict_types.defaultShouldCheck = True
 
 # noinspection PyUnresolvedReferences
-from plenum.test.conftest import tdir, client_tdir, nodeReg, \
+from plenum.test.conftest import tdir, nodeReg, \
     whitelist, concerningLogLevels, logcapture, \
     tdirWithPoolTxns, tdirWithDomainTxns, \
-    tdirWithClientPoolTxns, txnPoolNodeSet, \
+    txnPoolNodeSet, \
     poolTxnData, dirName, poolTxnNodeNames, allPluginsPath, tdirWithNodeKeepInited, \
     poolTxnStewardData, poolTxnStewardNames, getValueFromModule, \
     patchPluginManager, txnPoolNodesLooper, warncheck, \
diff --git a/requirement.txt b/requirement.txt
deleted file mode 100644
index 461b2063d..000000000
--- a/requirement.txt
+++ /dev/null
@@ -1 +0,0 @@
-indy-client
\ No newline at end of file
diff --git a/scripts/client_connections/README.md b/scripts/client_connections/README.md
deleted file mode 100644
index 67c28134f..000000000
--- a/scripts/client_connections/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Test for checking that all simultaneous clients have a chance to connect to the pool with enabled client stack restart
-
-**Steps to reproduce:**
-
- - Using scrips from environment/docker/pool start up and configure the pool so that client stack restart is disabled:
-   - ``$ cd environment/docker/pool``
-   - ``$ ./pool_start.sh``
-   - ``$ for i in {1..4} ; do docker exec -u root -it node${i} setup_iptables 970$(( i * 2 )) 100 ; done``
-   - ``$ for i in {1..4} ; do docker exec -u root -it node${i} bash -c "echo -e '\nTRACK_CONNECTED_CLIENTS_NUM_ENABLED = False\nCLIENT_STACK_RESTART_ENABLED = False\nMAX_CONNECTED_CLIENTS_NUM = 100\nMIN_STACK_RESTART_TIMEOUT = 15\nMAX_STACK_RESTART_TIME_DEVIATION = 2' >> /etc/indy/indy_config.py && systemctl restart indy-node" ; done``
-
- - Prepare another docker container, which contains script for creating N simultaneous client connection, in our test we use build 618 of indysdk version 1.5.0 that keeps connection infinitely:
-   - ``docker run -itd --privileged --name indy-cli --net=pool-network node1 bash``
-   - ``docker cp ../../../scripts/client_connections/just_connect_N_times.py indy-cli:/home/indy``
-   - ``docker cp node1:/var/lib/indy/sandbox/pool_transactions_genesis /tmp``
-   - ``docker cp /tmp/pool_transactions_genesis indy-cli:/home/indy``
-   - ``docker exec -it -u root indy-cli apt update``
-   - ``docker exec -it -u root indy-cli apt install -y --allow-downgrades libindy=1.5.0~618``
-   - ``docker exec -it -u root indy-cli pip install --upgrade python3-indy==1.5.0.dev618``
-   - the `just_connect_N_times.py` script will try to create 150 simultaneous connections to pool.
-
- - Then run script, test should fail as client stack restart is disabled, press `ctrl-C` et the end to terminate parallel processes:
-   - ``docker exec -u root -it indy-cli python3 /home/indy/just_connect_N_times.py -g /home/indy/pool_transactions_genesis -c 150``
-
- - Enable client stack restart on all nodes of the pool:
-   - ``for i in {1..4} ; do docker exec -u root -it node${i} bash -c "echo -e '\nTRACK_CONNECTED_CLIENTS_NUM_ENABLED = True\nCLIENT_STACK_RESTART_ENABLED = True\n' >> /etc/indy/indy_config.py && systemctl restart indy-node" ; done``
-
- - Then run script again, test should pass as client stack restart is enabled and all clients have a chance to connect:
-   - ``docker exec -u root -it indy-cli python3 /home/indy/just_connect_N_times.py -g /home/indy/pool_transactions_genesis -c 150``
-
-**Some notes**
-
- - As default, indy-sdk has a connection timeout about 50 seconds. In that case, we expect, that limited count of client will be connected to the pool and
- other not. When 50 second is left, process with client connection will return error 307 (PoolLedgerTimeout). Each client is run in a different process.
-
- - NOTE: for now, new indy-sdk client is marked as connected to a pool if it is connected to n-f pool nodes. In that case, max possible connected clients can be evaluated as:
-
- `max_connected_clients = limit * n / (n-f)`, and in this test with n=4 and limit=100, maximum number of successfully connected clients without stack restart can be between 100 and 133.
- We consider the test as passed if the number of `finally` connected clients is greater than described `max_connected_clients`.
\ No newline at end of file
diff --git a/scripts/client_connections/just_connect_N_times.py b/scripts/client_connections/just_connect_N_times.py
deleted file mode 100644
index 555ac005a..000000000
--- a/scripts/client_connections/just_connect_N_times.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import argparse
-import json
-import multiprocessing
-import time
-import asyncio
-
-from multiprocessing import Process
-
-import os
-
-import functools
-
-import sys
-from indy import pool
-
-count_of_connected = 0
-count_of_not_connected = 0
-client_connections_node_limit = 100
-pool_size = 4
-max_failure_tolerance = 1
-
-
-def run_client(genesis_path, pipe_conn, client_number):
-
-    async def run_test(genesis_path, loop, pipe_conn):
-        try:
-            pool_cfg = json.dumps({"genesis_txn": genesis_path})
-
-            # TODO: remove after latest changes committed
-            pool_name = "pool_{}_{}".format(int(time.time()), os.getpid())
-            await pool.set_protocol_version(2)
-
-            await pool.create_pool_ledger_config(pool_name, pool_cfg)
-            await pool.open_pool_ledger(pool_name, None)
-            pipe_conn.send((0, client_number))
-            time.sleep(100000)
-        except Exception:
-            pipe_conn.send((1, client_number))
-            loop.call_soon(loop.stop)
-            return
-
-    loop = asyncio.new_event_loop()
-    asyncio.set_event_loop(loop)
-    try:
-        loop.run_until_complete(asyncio.gather(
-            run_test(genesis_path, loop, pipe_conn)
-        ))
-    except Exception as e:
-        pipe_conn.send(e)
-
-
-def get_max_connected_clients_without_stack_restart(limit, N, F):
-    return int(limit * (N / (N - F)))
-
-
-def read_cb(pipe_conn):
-    global count_of_connected
-    global count_of_not_connected
-    global max_connected_clients_without_stack_restart
-    global arg_clients_num
-    res = pipe_conn.recv()
-    if isinstance(res, tuple):
-        code, cl_number = res
-        if code == 0:
-            print("Client with number {} is connected".format(cl_number))
-            count_of_connected += 1
-        elif code == 1:
-            print("Client with number {} is not connected".format(cl_number))
-            count_of_not_connected += 1
-        print("===============================================")
-        print("Count of connected clients: {}".format(count_of_connected))
-        print("Count of not connected clients: {}".format(count_of_not_connected))
-        print("===============================================")
-        if count_of_connected + count_of_not_connected == arg_clients_num:
-            result_str = "\n===== TEST {}: connected clients {}, not connected clients {}, max(limit, N, F) {} =====".\
-                format("PASSED" if count_of_connected > max_connected_clients_without_stack_restart else "FAILED",
-                       count_of_connected,
-                       count_of_not_connected,
-                       max_connected_clients_without_stack_restart)
-            print(result_str)
-
-    else:
-        print(res)
-
-
-async def start_all_procs(args, wr):
-    global client_connections_node_limit
-    processes = []
-    for client_number in range(args.clients):
-        if client_number == client_connections_node_limit:
-            # Give a chance all clients that fit the limit to connect
-            time.sleep(10)
-        process = Process(target=run_client, args=(args.genesis_path, wr, client_number))
-        processes.append(process)
-        process.start()
-
-
-parser = argparse.ArgumentParser(description="Create N simultaneous connection to pool ")
-parser.add_argument('-c', '--clients', default=100, type=int, required=False, dest='clients',
-                    help='Number of client you want to create. ')
-parser.add_argument('-g', '--genesis', required=True, dest='genesis_path', type=str,
-                    help='Path to genesis txns file. '
-                         'Default value is ~/.indy-cli/networks/sandbox/pool_transactions_genesis')
-
-args = parser.parse_args()
-arg_clients_num = args.clients
-
-count_failed_clients = 0
-max_connected_clients_without_stack_restart = \
-    get_max_connected_clients_without_stack_restart(
-        client_connections_node_limit, pool_size, max_failure_tolerance)
-
-rd, wr = multiprocessing.Pipe()
-main_loop = asyncio.get_event_loop()
-main_loop.add_reader(rd, functools.partial(read_cb, rd))
-print("Connecting clients...")
-asyncio.run_coroutine_threadsafe(start_all_procs(args, wr), loop=main_loop)
-try:
-    main_loop.run_forever()
-except KeyboardInterrupt:
-    sys.exit(0)
diff --git a/scripts/create_dirs.sh b/scripts/create_dirs.sh
index 24b4e6568..d7567d8a2 100755
--- a/scripts/create_dirs.sh
+++ b/scripts/create_dirs.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
 # dirs to be created
-node_dirs="/etc/indy /var/lib/indy /var/log/indy /home/${USER}/.indy-cli"
+node_dirs="/etc/indy /var/lib/indy /var/log/indy"
 
 # create dirs
 for dr in $node_dirs
@@ -20,9 +20,6 @@ if [ ! -f /etc/indy/indy_config.py ]; then
     echo "BACKUP_DIR = '/var/lib/indy/backup'" | sudo tee -a /etc/indy/indy_config.py
     echo "PLUGINS_DIR = '/var/lib/indy/plugins'" | sudo tee -a /etc/indy/indy_config.py
     echo "NODE_INFO_DIR = '/var/lib/indy'" | sudo tee -a /etc/indy/indy_config.py
-
-    echo "CLI_BASE_DIR = '~/.indy-cli/'" | sudo tee -a /etc/indy/indy_config.py
-    echo "CLI_NETWORK_DIR = '~/.indy-cli/networks'" | sudo tee -a /etc/indy/indy_config.py
 fi
 
 # grant permissions
diff --git a/scripts/git b/scripts/git
index d651dacd9..5640d6cab 100644
--- a/scripts/git
+++ b/scripts/git
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 # set -e
 
-dirs=( "indy-common" "indy-client" "indy-node" "plenum" "anoncreds" "ledger" )
+dirs=( "indy-common" "indy-node" "plenum" "anoncreds" "ledger" )
 
 # quiet pushd
 pushd() {
diff --git a/scripts/read_ledger b/scripts/read_ledger
index a818b3314..ea9267692 100755
--- a/scripts/read_ledger
+++ b/scripts/read_ledger
@@ -46,7 +46,6 @@ def read_args():
     parser.add_argument('--count', required=False, action='store_true',
                         help="returns the number of txns in the given ledger")
     parser.add_argument('--node_name', required=False, help="Node's name")
-    parser.add_argument('--client_name', required=False, help="Client's name")
     parser.add_argument('--serializer', required=False, default='json',
                         help="How to represent the data (json by default)")
     parser.add_argument('--network', required=False, type=str,
@@ -55,20 +54,10 @@ def read_args():
     return parser.parse_args()
 
 
-def get_ledger_dir(node_name, client_name, network):
-    if node_name and client_name:
-        print("Either 'node_name' or 'client_name' can be specified")
-        exit()
-
+def get_ledger_dir(node_name, network):
     config = getConfig()
     _network = network if network else config.NETWORK_NAME
     ledger_base_dir = config.LEDGER_DIR
-    if client_name:
-        # Build path to data if --client_name was specified
-        ledger_data_dir = os.path.join(config.CLI_BASE_DIR, _network,
-                                       config.clientDataDir, client_name)
-        return ledger_data_dir
-
     if node_name:
         # Build path to data if --node_name was specified
         ledger_data_dir = os.path.join(ledger_base_dir, _network, _DATA, node_name)
@@ -178,7 +167,7 @@ if __name__ == '__main__':
     args = read_args()
     config = getConfig()
 
-    ledger_data_dir = get_ledger_dir(args.node_name, args.client_name, args.network)
+    ledger_data_dir = get_ledger_dir(args.node_name, args.network)
     read_copy_ledger_data_dir = None
     try:
         # RocksDB supports real read-only mode and does not need to have a ledger copy.