From bae657d4d47593ebde4f951085f326aabab264be Mon Sep 17 00:00:00 2001 From: Philipp Schlarb Date: Thu, 19 Aug 2021 13:16:11 +0200 Subject: [PATCH 01/31] updated dev setup for ubuntu 16 and setup-dev.md Signed-off-by: Philipp Schlarb --- dev-setup/ubuntu/init-dev-project.sh | 3 ++- dev-setup/ubuntu/setup-dev-python.sh | 5 ++--- docs/source/setup-dev.md | 18 ++++++++++++++---- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/dev-setup/ubuntu/init-dev-project.sh b/dev-setup/ubuntu/init-dev-project.sh index 014c030a4..ec788d04e 100755 --- a/dev-setup/ubuntu/init-dev-project.sh +++ b/dev-setup/ubuntu/init-dev-project.sh @@ -21,7 +21,8 @@ echo "Created virtual environment" echo "Installing indy-node..." pushd indy-node -pip install -e .[tests] +pip install pyzmq +pip install --use-deprecated legacy-resolver -e .[tests] popd echo "Installed indy-node..." diff --git a/dev-setup/ubuntu/setup-dev-python.sh b/dev-setup/ubuntu/setup-dev-python.sh index b849b731f..a146edb5f 100755 --- a/dev-setup/ubuntu/setup-dev-python.sh +++ b/dev-setup/ubuntu/setup-dev-python.sh @@ -4,12 +4,11 @@ set -e echo 'Installing python 3.5 and pip...' sudo apt-get update sudo apt-get install -y software-properties-common python-software-properties -sudo add-apt-repository ppa:deadsnakes/ppa -sudo apt-get update -sudo apt-get install -y python3.5 python3-pip python3.5-dev +sudo apt-get install -y python3.5 python3-pip python3.5-dev virtualenvwrapper echo 'Installed python 3.5 and pip...' echo 'Installing virtualenvwrapper' +sudo -H pip3 install pbr importlib-metadata==1.7.0 sudo -H pip3 install virtualenvwrapper echo '' >> ~/.bashrc echo '# Python virtual environment wrapper' >> ~/.bashrc diff --git a/docs/source/setup-dev.md b/docs/source/setup-dev.md index b90d188c7..f1badde05 100644 --- a/docs/source/setup-dev.md +++ b/docs/source/setup-dev.md @@ -23,7 +23,7 @@ You can also have a look at the scripts mentioned below to follow them and perfo 1. 
Go to the destination folder for the project 1. Run `init-dev-project.sh ` to clone indy-plenum and indy-node projects and create a virtualenv to work in -1. Activate new virtualenv `workon ` +1. Activate new virtualenv `workon ` if not activated 1. [Optionally] Install Pycharm 1. [Optionally] Open and configure projects in Pycharm: - Open both indy-plenum and indy-node in one window @@ -57,11 +57,10 @@ and virtual environment on Ubuntu, or follow the detailed instructions below. ##### Ubuntu -1. Run ```sudo add-apt-repository ppa:deadsnakes/ppa``` -2. Run ```sudo apt-get update``` +1. Run ```sudo apt-get update``` -3. On Ubuntu 14, run ```sudo apt-get install python3.5``` (python3.5 is pre-installed on most Ubuntu 16 systems; if not, do it there as well.) +2. On Ubuntu 14, run ```sudo apt-get install python3.5``` (python3.5 is pre-installed on most Ubuntu 16 systems; if not, do it there as well.) ##### CentOS/Redhat @@ -172,6 +171,7 @@ source /bin/activate Optionally, you can install virtual environment wrapper as follows: ``` +apt install virtualenvwrapper pip3 install virtualenvwrapper echo '' >> ~/.bashrc echo '# Python virtual environment wrapper' >> ~/.bashrc @@ -195,6 +195,13 @@ Navigate to the root directory of the source (for each project) and install requ ``` pip install -e .[tests] ``` +If pip is complaining about not finding a distribution for indy and mismatching metadata, try ```pip install --use-deprecated legacy-resolver -e .[tests]```. 
+

```
https://files.pythonhosted.org/packages/61/8c/91a004be0934f4cb3dd9a6a0cc91d46046e67ddb678e440562b26040230e/python3-indy-1.15.0-dev-1618.tar.gz#sha256=eb61e5aa1f64c299dadd74bcf7f30e2bcde6df18aa51491f5a8d4530d64b4b53 (from indy-node==1.13.0.dev0) has different version in metadata: '1.15.0'
ERROR: Could not find a version that satisfies the requirement python3-indy==1.15.0-dev-1618 (from indy-node[tests])
ERROR: No matching distribution found for python3-indy==1.15.0-dev-1618
``` If you are working with both indy-plenum and indy-node, then please make sure that both projects are installed with -e option, and not from pypi (have a look at the sequence at `init-dev-project.sh`). @@ -203,3 +210,6 @@ and run tests ``` pytest . ``` + +The test may fail with a too many open files error. +If that happens check and set your limits with ```ulimit```. From 953cace936c47a7775e9b72ae41cab8ac82fe12f Mon Sep 17 00:00:00 2001 From: udosson Date: Mon, 13 Sep 2021 15:14:23 +0200 Subject: [PATCH 02/31] fix uploading of deb files with the same name but different distribution Signed-off-by: udosson --- .github/actions/publish-deb/publishPackages | 2 +- .github/actions/publish-deb/upload-spec.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/publish-deb/publishPackages b/.github/actions/publish-deb/publishPackages index 449d7e31a..76d7fe868 100755 --- a/.github/actions/publish-deb/publishPackages +++ b/.github/actions/publish-deb/publishPackages @@ -26,7 +26,7 @@ for name in ${fileList}; do jfrog rt u \ --deb ${distribution}/${component}/${architecture} \ --spec ${uploadSpec}\ - --spec-vars "SOURCE_DIR=${sourceDirectory};PACKAGE_NAME=${name};COMPONENT=${component};PACKAGE_STARTING_LETTER=${startingLetter};PACKAGE_SHORT_NAME=${shortName}" \ + --spec-vars "SOURCE_DIR=${sourceDirectory};PACKAGE_NAME=${name};COMPONENT=${component};PACKAGE_STARTING_LETTER=${startingLetter};PACKAGE_SHORT_NAME=${shortName};DISTRIBUTION=${distribution}" \ 
--detailed-summary echo "=====================================================================================================================" echo diff --git a/.github/actions/publish-deb/upload-spec.json b/.github/actions/publish-deb/upload-spec.json index e5db11c58..d98c56a72 100644 --- a/.github/actions/publish-deb/upload-spec.json +++ b/.github/actions/publish-deb/upload-spec.json @@ -2,7 +2,7 @@ "files": [ { "pattern": "${SOURCE_DIR}/${PACKAGE_NAME}", - "target": "indy/pool/${COMPONENT}/${PACKAGE_STARTING_LETTER}/${PACKAGE_SHORT_NAME}/" + "target": "indy/pool/${DISTRIBUTION}/${COMPONENT}/${PACKAGE_STARTING_LETTER}/${PACKAGE_SHORT_NAME}/" } ] } \ No newline at end of file From 75c13515dcf119f0f3376192a2e3a9de0b7d1912 Mon Sep 17 00:00:00 2001 From: Robin Klemens <39259938+udosson@users.noreply.github.com> Date: Mon, 13 Sep 2021 18:23:43 +0200 Subject: [PATCH 03/31] Update version of Indy SDK Update version of Indy SDK to the same version used in Indy-Plenum which is at python3-indy==1.15.0-dev-1625 Signed-off-by: udosson --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 76583685d..253c32c45 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ BASE_DIR = os.path.join(os.path.expanduser("~"), ".indy") tests_require = ['attrs==19.1.0', 'pytest==3.3.1', 'pytest-xdist==1.22.1', 'pytest-forked==0.2', - 'python3-indy==1.15.0-dev-1618', 'pytest-asyncio==0.8.0'] + 'python3-indy==1.15.0-dev-1625', 'pytest-asyncio==0.8.0'] setup( name=metadata['__title__'], From 2a19753733c6c082960d00d174fc017e437a6587 Mon Sep 17 00:00:00 2001 From: udosson Date: Thu, 14 Oct 2021 17:26:16 +0200 Subject: [PATCH 04/31] added tag to pull of lint image Signed-off-by: udosson --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 466667309..d620457cd 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ 
-259,7 +259,7 @@ jobs: needs: [workflow-setup, lint-image] runs-on: ubuntu-20.04 container: - image: ghcr.io/${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }}/node-lint + image: ghcr.io/${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }}/node-lint:ubuntu-18-04 steps: - name: Check out code uses: actions/checkout@v2 From 0aff581eefc56036b7d2cfd645bb12e56a27ab85 Mon Sep 17 00:00:00 2001 From: pSchlarb Date: Thu, 28 Oct 2021 10:27:02 +0200 Subject: [PATCH 05/31] LF Endings Signed-off-by: pSchlarb --- ...3-01-identity-owner-anyone-can-write.batch | 264 ++++++++--------- .../indy-cli-batches/AS-03-01-steward.batch | 272 +++++++++--------- 2 files changed, 268 insertions(+), 268 deletions(-) diff --git a/acceptance/indy-cli-batches/AS-03-01-identity-owner-anyone-can-write.batch b/acceptance/indy-cli-batches/AS-03-01-identity-owner-anyone-can-write.batch index 03a429316..fbb211095 100644 --- a/acceptance/indy-cli-batches/AS-03-01-identity-owner-anyone-can-write.batch +++ b/acceptance/indy-cli-batches/AS-03-01-identity-owner-anyone-can-write.batch @@ -1,133 +1,133 @@ -# setup environment -- pool create AS-0301-owner gen_txn_file=./pool_transactions_genesis -pool connect AS-0301-owner -- wallet create AS-03-wallet-owner key=testkey storage_config={"pool":"AS-0301-owner"} -wallet open AS-03-wallet-owner key=testkey -################# -# setup section # -################# -did new seed=000000000000000000000000Trustee1 metadata="Default Trustee" -did new seed=TestTrustee100000000000000000003 metadata="test trustee 1" -did new seed=TestTrustee200000000000000000003 metadata="test trustee 2" -did new seed=TestSteward100000000000000000003 metadata="test steward 1" -# did new seed=TestSteward300000000000000000003 metadata="test steward 3" -did new seed=TestSteward200000000000000000003 metadata="test steward 2" -did new seed=TestEndorser10000000000000000003 metadata="test endorser 1" -did new seed=TestEndorser20000000000000000003 metadata="test endorser 2" -did new 
seed=TestEndorser30000000000000000003 metadata="test endorser 3" -did new seed=RandomUser1000000000000000000003 metadata="test identity 1" -did new seed=RandomUser2000000000000000000003 metadata="test identity 2" -did new seed=RandomUser3000000000000000000003 metadata="test identity 3" -did new seed=NetworkMonitor000000000000000001 metadata="network monitor 1" -did new seed=NetworkMonitor000000000000000002 metadata="network monitor 2" -did new seed=NetworkMonitor000000000000000003 metadata="network monitor 3" -# AS Trustee CREATE Steward (steward1) -did use V4SGRU86Z58d6TV7PBUe6f -ledger nym did=AhqUV2zHYdNaWLFCCe7xCn role=STEWARD verkey=~YUY2ChUGWJovtU6XTn61D8 -- ledger get-nym did=AhqUV2zHYdNaWLFCCe7xCn -# AS Trustee CREATE NetworkMonitor (network monitor 1) -did use V4SGRU86Z58d6TV7PBUe6f -ledger nym did=GSoagH52cX69hnn7npUTWy role=NETWORK_MONITOR verkey=~5JEp1pUJbzD1YqimvA6hk6 -- ledger get-nym did=GSoagH52cX69hnn7npUTWy -# AS Trustee CREATE NetworkMonitor (network monitor 2) and blacklist it -did use V4SGRU86Z58d6TV7PBUe6f -ledger nym did=CjPA39BhAVG5d3mV2ZEAYE role=NETWORK_MONITOR verkey=~MnEdozF43zQFRdZLN25mya -ledger nym did=CjPA39BhAVG5d3mV2ZEAYE role= -- ledger get-nym did=CjPA39BhAVG5d3mV2ZEAYE -# AS Trustee CREATE Endorser (endorser1) -did use V4SGRU86Z58d6TV7PBUe6f -ledger nym did=DriVwCMbtEgkmoEHKin6Ah role=ENDORSER verkey=~YPZot1kM4DLwvsX6mtcKd9 -- ledger get-nym did=DriVwCMbtEgkmoEHKin6Ah -# AS Trustee CREATE IdentityOwner (user1) -did use V4SGRU86Z58d6TV7PBUe6f -ledger nym did=LBbKEeczA9iL21p4Kgxcuf verkey=~3ZvjdaYs4cdFYXAwNGR85p -- ledger get-nym did=LBbKEeczA9iL21p4Kgxcuf -################ -# test section # -################ -### Actions with NYMs ### -# #1.1 AS IdentityOwner REMOVE Trustee (FAIL) -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=V4SGRU86Z58d6TV7PBUe6f role= -# #1.2 CHECK Trustee IS VALID -did use V4SGRU86Z58d6TV7PBUe6f -ledger nym did=CYdQe2tmSwhv2XdicegoAn role=TRUSTEE verkey=~E7MjQHm14YnmZD9RErTBsi -- ledger get-nym 
did=CYdQe2tmSwhv2XdicegoAn -# #2 AS IdentityOwner CREATE Trustee (trustee1) (FAIL) -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=FiAsNdcWnpB2L22ZUGyKHa role=TRUSTEE verkey=~BZHjLX8NYwJXPTE746hn6Y -- ledger get-nym did=FiAsNdcWnpB2L22ZUGyKHa -# #3.1 AS IdentityOwner REMOVE Steward (steward1) (FAIL) -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=AhqUV2zHYdNaWLFCCe7xCn role= -# #3.2 CHECK Steward IS VALID -did use AhqUV2zHYdNaWLFCCe7xCn -ledger nym did=4xuWDwsQSqzQmYSheSWFyg role=ENDORSER verkey=~BmnEpJMi6kJHUcxcVJa2R4 -- ledger get-nym did=4xuWDwsQSqzQmYSheSWFyg -# #4 AS IdentityOwner CREATE Steward (steward2) (FAIL) -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=CbPwHxKEibPhV4pgXWpu26 role=STEWARD verkey=~MviYa49QADQXAM68WSiLPD -- ledger get-nym did=CbPwHxKEibPhV4pgXWpu26 -# #5.1 AS IdentityOwner REMOVE Endorser (FAIL) -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=DriVwCMbtEgkmoEHKin6Ah role= -# #5.2 CHECK Endorser IS VALID -did use DriVwCMbtEgkmoEHKin6Ah -ledger nym did=Q8uxmCGdXgLHHtaTwYtVJG verkey=~UpRqa9gQ1jsbUBvKYPtWSo -- ledger get-nym did=Q8uxmCGdXgLHHtaTwYtVJG -# #6 AS IdentityOwner CREATE Endorser (FAIL) -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=2b6xTx2HniDU77nxHm6zWB role=ENDORSER verkey=~HF34ymwfTJngb8zFDmCyvX -- ledger get-nym did=2b6xTx2HniDU77nxHm6zWB -# #7.1 AS IdentityOwner CREATE IdentityOwner (PASS with ANYONE_CAN_WRITE=True) -did use LBbKEeczA9iL21p4Kgxcuf -ledger nym did=Fk9ENxnz1ztDgdivQBJTCZ verkey=~NT9ANFeDhCLKDg5PNKZXKK -- ledger get-nym did=Fk9ENxnz1ztDgdivQBJTCZ -# #8 AS IdentityOwner CREATE NetworkMonitor (FAIL) -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=Nd4dUF85aa1JmDFScLfk7o role=NETWORK_MONITOR verkey=~5aJLqtphR3tqymWUwg98Sz -# #9.1 AS IdentityOwner REMOVE NetworkMonitor (FAIL) -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=GSoagH52cX69hnn7npUTWy role= -# #9.2 CHECK NetworkMonitor IS VALID -did use GSoagH52cX69hnn7npUTWy -ledger get-validator-info nodes=Node1 -# #10 check that can't add roles back when 
don't have necessary permissions -did use LBbKEeczA9iL21p4Kgxcuf -- ledger nym did=Q8uxmCGdXgLHHtaTwYtVJG role=ENDORSER -- ledger nym did=Q8uxmCGdXgLHHtaTwYtVJG role=STEWARD -- ledger nym did=Q8uxmCGdXgLHHtaTwYtVJG role=TRUSTEE -- ledger nym did=CjPA39BhAVG5d3mV2ZEAYE role=NETWORK_MONITOR -### Additional cases ### -did use V4SGRU86Z58d6TV7PBUe6f -did new seed=RandomAttribOwner000000000000003 -ledger nym did=UffJCJngTXc1o84dQ7aEUb verkey=~J4HtnGz2wW2nE7VuMeg39M -did new seed=RandomUserForRotateKey0000000003 -ledger nym did=X3zYajU7gbz9Pu8k6E7Ppf verkey=~3ZDo6g4ZDRKGauKrR452xU -# Only owner can edit existing NYMs -did use X3zYajU7gbz9Pu8k6E7Ppf -did rotate-key seed=RandomUserForRotateKey0NewKey003 -# Not owner can't edit existing NYMs (FAIL) -did use UffJCJngTXc1o84dQ7aEUb -- ledger nym did=X3zYajU7gbz9Pu8k6E7Ppf verkey=~3ZDo6g4ZDRKGauKrR452xU -# Only owners can create ATTRIBs -did use X3zYajU7gbz9Pu8k6E7Ppf -ledger attrib did=X3zYajU7gbz9Pu8k6E7Ppf raw={"endpoint":{"ha":"127.0.0.1:5555"}} -# Only owners can edit ATTRIBs -ledger attrib did=X3zYajU7gbz9Pu8k6E7Ppf raw={"endpoint":{"ha":"127.0.0.1:6666"}} -# Not owner can't create attrib (FAIL) -did use X3zYajU7gbz9Pu8k6E7Ppf -- ledger attrib did=UffJCJngTXc1o84dQ7aEUb raw={"endpoint":{"ha":"127.0.0.1:5555"}} -# Not owner can't edit attrib (FAIL) -did use UffJCJngTXc1o84dQ7aEUb -- ledger attrib did=X3zYajU7gbz9Pu8k6E7Ppf raw={"endpoint":{"ha":"127.0.0.1:5555"}} -# Identity Owner can create SCHEMA: (PASS with ANYONE_CAN_WRITE=True) -ledger schema name=IdentityOwnerSchema version=1.0 attr_names=name,age -# Identity Owner can create CLAIM_DEF: (PASS with ANYONE_CAN_WRITE=True, but will fail because of wrong schema id) -- ledger cred-def schema_id=1 signature_type=CL primary={"n":"1","s":"2","rms":"3","r":{"age":"4","name":"5"},"rctxt":"6","z":"7"} tag=3 -# Identity Owner can't RESTART POOL -did use LBbKEeczA9iL21p4Kgxcuf -- ledger pool-restart action=start datetime=2020-01-25T12:49:05.258870+00:00 nodes=Node4 -# Identity 
Owner can't get validator-info -did use LBbKEeczA9iL21p4Kgxcuf +# setup environment +- pool create AS-0301-owner gen_txn_file=./pool_transactions_genesis +pool connect AS-0301-owner +- wallet create AS-03-wallet-owner key=testkey storage_config={"pool":"AS-0301-owner"} +wallet open AS-03-wallet-owner key=testkey +################# +# setup section # +################# +did new seed=000000000000000000000000Trustee1 metadata="Default Trustee" +did new seed=TestTrustee100000000000000000003 metadata="test trustee 1" +did new seed=TestTrustee200000000000000000003 metadata="test trustee 2" +did new seed=TestSteward100000000000000000003 metadata="test steward 1" +# did new seed=TestSteward300000000000000000003 metadata="test steward 3" +did new seed=TestSteward200000000000000000003 metadata="test steward 2" +did new seed=TestEndorser10000000000000000003 metadata="test endorser 1" +did new seed=TestEndorser20000000000000000003 metadata="test endorser 2" +did new seed=TestEndorser30000000000000000003 metadata="test endorser 3" +did new seed=RandomUser1000000000000000000003 metadata="test identity 1" +did new seed=RandomUser2000000000000000000003 metadata="test identity 2" +did new seed=RandomUser3000000000000000000003 metadata="test identity 3" +did new seed=NetworkMonitor000000000000000001 metadata="network monitor 1" +did new seed=NetworkMonitor000000000000000002 metadata="network monitor 2" +did new seed=NetworkMonitor000000000000000003 metadata="network monitor 3" +# AS Trustee CREATE Steward (steward1) +did use V4SGRU86Z58d6TV7PBUe6f +ledger nym did=AhqUV2zHYdNaWLFCCe7xCn role=STEWARD verkey=~YUY2ChUGWJovtU6XTn61D8 +- ledger get-nym did=AhqUV2zHYdNaWLFCCe7xCn +# AS Trustee CREATE NetworkMonitor (network monitor 1) +did use V4SGRU86Z58d6TV7PBUe6f +ledger nym did=GSoagH52cX69hnn7npUTWy role=NETWORK_MONITOR verkey=~5JEp1pUJbzD1YqimvA6hk6 +- ledger get-nym did=GSoagH52cX69hnn7npUTWy +# AS Trustee CREATE NetworkMonitor (network monitor 2) and blacklist it +did use 
V4SGRU86Z58d6TV7PBUe6f +ledger nym did=CjPA39BhAVG5d3mV2ZEAYE role=NETWORK_MONITOR verkey=~MnEdozF43zQFRdZLN25mya +ledger nym did=CjPA39BhAVG5d3mV2ZEAYE role= +- ledger get-nym did=CjPA39BhAVG5d3mV2ZEAYE +# AS Trustee CREATE Endorser (endorser1) +did use V4SGRU86Z58d6TV7PBUe6f +ledger nym did=DriVwCMbtEgkmoEHKin6Ah role=ENDORSER verkey=~YPZot1kM4DLwvsX6mtcKd9 +- ledger get-nym did=DriVwCMbtEgkmoEHKin6Ah +# AS Trustee CREATE IdentityOwner (user1) +did use V4SGRU86Z58d6TV7PBUe6f +ledger nym did=LBbKEeczA9iL21p4Kgxcuf verkey=~3ZvjdaYs4cdFYXAwNGR85p +- ledger get-nym did=LBbKEeczA9iL21p4Kgxcuf +################ +# test section # +################ +### Actions with NYMs ### +# #1.1 AS IdentityOwner REMOVE Trustee (FAIL) +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=V4SGRU86Z58d6TV7PBUe6f role= +# #1.2 CHECK Trustee IS VALID +did use V4SGRU86Z58d6TV7PBUe6f +ledger nym did=CYdQe2tmSwhv2XdicegoAn role=TRUSTEE verkey=~E7MjQHm14YnmZD9RErTBsi +- ledger get-nym did=CYdQe2tmSwhv2XdicegoAn +# #2 AS IdentityOwner CREATE Trustee (trustee1) (FAIL) +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=FiAsNdcWnpB2L22ZUGyKHa role=TRUSTEE verkey=~BZHjLX8NYwJXPTE746hn6Y +- ledger get-nym did=FiAsNdcWnpB2L22ZUGyKHa +# #3.1 AS IdentityOwner REMOVE Steward (steward1) (FAIL) +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=AhqUV2zHYdNaWLFCCe7xCn role= +# #3.2 CHECK Steward IS VALID +did use AhqUV2zHYdNaWLFCCe7xCn +ledger nym did=4xuWDwsQSqzQmYSheSWFyg role=ENDORSER verkey=~BmnEpJMi6kJHUcxcVJa2R4 +- ledger get-nym did=4xuWDwsQSqzQmYSheSWFyg +# #4 AS IdentityOwner CREATE Steward (steward2) (FAIL) +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=CbPwHxKEibPhV4pgXWpu26 role=STEWARD verkey=~MviYa49QADQXAM68WSiLPD +- ledger get-nym did=CbPwHxKEibPhV4pgXWpu26 +# #5.1 AS IdentityOwner REMOVE Endorser (FAIL) +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=DriVwCMbtEgkmoEHKin6Ah role= +# #5.2 CHECK Endorser IS VALID +did use DriVwCMbtEgkmoEHKin6Ah +ledger nym did=Q8uxmCGdXgLHHtaTwYtVJG 
verkey=~UpRqa9gQ1jsbUBvKYPtWSo +- ledger get-nym did=Q8uxmCGdXgLHHtaTwYtVJG +# #6 AS IdentityOwner CREATE Endorser (FAIL) +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=2b6xTx2HniDU77nxHm6zWB role=ENDORSER verkey=~HF34ymwfTJngb8zFDmCyvX +- ledger get-nym did=2b6xTx2HniDU77nxHm6zWB +# #7.1 AS IdentityOwner CREATE IdentityOwner (PASS with ANYONE_CAN_WRITE=True) +did use LBbKEeczA9iL21p4Kgxcuf +ledger nym did=Fk9ENxnz1ztDgdivQBJTCZ verkey=~NT9ANFeDhCLKDg5PNKZXKK +- ledger get-nym did=Fk9ENxnz1ztDgdivQBJTCZ +# #8 AS IdentityOwner CREATE NetworkMonitor (FAIL) +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=Nd4dUF85aa1JmDFScLfk7o role=NETWORK_MONITOR verkey=~5aJLqtphR3tqymWUwg98Sz +# #9.1 AS IdentityOwner REMOVE NetworkMonitor (FAIL) +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=GSoagH52cX69hnn7npUTWy role= +# #9.2 CHECK NetworkMonitor IS VALID +did use GSoagH52cX69hnn7npUTWy +ledger get-validator-info nodes=Node1 +# #10 check that can't add roles back when don't have necessary permissions +did use LBbKEeczA9iL21p4Kgxcuf +- ledger nym did=Q8uxmCGdXgLHHtaTwYtVJG role=ENDORSER +- ledger nym did=Q8uxmCGdXgLHHtaTwYtVJG role=STEWARD +- ledger nym did=Q8uxmCGdXgLHHtaTwYtVJG role=TRUSTEE +- ledger nym did=CjPA39BhAVG5d3mV2ZEAYE role=NETWORK_MONITOR +### Additional cases ### +did use V4SGRU86Z58d6TV7PBUe6f +did new seed=RandomAttribOwner000000000000003 +ledger nym did=UffJCJngTXc1o84dQ7aEUb verkey=~J4HtnGz2wW2nE7VuMeg39M +did new seed=RandomUserForRotateKey0000000003 +ledger nym did=X3zYajU7gbz9Pu8k6E7Ppf verkey=~3ZDo6g4ZDRKGauKrR452xU +# Only owner can edit existing NYMs +did use X3zYajU7gbz9Pu8k6E7Ppf +did rotate-key seed=RandomUserForRotateKey0NewKey003 +# Not owner can't edit existing NYMs (FAIL) +did use UffJCJngTXc1o84dQ7aEUb +- ledger nym did=X3zYajU7gbz9Pu8k6E7Ppf verkey=~3ZDo6g4ZDRKGauKrR452xU +# Only owners can create ATTRIBs +did use X3zYajU7gbz9Pu8k6E7Ppf +ledger attrib did=X3zYajU7gbz9Pu8k6E7Ppf raw={"endpoint":{"ha":"127.0.0.1:5555"}} +# Only owners can 
edit ATTRIBs +ledger attrib did=X3zYajU7gbz9Pu8k6E7Ppf raw={"endpoint":{"ha":"127.0.0.1:6666"}} +# Not owner can't create attrib (FAIL) +did use X3zYajU7gbz9Pu8k6E7Ppf +- ledger attrib did=UffJCJngTXc1o84dQ7aEUb raw={"endpoint":{"ha":"127.0.0.1:5555"}} +# Not owner can't edit attrib (FAIL) +did use UffJCJngTXc1o84dQ7aEUb +- ledger attrib did=X3zYajU7gbz9Pu8k6E7Ppf raw={"endpoint":{"ha":"127.0.0.1:5555"}} +# Identity Owner can create SCHEMA: (PASS with ANYONE_CAN_WRITE=True) +ledger schema name=IdentityOwnerSchema version=1.0 attr_names=name,age +# Identity Owner can create CLAIM_DEF: (PASS with ANYONE_CAN_WRITE=True, but will fail because of wrong schema id) +- ledger cred-def schema_id=1 signature_type=CL primary={"n":"1","s":"2","rms":"3","r":{"age":"4","name":"5"},"rctxt":"6","z":"7"} tag=3 +# Identity Owner can't RESTART POOL +did use LBbKEeczA9iL21p4Kgxcuf +- ledger pool-restart action=start datetime=2020-01-25T12:49:05.258870+00:00 nodes=Node4 +# Identity Owner can't get validator-info +did use LBbKEeczA9iL21p4Kgxcuf - ledger get-validator-info nodes=Node1 \ No newline at end of file diff --git a/acceptance/indy-cli-batches/AS-03-01-steward.batch b/acceptance/indy-cli-batches/AS-03-01-steward.batch index 1ed6ed1be..7ec4713c8 100644 --- a/acceptance/indy-cli-batches/AS-03-01-steward.batch +++ b/acceptance/indy-cli-batches/AS-03-01-steward.batch @@ -1,136 +1,136 @@ -# setup environment -- pool create AS-0301-steward gen_txn_file=./pool_transactions_genesis -pool connect AS-0301-steward -- wallet create AS-03-wallet-steward key=testkey storage_config={"pool":"AS-0301-steward"} -wallet open AS-03-wallet-steward key=testkey -################# -# setup section # -################# -did new seed=TestTrustee100000000000000000001 metadata="test trustee 1" -did new seed=TestSteward100000000000000000001 metadata="test steward 1" -did new seed=TestSteward200000000000000000001 metadata="test steward 2" -did new seed=TestSteward300000000000000000001 metadata="test steward 
3" -did new seed=TestEndorser00000000000000000001 metadata="test endorser 0" -did new seed=TestEndorser10000000000000000001 metadata="test endorser 1" -did new seed=RandomUser1000000000000000000001 metadata="test identity 1" -did new seed=RandomUser2000000000000000000001 metadata="test identity 2" -# did new seed=RandomUser3000000000000000000001 metadata="test identity 3" -did new seed=RandomUser4000000000000000000001 metadata="test identity 4" -did new seed=RandomUser5000000000000000000001 metadata="test identity 5" -did new seed=000000000000000000000000Trustee1 metadata="default trustee" -did new seed=NetworkMonitor100000000000000001 metadata="network monitor 1" -did new seed=NetworkMonitor200000000000000001 metadata="network monitor 2" -did new seed=NetworkMonitor300000000000000001 metadata="network monitor 3" -did use V4SGRU86Z58d6TV7PBUe6f -# AS Trustee CREATE Steward (steward1) -ledger nym did=7qFmEyYCXcmUFVied5Sp3b role=STEWARD verkey=~Mj3PFUSi6qmrTRonFXHx9n -- ledger get-nym did=7qFmEyYCXcmUFVied5Sp3b -# AS Trustee CREATE Steward (steward3) -ledger nym did=CEJJcfjNGt7YcNLyXaszaq role=STEWARD verkey=~AAGGsLT3yQtTmNdKrHXtC2 -- ledger get-nym did=CEJJcfjNGt7YcNLyXaszaq -################ -# test section # -################ -# #1 AS Steward (steward1) CREATE Trustee (trustee1) (FAIL) -did use 7qFmEyYCXcmUFVied5Sp3b -- ledger nym did=81CCbJwqSyGNLLEtVo1kMq role=TRUSTEE verkey=~UimNgHv7X45jxnh65faK3h -- ledger get-nym did=81CCbJwqSyGNLLEtVo1kMq -# #2.1 AS Steward (steward1) REMOVE Trustee (FAIL) -did use 7qFmEyYCXcmUFVied5Sp3b -- ledger nym did=V4SGRU86Z58d6TV7PBUe6f role= -# #2.2 CHECK Trustee IS VALID -did use V4SGRU86Z58d6TV7PBUe6f -ledger nym did=81CCbJwqSyGNLLEtVo1kMq role=TRUSTEE verkey=~UimNgHv7X45jxnh65faK3h -- ledger get-nym did=81CCbJwqSyGNLLEtVo1kMq -# #3 AS Steward (steward1) CREATE Steward (steward2) (FAIL) -did use 7qFmEyYCXcmUFVied5Sp3b -- ledger nym did=XVP5k4E62PMJJWt2nPacHy role=STEWARD verkey=~Ka6GqJ3qBYRE4Ku9uyFuQW -- ledger get-nym 
did=XVP5k4E62PMJJWt2nPacHy -# #4.1 AS Steward (steward1) REMOVE Steward (steward3) (FAIL) -did use 7qFmEyYCXcmUFVied5Sp3b -- ledger nym did=CEJJcfjNGt7YcNLyXaszaq role= -# #4.2 CHECK Steward IS VALID -did use CEJJcfjNGt7YcNLyXaszaq -ledger nym did=Bhe7Uh5E1LYLgpLcbuVjj2 role=ENDORSER verkey=~NMpYrG7tAXYV4ujYZjddKu -- ledger get-nym did=Bhe7Uh5E1LYLgpLcbuVjj2 -# #5.1 AS Steward (steward1) CREATE Endorser (PASS) -did use 7qFmEyYCXcmUFVied5Sp3b -ledger nym did=CDcGtKx1boRYFwPBaGkMmk role=ENDORSER verkey=~PuCGfSiTB3NZGi1SH8w7H -- ledger get-nym did=CDcGtKx1boRYFwPBaGkMmk -# #5.2 CHECK Endorser IS VALID -did use CDcGtKx1boRYFwPBaGkMmk -ledger nym did=YUGDShR1RSr5T2CwbM7Hhu verkey=~GfsAf6NpSQDJ1ZWW2X7BiD -- ledger get-nym did=YUGDShR1RSr5T2CwbM7Hhu -# #6.1 AS Steward (steward1) REMOVE Endorser (FAIL) -did use 7qFmEyYCXcmUFVied5Sp3b -- ledger nym did=CDcGtKx1boRYFwPBaGkMmk role= -# #6.2 CHECK Endorser IS VALID -did use CDcGtKx1boRYFwPBaGkMmk -ledger nym did=Jt7aMnw77aoaBMyhXUNjtt verkey=~DT5pLP1wcvsgAzM78sqiRJ -- ledger get-nym did=Jt7aMnw77aoaBMyhXUNjtt -# #7.1 AS Steward (steward1) RESTORE Endorser (PASS) -did use V4SGRU86Z58d6TV7PBUe6f -ledger nym did=CDcGtKx1boRYFwPBaGkMmk role= -did use 7qFmEyYCXcmUFVied5Sp3b -ledger nym did=CDcGtKx1boRYFwPBaGkMmk role=ENDORSER -# #7.2 CHECK Endorser IS VALID -did use CDcGtKx1boRYFwPBaGkMmk -ledger nym did=XkZJxs6Uadv6MQeKGGZdZ6 verkey=~Aza4zyTRazcVsokmqNJfsg -- ledger get-nym did=XkZJxs6Uadv6MQeKGGZdZ6 -# #8 AS Steward (steward1) CREATE IdentityOwner (PASS) -did use 7qFmEyYCXcmUFVied5Sp3b -ledger nym did=Xm3b3LtJ3UoL5KeYT7ti7j verkey=~GmTyyvE4eHWeSWuiAtmE19 -- ledger get-nym did=Xm3b3LtJ3UoL5KeYT7ti7j -# #9.1 AS Steward (steward1) REMOVE self (FAIL) -did use 7qFmEyYCXcmUFVied5Sp3b -- ledger nym did=7qFmEyYCXcmUFVied5Sp3b role= -# #9.2 CHECK Steward IS VALID -ledger nym did=Jt7aMnw77aoaBMyhXUNjt1 verkey=~DT5pLP1wcvsgAzM78sqiR1 role=ENDORSER -- ledger get-nym did=Jt7aMnw77aoaBMyhXUNjtt -# #10 AS Steward (steward1) CREATE 
NetworkMonitor (PASS) -did use 7qFmEyYCXcmUFVied5Sp3b -ledger nym did=7e6ttq44HKVSHitCE3BLXv role=NETWORK_MONITOR verkey=~9NGbkySmcB9d6rZ7pPxwtY -# #11.1 AS Steward (steward1) REMOVE NetworkMonitor (PASS) -ledger nym did=7e6ttq44HKVSHitCE3BLXv role= -# #11.2 CHECK NetworkMonitor IS INVALID -did use 7e6ttq44HKVSHitCE3BLXv -- ledger get-validator-info nodes=Node1 -# #12.1 AS Steward (steward1) RESTORE NetworkMonitor (PASS) -did use 7qFmEyYCXcmUFVied5Sp3b -ledger nym did=7e6ttq44HKVSHitCE3BLXv role=NETWORK_MONITOR -# #12.2 CHECK NetworkMonitor IS VALID -did use 7e6ttq44HKVSHitCE3BLXv -ledger get-validator-info nodes=Node1 -### Additional cases ### -did use V4SGRU86Z58d6TV7PBUe6f -did new seed=RandomAttribOwner000000000000001 -ledger nym did=SvXt2QGwZF1kXTcpd2pJ37 verkey=~TmkMNbfcATrfJopaq4KcdV role=STEWARD -did new seed=RandomUserForRotateKey0000000001 -ledger nym did=6LKnRH6hWPSpoWu824s5JH verkey=~JyFXPqScFNBSoPG4cgTeb1 role=STEWARD -# Only owner can edit existing NYMs -did use SvXt2QGwZF1kXTcpd2pJ37 -did rotate-key seed=RandomUserForRotateKey0NewKey001 -# Not owner can't edit existing NYMs (FAIL) -did use 6LKnRH6hWPSpoWu824s5JH -- ledger nym did=SvXt2QGwZF1kXTcpd2pJ37 verkey=~TmkMNbfcATrfJopaq4KcdV -# Only owners can create ATTRIBs -did use SvXt2QGwZF1kXTcpd2pJ37 -ledger attrib did=SvXt2QGwZF1kXTcpd2pJ37 raw={"endpoint":{"ha":"127.0.0.1:5555"}} -# Only owners can edit ATTRIBs -ledger attrib did=SvXt2QGwZF1kXTcpd2pJ37 raw={"endpoint":{"ha":"127.0.0.1:6666"}} -# Not owner can't create attrib (FAIL) -did use SvXt2QGwZF1kXTcpd2pJ37 -- ledger attrib did=6LKnRH6hWPSpoWu824s5JH raw={"endpoint":{"ha":"127.0.0.1:5555"}} -# Not owner can't edit attrib (FAIL) -did use 6LKnRH6hWPSpoWu824s5JH -- ledger attrib did=SvXt2QGwZF1kXTcpd2pJ37 raw={"endpoint":{"ha":"127.0.0.1:5555"}} -# Steward can create SCHEMA: -ledger schema name=StewardSchema version=1.0 attr_names=name,age -# Steward can create CLAIM_DEF (will fail because of wrong schema id): -- ledger cred-def schema_id=1 
signature_type=CL primary={"n":"1","s":"2","rms":"3","r":{"age":"4","name":"5"},"rctxt":"6","z":"7"} tag=1 -# Steward can't RESTART POOL -did use SvXt2QGwZF1kXTcpd2pJ37 -- ledger pool-restart action=start datetime=2020-01-25T12:49:05.258870+00:00 nodes=Node4 -# Steward can get validator-info -did use SvXt2QGwZF1kXTcpd2pJ37 -ledger get-validator-info nodes=Node1 +# setup environment +- pool create AS-0301-steward gen_txn_file=./pool_transactions_genesis +pool connect AS-0301-steward +- wallet create AS-03-wallet-steward key=testkey storage_config={"pool":"AS-0301-steward"} +wallet open AS-03-wallet-steward key=testkey +################# +# setup section # +################# +did new seed=TestTrustee100000000000000000001 metadata="test trustee 1" +did new seed=TestSteward100000000000000000001 metadata="test steward 1" +did new seed=TestSteward200000000000000000001 metadata="test steward 2" +did new seed=TestSteward300000000000000000001 metadata="test steward 3" +did new seed=TestEndorser00000000000000000001 metadata="test endorser 0" +did new seed=TestEndorser10000000000000000001 metadata="test endorser 1" +did new seed=RandomUser1000000000000000000001 metadata="test identity 1" +did new seed=RandomUser2000000000000000000001 metadata="test identity 2" +# did new seed=RandomUser3000000000000000000001 metadata="test identity 3" +did new seed=RandomUser4000000000000000000001 metadata="test identity 4" +did new seed=RandomUser5000000000000000000001 metadata="test identity 5" +did new seed=000000000000000000000000Trustee1 metadata="default trustee" +did new seed=NetworkMonitor100000000000000001 metadata="network monitor 1" +did new seed=NetworkMonitor200000000000000001 metadata="network monitor 2" +did new seed=NetworkMonitor300000000000000001 metadata="network monitor 3" +did use V4SGRU86Z58d6TV7PBUe6f +# AS Trustee CREATE Steward (steward1) +ledger nym did=7qFmEyYCXcmUFVied5Sp3b role=STEWARD verkey=~Mj3PFUSi6qmrTRonFXHx9n +- ledger get-nym did=7qFmEyYCXcmUFVied5Sp3b +# 
AS Trustee CREATE Steward (steward3) +ledger nym did=CEJJcfjNGt7YcNLyXaszaq role=STEWARD verkey=~AAGGsLT3yQtTmNdKrHXtC2 +- ledger get-nym did=CEJJcfjNGt7YcNLyXaszaq +################ +# test section # +################ +# #1 AS Steward (steward1) CREATE Trustee (trustee1) (FAIL) +did use 7qFmEyYCXcmUFVied5Sp3b +- ledger nym did=81CCbJwqSyGNLLEtVo1kMq role=TRUSTEE verkey=~UimNgHv7X45jxnh65faK3h +- ledger get-nym did=81CCbJwqSyGNLLEtVo1kMq +# #2.1 AS Steward (steward1) REMOVE Trustee (FAIL) +did use 7qFmEyYCXcmUFVied5Sp3b +- ledger nym did=V4SGRU86Z58d6TV7PBUe6f role= +# #2.2 CHECK Trustee IS VALID +did use V4SGRU86Z58d6TV7PBUe6f +ledger nym did=81CCbJwqSyGNLLEtVo1kMq role=TRUSTEE verkey=~UimNgHv7X45jxnh65faK3h +- ledger get-nym did=81CCbJwqSyGNLLEtVo1kMq +# #3 AS Steward (steward1) CREATE Steward (steward2) (FAIL) +did use 7qFmEyYCXcmUFVied5Sp3b +- ledger nym did=XVP5k4E62PMJJWt2nPacHy role=STEWARD verkey=~Ka6GqJ3qBYRE4Ku9uyFuQW +- ledger get-nym did=XVP5k4E62PMJJWt2nPacHy +# #4.1 AS Steward (steward1) REMOVE Steward (steward3) (FAIL) +did use 7qFmEyYCXcmUFVied5Sp3b +- ledger nym did=CEJJcfjNGt7YcNLyXaszaq role= +# #4.2 CHECK Steward IS VALID +did use CEJJcfjNGt7YcNLyXaszaq +ledger nym did=Bhe7Uh5E1LYLgpLcbuVjj2 role=ENDORSER verkey=~NMpYrG7tAXYV4ujYZjddKu +- ledger get-nym did=Bhe7Uh5E1LYLgpLcbuVjj2 +# #5.1 AS Steward (steward1) CREATE Endorser (PASS) +did use 7qFmEyYCXcmUFVied5Sp3b +ledger nym did=CDcGtKx1boRYFwPBaGkMmk role=ENDORSER verkey=~PuCGfSiTB3NZGi1SH8w7H +- ledger get-nym did=CDcGtKx1boRYFwPBaGkMmk +# #5.2 CHECK Endorser IS VALID +did use CDcGtKx1boRYFwPBaGkMmk +ledger nym did=YUGDShR1RSr5T2CwbM7Hhu verkey=~GfsAf6NpSQDJ1ZWW2X7BiD +- ledger get-nym did=YUGDShR1RSr5T2CwbM7Hhu +# #6.1 AS Steward (steward1) REMOVE Endorser (FAIL) +did use 7qFmEyYCXcmUFVied5Sp3b +- ledger nym did=CDcGtKx1boRYFwPBaGkMmk role= +# #6.2 CHECK Endorser IS VALID +did use CDcGtKx1boRYFwPBaGkMmk +ledger nym did=Jt7aMnw77aoaBMyhXUNjtt verkey=~DT5pLP1wcvsgAzM78sqiRJ +- ledger get-nym 
did=Jt7aMnw77aoaBMyhXUNjtt +# #7.1 AS Steward (steward1) RESTORE Endorser (PASS) +did use V4SGRU86Z58d6TV7PBUe6f +ledger nym did=CDcGtKx1boRYFwPBaGkMmk role= +did use 7qFmEyYCXcmUFVied5Sp3b +ledger nym did=CDcGtKx1boRYFwPBaGkMmk role=ENDORSER +# #7.2 CHECK Endorser IS VALID +did use CDcGtKx1boRYFwPBaGkMmk +ledger nym did=XkZJxs6Uadv6MQeKGGZdZ6 verkey=~Aza4zyTRazcVsokmqNJfsg +- ledger get-nym did=XkZJxs6Uadv6MQeKGGZdZ6 +# #8 AS Steward (steward1) CREATE IdentityOwner (PASS) +did use 7qFmEyYCXcmUFVied5Sp3b +ledger nym did=Xm3b3LtJ3UoL5KeYT7ti7j verkey=~GmTyyvE4eHWeSWuiAtmE19 +- ledger get-nym did=Xm3b3LtJ3UoL5KeYT7ti7j +# #9.1 AS Steward (steward1) REMOVE self (FAIL) +did use 7qFmEyYCXcmUFVied5Sp3b +- ledger nym did=7qFmEyYCXcmUFVied5Sp3b role= +# #9.2 CHECK Steward IS VALID +ledger nym did=Jt7aMnw77aoaBMyhXUNjt1 verkey=~DT5pLP1wcvsgAzM78sqiR1 role=ENDORSER +- ledger get-nym did=Jt7aMnw77aoaBMyhXUNjtt +# #10 AS Steward (steward1) CREATE NetworkMonitor (PASS) +did use 7qFmEyYCXcmUFVied5Sp3b +ledger nym did=7e6ttq44HKVSHitCE3BLXv role=NETWORK_MONITOR verkey=~9NGbkySmcB9d6rZ7pPxwtY +# #11.1 AS Steward (steward1) REMOVE NetworkMonitor (PASS) +ledger nym did=7e6ttq44HKVSHitCE3BLXv role= +# #11.2 CHECK NetworkMonitor IS INVALID +did use 7e6ttq44HKVSHitCE3BLXv +- ledger get-validator-info nodes=Node1 +# #12.1 AS Steward (steward1) RESTORE NetworkMonitor (PASS) +did use 7qFmEyYCXcmUFVied5Sp3b +ledger nym did=7e6ttq44HKVSHitCE3BLXv role=NETWORK_MONITOR +# #12.2 CHECK NetworkMonitor IS VALID +did use 7e6ttq44HKVSHitCE3BLXv +ledger get-validator-info nodes=Node1 +### Additional cases ### +did use V4SGRU86Z58d6TV7PBUe6f +did new seed=RandomAttribOwner000000000000001 +ledger nym did=SvXt2QGwZF1kXTcpd2pJ37 verkey=~TmkMNbfcATrfJopaq4KcdV role=STEWARD +did new seed=RandomUserForRotateKey0000000001 +ledger nym did=6LKnRH6hWPSpoWu824s5JH verkey=~JyFXPqScFNBSoPG4cgTeb1 role=STEWARD +# Only owner can edit existing NYMs +did use SvXt2QGwZF1kXTcpd2pJ37 +did rotate-key 
seed=RandomUserForRotateKey0NewKey001 +# Not owner can't edit existing NYMs (FAIL) +did use 6LKnRH6hWPSpoWu824s5JH +- ledger nym did=SvXt2QGwZF1kXTcpd2pJ37 verkey=~TmkMNbfcATrfJopaq4KcdV +# Only owners can create ATTRIBs +did use SvXt2QGwZF1kXTcpd2pJ37 +ledger attrib did=SvXt2QGwZF1kXTcpd2pJ37 raw={"endpoint":{"ha":"127.0.0.1:5555"}} +# Only owners can edit ATTRIBs +ledger attrib did=SvXt2QGwZF1kXTcpd2pJ37 raw={"endpoint":{"ha":"127.0.0.1:6666"}} +# Not owner can't create attrib (FAIL) +did use SvXt2QGwZF1kXTcpd2pJ37 +- ledger attrib did=6LKnRH6hWPSpoWu824s5JH raw={"endpoint":{"ha":"127.0.0.1:5555"}} +# Not owner can't edit attrib (FAIL) +did use 6LKnRH6hWPSpoWu824s5JH +- ledger attrib did=SvXt2QGwZF1kXTcpd2pJ37 raw={"endpoint":{"ha":"127.0.0.1:5555"}} +# Steward can create SCHEMA: +ledger schema name=StewardSchema version=1.0 attr_names=name,age +# Steward can create CLAIM_DEF (will fail because of wrong schema id): +- ledger cred-def schema_id=1 signature_type=CL primary={"n":"1","s":"2","rms":"3","r":{"age":"4","name":"5"},"rctxt":"6","z":"7"} tag=1 +# Steward can't RESTART POOL +did use SvXt2QGwZF1kXTcpd2pJ37 +- ledger pool-restart action=start datetime=2020-01-25T12:49:05.258870+00:00 nodes=Node4 +# Steward can get validator-info +did use SvXt2QGwZF1kXTcpd2pJ37 +ledger get-validator-info nodes=Node1 From 5ff852b93e615ce67455c7445fc1a4a2a39b2556 Mon Sep 17 00:00:00 2001 From: udosson Date: Tue, 2 Nov 2021 19:24:14 +0100 Subject: [PATCH 06/31] pinned dependencies because of missing support for python 3.5 Signed-off-by: udosson --- .github/workflows/build/Dockerfile | 6 ++++++ Jenkinsfile.ci | 2 +- build-scripts/ubuntu-1604/Dockerfile | 6 ++++-- ci/code-validation.dockerfile | 2 +- ci/pipeline.groovy | 2 +- ci/ubuntu.dockerfile | 4 +++- setup.py | 2 +- 7 files changed, 17 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build/Dockerfile b/.github/workflows/build/Dockerfile index 37cbd0e82..a3496e381 100644 --- a/.github/workflows/build/Dockerfile +++ 
b/.github/workflows/build/Dockerfile @@ -26,4 +26,10 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 \ # install fpm RUN gem install --no-ri --no-rdoc rake fpm + +RUN pip3 install -U \ + # TODO: Investigate why pyzmq has to be installed additionally + # This changed with switching from from 1.13.0.dev1034 (build and published by Jenkins instance of Sovrin) to version 1.13.0.dev143 (GHA) + 'pyzmq==18.1.0' + RUN indy_image_clean \ No newline at end of file diff --git a/Jenkinsfile.ci b/Jenkinsfile.ci index 1fc543ddd..0e494780b 100644 --- a/Jenkinsfile.ci +++ b/Jenkinsfile.ci @@ -73,7 +73,7 @@ def withTestEnv(body) { buildDocker("hyperledger/indy-node-ci", "ci/ubuntu.dockerfile ci").inside { echo 'Test: Install dependencies' - sh "pip install pip==10.0.0" + sh "pip install 'pip<10.0.0' 'pyzmq==18.1.0'" install() body.call('python') } diff --git a/build-scripts/ubuntu-1604/Dockerfile b/build-scripts/ubuntu-1604/Dockerfile index 1c9f348eb..0bffa9712 100644 --- a/build-scripts/ubuntu-1604/Dockerfile +++ b/build-scripts/ubuntu-1604/Dockerfile @@ -19,8 +19,10 @@ RUN apt-get update -y && apt-get install -y \ # issues with pip>=10: # https://github.com/pypa/pip/issues/5240 # https://github.com/pypa/pip/issues/5221 -RUN python3 -m pip install -U pip setuptools \ - && pip3 list +RUN python3 -m pip install -U \ + 'pip<10.0.0' \ + 'setuptools<=50.3.2' + # install fpm RUN gem install --no-ri --no-rdoc rake fpm diff --git a/ci/code-validation.dockerfile b/ci/code-validation.dockerfile index c69c97f0f..c81e0bef2 100644 --- a/ci/code-validation.dockerfile +++ b/ci/code-validation.dockerfile @@ -13,7 +13,7 @@ RUN apt-get update -y && apt-get install -y \ python3-nacl RUN pip3 install -U \ 'pip<10.0.0' \ - setuptools \ + 'setuptools<=50.3.2' \ pep8==1.7.1 \ pep8-naming==0.6.1 \ flake8==3.5.0 diff --git a/ci/pipeline.groovy b/ci/pipeline.groovy index d03a2b216..efeed1486 100644 --- a/ci/pipeline.groovy +++ b/ci/pipeline.groovy @@ -148,7 +148,7 @@ def 
systemTests(Closure body) { def uid = sh(returnStdout: true, script: 'id -u').trim() docker.build("hyperledger/indy-node-ci", "--build-arg uid=$uid -f ci/ubuntu.dockerfile ci").inside { sh """ - pip install pip==10.0.0 + pip install 'pip<10.0.0' 'pyzmq==18.1.0' pip install .[tests] >$pipLogName """ diff --git a/ci/ubuntu.dockerfile b/ci/ubuntu.dockerfile index c4799e8d9..37844a427 100644 --- a/ci/ubuntu.dockerfile +++ b/ci/ubuntu.dockerfile @@ -11,7 +11,7 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 RUN apt-get update -y && apt-get install -y \ python3-nacl \ ursa=0.3.2-2 \ - libindy=1.15.0~1618-xenial \ + libindy=1.15.0~1625-xenial \ # rocksdb python wrapper libbz2-dev \ zlib1g-dev \ @@ -19,6 +19,8 @@ RUN apt-get update -y && apt-get install -y \ libsnappy-dev \ rocksdb=5.8.8 +ENV PATH="/home/$user/$venv/bin:$PATH" + RUN indy_ci_add_user $uid $user $venv RUN indy_image_clean diff --git a/setup.py b/setup.py index 253c32c45..173d4058e 100644 --- a/setup.py +++ b/setup.py @@ -54,7 +54,7 @@ data_files=[( (BASE_DIR, ['data/nssm_original.exe']) )], - install_requires=['indy-plenum==1.13.0.dev1034', + install_requires=['indy-plenum==1.13.0.dev143', 'timeout-decorator==0.4.0', 'distro==1.3.0'], setup_requires=['pytest-runner'], From 3c015ca2c27c4ca2685d271f23f887eb1d8a3611 Mon Sep 17 00:00:00 2001 From: udosson Date: Wed, 3 Nov 2021 17:27:48 +0100 Subject: [PATCH 07/31] re-added the adjustment of packages for the cannonical archive Signed-off-by: udosson --- build-scripts/ubuntu-1604/prepare-package.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build-scripts/ubuntu-1604/prepare-package.sh b/build-scripts/ubuntu-1604/prepare-package.sh index 50d827506..8b8bc7d95 100755 --- a/build-scripts/ubuntu-1604/prepare-package.sh +++ b/build-scripts/ubuntu-1604/prepare-package.sh @@ -32,8 +32,8 @@ if [ "$distro_packages" = "debian-packages" ]; then # Update the package names to match the versions that are pre-installed 
on the os. echo -e "\nAdapt the dependencies for the Canonical archive" #### ToDo adjust packages for the Cannonical archive for Ubuntu 20.04 (focal) - # sed -i "s~timeout-decorator~python3-timeout-decorator~" setup.py - # sed -i "s~distro~python3-distro~" setup.py + sed -i "s~timeout-decorator~python3-timeout-decorator~" setup.py + sed -i "s~distro~python3-distro~" setup.py elif [ "$distro_packages" = "python-packages" ]; then echo -e "\nNo adaption of dependencies for python packages" else From 5994b3fab897d9e8f999400607670f5228627939 Mon Sep 17 00:00:00 2001 From: udosson Date: Thu, 4 Nov 2021 08:56:54 +0100 Subject: [PATCH 08/31] updated version of setup-jfrog-cli to v2 Signed-off-by: udosson --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d620457cd..ae4e9411e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -372,7 +372,7 @@ jobs: uses: actions/checkout@v1 - name: Setup JFrog CLI - uses: jfrog/setup-jfrog-cli@v1 + uses: jfrog/setup-jfrog-cli@v2 env: JF_ARTIFACTORY_1: ${{ secrets.INDY_ARTIFACTORY_REPO_CONFIG }} From d785d0a65a77a65c0fc335c75c8f91766b4d52e1 Mon Sep 17 00:00:00 2001 From: udosson Date: Mon, 8 Nov 2021 21:37:14 +0100 Subject: [PATCH 09/31] bump indy-plenum to version 1.13.0.dev169 Signed-off-by: udosson --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 173d4058e..ce64b29ac 100644 --- a/setup.py +++ b/setup.py @@ -54,7 +54,7 @@ data_files=[( (BASE_DIR, ['data/nssm_original.exe']) )], - install_requires=['indy-plenum==1.13.0.dev143', + install_requires=['indy-plenum==1.13.0.dev169', 'timeout-decorator==0.4.0', 'distro==1.3.0'], setup_requires=['pytest-runner'], From 6bd7f20deb79df6a1f4bc8906bba7bee6bf9f1a8 Mon Sep 17 00:00:00 2001 From: udosson Date: Tue, 9 Nov 2021 17:15:44 +0100 Subject: [PATCH 10/31] prepares indy-plenum package version of debian 
version depedency Signed-off-by: udosson --- build-scripts/ubuntu-1604/prepare-package.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/build-scripts/ubuntu-1604/prepare-package.sh b/build-scripts/ubuntu-1604/prepare-package.sh index 8b8bc7d95..b9d8ce8a6 100755 --- a/build-scripts/ubuntu-1604/prepare-package.sh +++ b/build-scripts/ubuntu-1604/prepare-package.sh @@ -34,6 +34,10 @@ if [ "$distro_packages" = "debian-packages" ]; then #### ToDo adjust packages for the Cannonical archive for Ubuntu 20.04 (focal) sed -i "s~timeout-decorator~python3-timeout-decorator~" setup.py sed -i "s~distro~python3-distro~" setup.py + + echo -e "\n\nPrepares indy-plenum debian package version" + sed -i -r "s~indy-plenum==([0-9\.]+[0-9])(\.)?([a-z]+)~indy-plenum==\1\~\3~" setup.py + elif [ "$distro_packages" = "python-packages" ]; then echo -e "\nNo adaption of dependencies for python packages" else From 9c3d0e42fa8595086829cf097e6d12f6e3d40298 Mon Sep 17 00:00:00 2001 From: Philipp Schlarb Date: Thu, 4 Nov 2021 14:57:18 +0100 Subject: [PATCH 11/31] Baseimage changes as discussed in #1684 Signed-off-by: Philipp Schlarb --- .../docker/baseimage/indy-baseci.ubuntu.dockerfile | 2 +- environment/docker/baseimage/indy-baseci.version | 2 +- .../baseimage/indy-baseimage.ubuntu.dockerfile | 14 ++++++++++---- .../docker/baseimage/indy-baseimage.version | 2 +- .../baseimage/indy-core-baseci.ubuntu.dockerfile | 2 +- .../docker/baseimage/indy-core-baseci.version | 2 +- 6 files changed, 15 insertions(+), 9 deletions(-) diff --git a/environment/docker/baseimage/indy-baseci.ubuntu.dockerfile b/environment/docker/baseimage/indy-baseci.ubuntu.dockerfile index fe2865344..a9e16eba2 100644 --- a/environment/docker/baseimage/indy-baseci.ubuntu.dockerfile +++ b/environment/docker/baseimage/indy-baseci.ubuntu.dockerfile @@ -1,4 +1,4 @@ -FROM __NS__/indy-baseimage:0.0.3-master +FROM __NS__/indy-baseimage:0.0.4 LABEL maintainer="Hyperledger " # indy repos diff --git 
a/environment/docker/baseimage/indy-baseci.version b/environment/docker/baseimage/indy-baseci.version index 1ff0a77bd..81340c7e7 100644 --- a/environment/docker/baseimage/indy-baseci.version +++ b/environment/docker/baseimage/indy-baseci.version @@ -1 +1 @@ -0.0.3-master +0.0.4 diff --git a/environment/docker/baseimage/indy-baseimage.ubuntu.dockerfile b/environment/docker/baseimage/indy-baseimage.ubuntu.dockerfile index 41fb405a4..6ea5c1a2c 100644 --- a/environment/docker/baseimage/indy-baseimage.ubuntu.dockerfile +++ b/environment/docker/baseimage/indy-baseimage.ubuntu.dockerfile @@ -19,10 +19,16 @@ RUN apt-get update && apt-get install -y \ python-setuptools # pypi based packages -RUN pip3 install -U \ - 'pip<10.0.0' \ - setuptools \ - virtualenv +RUN pip3 install -U\ + "pip <10.0.0" \ + "setuptools<=50.3.2" + +# needs to be installed separately and pinned to version 20.0.25 to be compatible with Python3.5 and packages like zipp==1.2.0 +RUN pip3 install -U \ + 'virtualenv==20.0.35' + + +RUN ln -s /usr/bin/pip3 /usr/bin/pip COPY scripts/clean.sh /usr/local/bin/indy_image_clean RUN chmod 755 /usr/local/bin/indy_image_clean diff --git a/environment/docker/baseimage/indy-baseimage.version b/environment/docker/baseimage/indy-baseimage.version index 1ff0a77bd..81340c7e7 100644 --- a/environment/docker/baseimage/indy-baseimage.version +++ b/environment/docker/baseimage/indy-baseimage.version @@ -1 +1 @@ -0.0.3-master +0.0.4 diff --git a/environment/docker/baseimage/indy-core-baseci.ubuntu.dockerfile b/environment/docker/baseimage/indy-core-baseci.ubuntu.dockerfile index 6b67e0d9d..350f51bac 100644 --- a/environment/docker/baseimage/indy-core-baseci.ubuntu.dockerfile +++ b/environment/docker/baseimage/indy-core-baseci.ubuntu.dockerfile @@ -1,4 +1,4 @@ -FROM __NS__/indy-baseci:0.0.3-master +FROM __NS__/indy-baseci:0.0.4 LABEL maintainer="Hyperledger " # indy repos diff --git a/environment/docker/baseimage/indy-core-baseci.version 
b/environment/docker/baseimage/indy-core-baseci.version index 1ff0a77bd..81340c7e7 100644 --- a/environment/docker/baseimage/indy-core-baseci.version +++ b/environment/docker/baseimage/indy-core-baseci.version @@ -1 +1 @@ -0.0.3-master +0.0.4 From 1da2405438ca0ce9ceff339b5e2ae16b631c5234 Mon Sep 17 00:00:00 2001 From: Philipp Schlarb Date: Mon, 29 Nov 2021 16:04:12 +0100 Subject: [PATCH 12/31] FIX wrong Slicing and SLICE_TOTAL_SLICES Signed-off-by: Philipp Schlarb --- .github/workflows/build.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ae4e9411e..5f6ed3e0f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -199,6 +199,8 @@ jobs: # - https://github.com/ScaCap/action-surefire-report/issues/17 env: NODE_OPTIONS: '--max_old_space_size=4096' + #SLICE_TOTAL_SLICES needs to match the total number of slices in the matrix strategy. + SLICE_TOTAL_SLICES: 11 container: image: ghcr.io/${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }}/node-build:ubuntu-16-04 strategy: @@ -231,9 +233,9 @@ jobs: # Explicitly use the existing pip cache location in the node-build image. 
pip --cache-dir /root/.cache/pip install .[tests] - - name: Run Indy Node ${{ matrix.module }} test slice ${{ matrix.slice }}/${{ strategy.job-total }} + - name: Run Indy Node ${{ matrix.module }} test slice ${{ matrix.slice }}/ ${{ env.SLICE_TOTAL_SLICES }} id: node-test - run: RUSTPYTHONASYNCIODEBUG=0 python3 runner.py --pytest "python3 -m pytest -l -vv" --dir "${{ matrix.module }}" --output "test-result-node-${{ matrix.slice }}.txt" --test-only-slice "${{ matrix.slice }}/${{ strategy.job-total }}" + run: RUSTPYTHONASYNCIODEBUG=0 python3 runner.py --pytest "python3 -m pytest -l -vv" --dir "${{ matrix.module }}" --output "test-result-node-${{ matrix.slice }}.txt" --test-only-slice "${{ matrix.slice }}/ ${{ env.SLICE_TOTAL_SLICES }}" - name: Publish Test Report if: success() || failure() From d73baf182283311d4a15c2e3ec7636194021e454 Mon Sep 17 00:00:00 2001 From: Philipp Schlarb Date: Fri, 27 Aug 2021 10:33:18 +0200 Subject: [PATCH 13/31] Added documentation for creating a new network from scratch Co-authored-by: Wade Barnes Signed-off-by: Philipp Schlarb --- docs/source/NewNetwork/CLIInstall.md | 116 +++++ docs/source/NewNetwork/CreateDID.md | 33 ++ docs/source/NewNetwork/NewNetwork.md | 144 ++++++ docs/source/configuring-2nics.md | 148 +++++++ docs/source/index.rst | 7 +- docs/source/installation-and-configuration.md | 418 ++++++++++++++++++ docs/source/start-nodes.md | 2 +- sample/Network/DIDs.txt | 71 +++ sample/Network/README.md | 132 ++++++ sample/Network/Stewards.csv | 5 + sample/Network/Trustees.csv | 4 + sample/Network/domain_transactions_genesis | 7 + sample/Network/pool_transactions_genesis | 4 + 13 files changed, 1089 insertions(+), 2 deletions(-) create mode 100644 docs/source/NewNetwork/CLIInstall.md create mode 100644 docs/source/NewNetwork/CreateDID.md create mode 100644 docs/source/NewNetwork/NewNetwork.md create mode 100644 docs/source/configuring-2nics.md create mode 100644 docs/source/installation-and-configuration.md create mode 100644 
sample/Network/DIDs.txt create mode 100644 sample/Network/README.md create mode 100644 sample/Network/Stewards.csv create mode 100644 sample/Network/Trustees.csv create mode 100644 sample/Network/domain_transactions_genesis create mode 100644 sample/Network/pool_transactions_genesis diff --git a/docs/source/NewNetwork/CLIInstall.md b/docs/source/NewNetwork/CLIInstall.md new file mode 100644 index 000000000..4ce980c75 --- /dev/null +++ b/docs/source/NewNetwork/CLIInstall.md @@ -0,0 +1,116 @@ +# Installing the `indy-cli` + +The `indy-cli` is developed under the [indy-sdk](https://github.com/hyperledger/indy-sdk). This documentation may be not up to date. + +You will need to perform the following once for each `indy-cli` machine you would like to set up (only 1 is required). +It is recommended that you install the `indy-cli` in your native work environment if possible, so you always have it available even when traveling. + +## Containerized `indy-cli` Environment + +The following sections describe how to install and configure the `indy-cli` directly on a machine or VM. However, possibly the most convenient option is to use a containerized `indy-cli` environment like the one included with [von-network](https://github.com/bcgov/von-network). For information on how to use the containerized `indy-cli` in `von-network`, refer to [Using the containerized indy-cli](https://github.com/bcgov/von-network/blob/main/docs/Indy-CLI.md) + +## Windows: +To install the `indy-cli` on Windows 10 perform the following steps: +1. Download https://repo.sovrin.org/windows/indy-cli/stable/1.16.0/indy-cli_1.16.0.zip and unzip it. + If there is a newer version under https://repo.sovrin.org/windows/indy-cli/stable/ it instead. +2. Open a command prompt. (This will work differently if you use Windows Terminal). +3. `cd` to the directory where you unzipped the `indy-cli` package. 
For example, if you unzipped directly in your ‘downloads’ directory like I did you would type: `cd \Users\\Downloads\indy-cli_1.14.2` +4. Create a JSON Config file containing your taaAcceptanceMechanism in the directory where indy-cli.exe resides (I created \Users\\Downloads\indy-cli_1.14.2\cliconfig.json on my machine) + ```json + { + "taaAcceptanceMechanism": "for_session" + } + ``` +5. Run `indy-cli.exe --config cliconfig.json` to verify proper installation. You should see a new window appear with an `indy>` prompt, (If you are double clicking to start `indy-cli`, you need to right click on the .exe in your window and add the --config parameter first.) If you get an error stating that it is missing vcruntime140.dll then do the following: +6. Download and install vc_redist.x64.exe from the Visual Studio 2017 section on the https://support.microsoft.com/en-ae/help/2977003/the-latest-supported-visual-c-downloads page, and then rerun indy-cli.exe to see if it works as described in previous step. +7. Type ‘exit’ in the `indy-cli` + +## Ubuntu: +To install the `indy-cli` on Ubuntu, perform the following steps from the ubuntu command line: + +1. `sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88` +2. `sudo add-apt-repository "deb https://repo.sovrin.org/sdk/deb xenial stable"` +3. `sudo add-apt-repository "deb https://repo.sovrin.org/deb xenial stable"` +4. `sudo apt-get update -y` +5. `sudo apt-get upgrade -y ` +6. `sudo apt-get install -y indy-cli` +7. `cd ~` +8. Create a JSON Config file containing your taaAcceptanceMechanism in your home directory: +`vim ~/cliconfig.json` + + Press the “i” key and paste the following into the file: + ```json + { + "taaAcceptanceMechanism": "for_session" + } + ``` + Press the “esc” key then the following characters to write the file and quit +`:wq` +9. 
Run `indy-cli --config ~/cliconfig.json` to start the `indy-cli` + +## Mac: + +Since there is not a prepackaged version of the `indy-cli` prepared for the Mac, the following steps will help you to create an environment, build, and run the `indy-cli` in a Mac terminal. + +Open a Terminal +Run the following commands in the terminal: + +1. `cd ~` +2. `mkdir github` +3. `cd github` +4. `git clone https://github.com/hyperledger/indy-sdk.git`(might need xcode-select --install if error occurs) +5. `/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"` +6. `curl https://sh.rustup.rs -sSf | sh` +7. Follow onscreen instructions to install rust +8. `brew install pkg-config libsodium automake autoconf cmake openssl zeromq zmq` + NOTE: the openssl path needs to match what you currently have on your system + +9. Run > `ls /usr/local/Cellar/openssl/` + Note the name of the directory shown (the example below shows 1.0.2p but the latest version is 1.1.1l) + + Use this directory in place of the one listed below in your .profile file + +10. Add the following lines to your` ~/.profile file `(making the correction shown in the previous step if needed) + ``` + export PATH="$HOME/.cargo/bin:$PATH:~/github/indy-sdk/libindy/target/debug:~/github/indy-sdk/cli/target/debug" + export PKG_CONFIG_ALLOW_CROSS=1 + export CARGO_INCREMENTAL=1 + export RUST_LOG=indy=trace + export RUST_TEST_THREADS=1 + export OPENSSL_DIR=/usr/local/Cellar/openssl/1.0.2p #use your path + export LIBRARY_PATH=~/github/indy-sdk/libindy/target/debug/ + export LIBINDY_DIR=~/github/indy-sdk/libindy/target/debug/ + ``` +11. Run the following commands from your terminal to build the `indy-cli`: + ``` + source ~/.profile + cd ~/github/indy-sdk/libindy + cargo build + cd ../cli + cargo build + ``` +12. 
Create a JSON Config file containing your taaAcceptanceMechanism in your home directory: + `vim ~/cliconfig.json` + + Press the “i” key and paste the following into the file: + ```json + { + "taaAcceptanceMechanism": "for_session" + } + ``` + Press the “esc” key then the following characters to write the file and quit + `:wq` +13. You can now run `indy-cli` from within a terminal by typing + + `indy-cli --config ~/cliconfig.json` + + `indy> exit` (To exit from the `indy-cli` prompt when you ar done) + + If the above gives error regarding library not loaded libssl.1.0.0, you will probably need to run the following command (all in one line should work) to revert your version: + + ``` + brew uninstall --ignore-dependencies openssl; brew uninstall openssl; + brew install https://github.com/tebelorg/Tump/releases/download/v1.0.0/openssl.rb + ``` + + diff --git a/docs/source/NewNetwork/CreateDID.md b/docs/source/NewNetwork/CreateDID.md new file mode 100644 index 000000000..eb8033d12 --- /dev/null +++ b/docs/source/NewNetwork/CreateDID.md @@ -0,0 +1,33 @@ +# Creating a DID using the `indy-cli` + +You will need to perform the following commands once for each `indy-cli` machine that you want to run on. The following commands contain suggestions to save certain values in a secure place. Please do not share those values or that place with anyone. + +_If you just need to quickly generate a set of secrets (Seed and wallet key), or a Seed, DID, and Verkey and do not have an `indy-cli` environment already setup, you can use the `indy-cli` features integrated into `von-network`. Refer to [Generate a set of Secrets](https://github.com/bcgov/von-network/blob/main/docs/Indy-CLI.md#generate-a-set-of-secrets), and [Generate your DID](https://github.com/bcgov/von-network/blob/main/docs/Indy-CLI.md#generate-your-did) for details._ + + +1. Start your `indy-cli` using the instructions from [Installing the `indy-cli`](./CLIInstall.md) for your platform. 
+ + All following commands are executing inside the `indy-cli`. + +2. Create a wallet with: + + `wallet create key` + + You will be prompted for a wallet key. What you type will not be displayed on the console. Your wallet key is a secure key that only you should know, and it should be randomly generated. Save it in a secure place for later use. You will use it every time you need to send transactions to the ledger from the `indy-cli`. + +3. `wallet open wallet_name key` + + You will be prompted for your wallet key. What you type will not be displayed on the console. + +4. `did new seed` + + You will be prompted for a seed. What you type will not be displayed on the console. + + If you have lost your original seed or have never created one, then create a new one. This seed is used to regenerate your DID and to add your DID to your wallet(s). + + The seed is a 32 character string that only you can know. + + > WARNING: Whoever knows your Seed can recreate your exact DID in their own wallet and use it to manage the ledger. + + Save your Seed in a secure place so that only you can recreate your DID whenever needed. + Also save the public DID and verkey generated from this step so that you will know and can verify your public DID. \ No newline at end of file diff --git a/docs/source/NewNetwork/NewNetwork.md b/docs/source/NewNetwork/NewNetwork.md new file mode 100644 index 000000000..821b03a5a --- /dev/null +++ b/docs/source/NewNetwork/NewNetwork.md @@ -0,0 +1,144 @@ +# Setting up a New Network + +## Introduction + + The purpose of this document is to describe in some detail the process of building a brand-new Indy Node Network (Network) using 4 Stewards on their own separate nodes. + It goes into more details than [Starting a Network](../start-nodes.md). + These instructions are intended to be used for a distributed or “production” level environment but can be adapted to be used to build a private network. 
+ + This document is heavily based on [Create New Indy Network](https://docs.google.com/document/d/1XE2QOiGWuRzWdlxiI9LrG9Am9dCfPXBXnv52wGHorNE) and the [Steward Validator Preparation Guide v3](https://docs.google.com/document/d/18MNB7nEKerlcyZKof5AvGMy0GP9T82c4SWaxZkPzya4). + +## I. Create Network Governance documents (Optional) + + Network Governance describes the policies and procedures by which your new network will run and be maintained. Here’s an example: [Sovrin Governance Framework](https://docs.google.com/document/d/1K8l5MfXQWQtpT49-FHuYn_ZnRC5m0Nwk) + + +## II. Assign Network Trustees + + Trustees are the people who manage the network and protect the integrity of the Network Governance. This includes managing the `auth_rules`. + + For a production Network, at least 3 Trustees representing three different persons are required and more are preferred. For a test Network one Trustee is required and 3 or more are preferred (all Trustee DID’s may belong to the same user on a test network if needed). + + Initial Trustees (3 preferred) must create and submit a Trustee DID and Verkey so that the domain genesis file can be built. + + Each trustee has to [instal the `indy-cli`](./CLIInstall.md) and [create a Trustee DID](./CreateDID.md). + + Once the Trustees have created their DID and Verkey give the Trustees access to a spreadsheet like [this one](https://docs.google.com/spreadsheets/d/1LDduIeZp7pansd9deXeVSqGgdf0VdAHNMc7xYli3QAY/edit#gid=0) and have them fill out their own row of the Trustees sheet. The completed sheet will be used to generate the genesis transaction files for the network. + + +## III. Genesis Stewards + + A Steward is an organization responsible for running a Node on the Network + + Exactly 4 “Genesis” Stewards are needed to establish the network, more Stewards can be added later. 
+ + Each Genesis Steward’s node information will be included in the Genesis Pool file, so they should be willing to install and maintain a Node on the new Network for an extended period of time. + + The Stewards must: + 1. Generate Steward DIDs as described in [Creating DID](./CreateDID.md). + 1. Install their node as described in [Installation and configuration of Indy-Node](../installation-and-configuration.md) (with some small adjustments): + 1. Determine a name for the new network and have the stewards substitute it in the appropriate places in the guide, such as when setting the network name and creating the directory when creating the keys for the node. + 1. They all need to stop at the normal place ([3.5. Add Node to a Pool](../installation-and-configuration.md#3.5.-Add-Node-to-a-Pool)) as instructed in the guide as the steps that follow differ when creating a new network. The following sections of this guide describe the steps required to start the new network. + + Once the Stewards have created their DID and Verkey, and performed the initial setup of they node, give the Stewards access to a spreadsheet like [this one](https://docs.google.com/spreadsheets/d/1LDduIeZp7pansd9deXeVSqGgdf0VdAHNMc7xYli3QAY/edit#gid=0) and have them fill out their own row of the Stewards sheet. The completed sheet will be used to generate the genesis transaction files for the network. + +## IV. Create and Distribute genesis transaction files + + Save the sheets filled out by the Trustees and Stewards as separate files in csv format, and use the [genesis_from_files.py](https://github.com/sovrin-foundation/steward-tools/tree/master/create_genesis) script to generate the `pool_transactions_genesis` and `domain_transactions_genesis` files for the network. + + >Tip: The `generategenesisfiles` in `von-network` provides a convenient wrapper around the `genesis_from_files.py` and runs it in a container including all of the dependencies. 
For more information refer to [Generate Genesis Files](https://github.com/bcgov/von-network/blob/main/docs/Indy-CLI.md#generate-genesis-files). + + Double check the files contain the correct information: + - The `domain_transactions_genesis` file should contain all of the DIDs and Verkeys for the Trustees (`"role":"0"`) and the Stewards (`"role":"2"`). + - The `pool_transactions_genesis` file should contain each of the nodes with all their unique information. + + Publish the genesis files to a public location, such as a GitHub repository associated with your network. The Stewards and end users will need this information. + + Inform the Stewards and Trustees where they can download the genesis files. + + - The Trustees and Stewards will need to register the `pool_transactions_genesis` with their `indy-cli` to complete the setup and to be able to connect to the network once it's running. How and where they need to register the `pool_transactions_genesis` depends on how they setup their `indy-cli` environment; [Installing the `indy-cli`](./CLIInstall.md) + + - The Stewards will also need to download the genesis files onto their nodes while completing the setup. All of the following steps are to be completed on the node. + 1. Set the network name in `/etc/indy/indy_config.py`, replacing `` in the following command with the actual network name; + + `sudo sed -i -re "s/(NETWORK_NAME = ')\w+/\1/" /etc/indy/indy_config.py` + + 1. Create a network directory and download the genesis files into it. _The directory name must be the same on all of the nodes and it must match the name of the network._ + 1. `sudo -i -u indy mkdir /var/lib/indy/` + 1. `cd /var/lib/indy/` + 1. `sudo curl -o domain_transactions_genesis ` + 1. `sudo curl -o pool_transactions_genesis ` + 1. `sudo chown indy:indy *` + - It is important the files are owned by `indy:indy`. + +## V. 
Schedule a meeting to instantiate the new network + + Invite all Genesis Stewards to a meeting where they can execute commands and share their screens for both an `indy-cli` and for their Validator Nodes being added to the Network. + + > NOTE: It is very useful to go through some checks for each node to verify their setup before continuing. Some large amounts of debug and recovery work can be avoided by 5-10 minutes of checking configs of each node at the beginning of the meeting. + > - `/etc/indy/indy_config.py` + > - all nodes need to have the same network name. + > - the name of the network should correspond to the `/var/lib/indy/` directory on each node which contains the genesis files for the network, and the files in the directory should be owned by `indy:indy`. + > - `/etc/indy/indy.env` + > - all nodes should have local ip addresses in this file and be pointing at the correct ports. + > - Genesis files + > - Ensure both `pool_transactions_genesis` and `domain_transactions_genesis` files contain the expected content. + > - Verify the software version on all the nodes match + > ``` + > dpkg -l | grep indy + > dpkg -l | grep sovrin + > ``` + > - Network Connectivity + > - Use `nc -l ` (on the host), and `nc -vz ` (on the remote) to test the following. + > - Check the network connectivity between nodes using the `node_ip:port` combinations. Ensure that each node can communicate with all of the other nodes. + > - Check the network connectivity between the nodes and a client using the `client_ip:port` combinations. Ensure each node is accessible to client machines. + + Once all of the checks are complete have the Stewards simultaneously start their nodes as described in section [3.5.2. Enable the Service](../installation-and-configuration.md#3.5.2.-Enable-the-Service) of the Installation and configuration of Indy-Node guide, and walk through the remainder of that guide. + +## VI. 
Configure the Indy Network + +### `auth_rules` + Update the network's `auth_rules` to help enforce the governance rules for the network. + + For more information on `auth_rules` refer to: + - [Default AUTH_MAP Rules](../auth_rules.md) + - [auth_rules Walkthrough](https://docs.google.com/document/d/1xk0A5FljKOZ2Fazri6J5mAfnYWXdOMl2LwrFK16MJIY) + +### `TAA` (Transaction Author Agreements) + Add a `TAA` to the network. + + For more information on `TAA`s refer to: + - [Transaction Author Agreement - `indy-sdk`](https://github.com/hyperledger/indy-sdk/blob/master/docs/how-tos/transaction-author-agreement.md) + - [Transaction Author Agreement - `indy-plenum`](https://github.com/hyperledger/indy-plenum/blob/master/docs/source/transaction_author_agreement.md) + - [Transaction Author Agreement Design](../../../design/txn_author_agreement.md) + - [TAA for CLI Walkthrough](https://docs.google.com/document/d/1Ma-EJkYpRfPOZApyEvcWrkb4EKn71XrIFd9KvZL0Whg) + +## Where to go from here? + + ### Add more Nodes + + For the network to remain in write consensus in the event of node failures the network needs to be comprised of `3f+1` nodes, where `f` is the number of failed nodes. + + For a network of 4 nodes the network can remain in write consensus if a single node at a time fails, however if more than a single node fails at a time the network will lose write consensus and go into a read-only state. Similarly, a network comprised of 7 nodes can withstand up to 2 nodes failing at any given time. Therefore, it's recommended to have at least 7 nodes running in your network. + + Examples: + + | Failures to Withstand | 3f+1 | Required Nodes | + |--|--|--| + | 1 | 3(1)+1 | 4 | + | 2 | 3(2)+1 | 7 | + | 3 | 3(3)+1 | 10 | + + ### Network Monitoring + + [hyperledger/indy-node-monitor](https://github.com/hyperledger/indy-node-monitor) is the community supported and maintained tool for network monitoring. 
+ + #### Manual + - Run `indy-node-monitor` at least three times a day to detect any issues with the network. + + #### Automated + - Run `indy-node-monitor` on a schedule (every 15-30 minutes) and add a notification plugin to alert you to any issues. _Please consider contributing your work back to the project._ + +## Hands On Walkthrough + + An example walkthrough of the above mentioned steps can be found in the `sample/Network` [folder](../../../sample/Network/README.md). diff --git a/docs/source/configuring-2nics.md b/docs/source/configuring-2nics.md new file mode 100644 index 000000000..38f7faa22 --- /dev/null +++ b/docs/source/configuring-2nics.md @@ -0,0 +1,148 @@ +# Configuring a 2 NIC Node +First some caveats and warnings. These are notes based on setting up 2 NICs on an AWS VM. It might be possible to adapt them for other environments as well, particularly the "Configure Network Interfaces in Ubuntu" section. + +>WARNING: +When you are doing network configuration, it is very possible to put your VM into a state where you are no longer able to log into it over the network. This may be difficult or impossible to recover from. Be very careful. If you have questions, doubts, or just need help, reach out prior to following these instructions. + +## Initial networking steps in an AWS console +Create security group "validator client" +- Port 22 for ssh +- Port 9702 for Validator client connections + + +Create security group "validator inter-node" +- Port 9701 for Validator inter-node connections +- Initially set up your Validator IP address to accept connections from anywhere, but later modify it as follows to only allow connections from specific IP addresses. + - To generate an allow list, run the following command on a Validator Node: + + `current_validators.py --writeJson | node_address_list.py --outFormat aws` + +Setup Validator instance +1. 
Provision VM + - Use security group "validator client" for the default network interface + - make note of the instance ID when completed +2. Add and configure a 2nd network interface in AWS. + - On EC2 left side menu - Network & Security -> Network Interfaces -> Create Network Interface + 1. Subnet -> Select a different subnet in the same zone as your instance + 2. Private IP -> auto assign + 3. Security groups -> validator inter-node + - On the main screen, select the new interface and click the Attach button + Find and select the instance ID (recorded in step 1) +3. Note the Network Interface ID of each network interface + - On EC2 left side menu - INSTANCES -> Instances + - Select your instance + - At the bottom of the screen select the description tab and scroll down to ‘Network interfaces’ + - Click on each interface and then record the ‘Interface ID’ and the ‘Private IP Address’ for later use. +4. Create 2 Elastic IP’s, 1 for each NIC, and associate them with the network interfaces + - On EC2 left side menu - Network & Security ->Elastic IPs + 1. Click Allocate New Address + 1. Give your new addresses appropriate names so that you can identify them later. (i.e. BuilderNet Client and BuilderNet Inter-Node) + 2. I used Amazon IP addresses, but you can use your own if you like + 3. Repeat steps 1 and 2 to create a second Elastic IP + 2. For each new Elastic IP do the following: + 1. Select one of the Elastic IP’s you just created + 2. Click Actions -> Associate address + - Resource type -> ‘Network interface’ + - Network Interface -> + - Private IP -> (there should only be one option and it should match the internal IP address of the chosen interface) + - Leave checkbox empty (this might not matter) + - Click “Associate” + 3. Make sure you do this for both interfaces of your instance + +## Configure the Network Interfaces in Ubuntu +1. Disable automatic network management by AWS. 
(These steps are for AWS users only and will keep AWS from overwriting your settings) Run the following from the Ubuntu command line: +`sudo su -` +`echo 'network: {config: disabled}' > /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg` +>WARNING: The following steps use the common network interface names eth0 and eth1. You must substitute the interface device names used by your system or your instance will lose its network connection and you might not be able to reattach to it. +2. Run the following steps from the Ubuntu command line: + + a. `ip a` + - Record the interface device names and their local IP addresses for later use. + + b. `route -n` + - Record the Gateway for later use. + + c. `cd /etc/network/interfaces.d` + + d. `vim 50-cloud-init.cfg` + - Cut the existing eth0 lines from this file in preparation for moving them to a new file in this same directory. + - Example 50-cloud-init.cfg now looks like: + ``` + auto lo + iface lo inet loopback + ``` + + e. `vim eth0.cfg (use .cfg if your interface name is not eth0)` + - Paste the eth0 lines cut from the 50-cloud-init.cfg file and add the following lines, indented 3 spaces: + ``` + up ip route add default via dev tab 1 + up ip rule add from >/32 tab 1 + up ip rule add to >/32 tab 1 + up ip route flush cache + ``` + - Example eth0.cfg + ``` + auto eth0 + iface eth0 inet dhcp + up ip route add default via 172.31.32.1 dev eth0 tab 1 + up ip rule add from 172.31.33.147/32 tab 1 + up ip rule add to 172.31.33.147/32 tab 1 + up ip route flush cache + ``` + + f. Repeat step `e` but for the second network interface: + - `cp eth0.cfg eth1.cfg` + - `vi eth1.cfg` + - Replace all instances of eth0 with eth1 + - Change to the one corresponding to eth1 + - Change ‘tab 1’ to ‘tab 2’ + - Example eth1.cfg + ``` + auto eth1 + iface eth1 inet dhcp + up ip route add default via 172.31.32.1 dev eth1 tab 2 + up ip rule add from 172.31.35.63/32 tab 2 + up ip rule add to 172.31.35.63/32 tab 2 + up ip route flush cache + ``` + g. 
`ifup eth1` + - Check to make sure eth1 came up and is working properly. If the eth0 interface becomes unusable, you should then be able to log in through eth1 to fix it. +3. Reboot your machine + +## Tests +If the configuration is working, you should be able to connect a "listener" process to the IP address and port for the client connections. Then from a different, client machine, you should be able to reach that port on that IP address, firewalls permitting. You should also be able to do the same thing for the node IP address and port. Netcat is ubiquitous and convenient for these tests. + +On the Validator: +``` +nc -l < client port> +``` +On the client machine: +``` +nc -v -z +``` +Expected result: +``` +Success! +``` +On the Validator: +``` +nc -l < node port> +``` +On the client machine: +``` +nc -v -z +``` +Expected result: +``` +Success! +``` +Other combinations should fail or not return. Note that in AWS, the netcat commands executed on the Validator should use the private IP address, and the netcat commands executed on the client should use the public IP (Elastic) address. + +Finally, remember to later modify firewalls to allow and deny traffic: + +On client IP address, allow: +- Port 22 from your home network(s). +- Port 9702 (or whatever you have configured for clients) from anywhere. + +On node IP address, allow: +- Port 9701 (or whatever you have configured for inter-validator) from an allow list of other Validators. diff --git a/docs/source/index.rst b/docs/source/index.rst index b56ef6cb0..a5508c2e8 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -23,7 +23,12 @@ Welcome to Hyperledger Indy Node's documentation! 
indy-file-structure-guideline.md write-code-guideline.md setup-dev.md - + installation-and-configuration.md + configuring-2nics.md + NewNetwork/NewNetwork.md + NewNetwork/CLIInstall.md + NewNetwork/CreateDID.md + NewNetwork/CreateValidatorKeys.md diff --git a/docs/source/installation-and-configuration.md b/docs/source/installation-and-configuration.md new file mode 100644 index 000000000..bdb124505 --- /dev/null +++ b/docs/source/installation-and-configuration.md @@ -0,0 +1,418 @@ +# Installation and configuration of Indy-Node + +## 1. Introduction +The purpose of this document is to describe how to setup a production level Indy-Validator-Node and register it on an existing network using an `indy-cli` machine which you also configure along the way. This documentation is based heavily on the [Sovrin Steward Validator Preparation Guide v3](https://docs.google.com/document/d/18MNB7nEKerlcyZKof5AvGMy0GP9T82c4SWaxZkPzya4). + +For information on how to setup a new network, refer to [New Network](./NewNetwork/NewNetwork.md) + +## 2. Preliminaries to the Set Up +Before you start this process, you’ll need to gather a couple of things and make a few decisions. + +As you proceed through these steps, you will be generating data that will be needed later. As you follow the instructions and obtain the following, store them for later use: + +- Your Steward Seed + - This is extremely important, and it must be kept safe and private. It is used to generate the public / private key pair that you will use as a Steward to post transactions to the ledger. +- Your Steward distributed identity (DID) + - This is public information that is generated based on your Steward Seed. It is an identifier for your organization that will be written to the ledger by an Indy network Trustee. +- Your Steward verification key (verkey) + - This is public information that is generated based on your Steward Seed. 
It will be written to the ledger by an Indy network Trustee along with your DID, and will be used to verify transactions that you write to the ledger +- The Validator IP Address for inter-node communications + - This IP address must be configured for exclusive use of inter-node consensus traffic. Ideally, traffic to this address will be allow-listed in your firewall. +- The Validator node port +- The Validator IP Address for client connections + - This IP address must be open for connections from anywhere, since clients around the world will need to be able to connect to your node freely. +- The Validator client port +- The Validator alias + - A human readable name for your node. This value is case sensitive. +- The Validator node seed + - This is distinct from your Steward seed and will be used to generate public and private keys that your Validator node will use for communications with other Validators. Like the Steward Seed, it should be kept secure. +- The Validator Node Identifier + - This is distinct from your Steward verkey. It is also public information that will be placed on the ledger but is used as a public key by your Validator node, rather than by you, the Steward. +- The Validator BLS public key. + - Used by the Validator to sign individual transactions that will be committed to the ledger. It is public information that will be written to the ledger. +- The Validator BLS key proof-of-possession (pop) + - A cryptographic check against certain forgeries that can be done with BLS keys. + +### 2.1 Two Machines +You’ll need two machines: one is your Validator node and the other an `indy-cli` machine to run the `indy-cli` with which you will interact with the ledger. They can be physical machines, virtual machines, or a combination. The machine with the `indy-cli` can be turned on and off at your convenience (refer to [3.1. 
`indy-cli` Machine Installation](##3.1.-indy-cli-Machine-Installation) for more details), only the Validator node needs to be public and constantly running. + +>Important: for security reasons, you must not use your Validator node as an `indy-cli` client. If you do, it could expose your Steward credentials needlessly. + +Your Validator **must run Ubuntu 16.04 (64-bit)** as, _at the time of writing_, this is the only version that has a verified and validated release package. Work is actively being done on an Ubuntu 20.04 release. This guide presupposes that your `indy-cli` machine will run on Ubuntu as well. + +Your Validator node should have two NICs, each with associated IP addresses and ports. One NIC will be used for inter-validator communication, and the other for connections from clients, including Indy edge agents, as well as ssh and other connections you use for administration. This two NIC approach is required as a defense against denial-of-service attacks, since the NIC for inter-validator communications should be behind a firewall that is configured to be much more restrictive of inbound connections than the client-facing NIC is. + +It is currently possible to have just one NIC and IP address, as the transition for older Stewards to change to 2 NICs is ongoing. The inability to or delay of adding a second NIC will likely affect which network your node will be placed on. A resource that may help you to configure your node to use two NICs is described in [Configuring a 2 NIC Node](./configuring-2nics.md) + +### 2.2 Validator Node Preliminary Information + +#### Get the IP Addresses +Your Validator node will be the machine that will become a part of an Indy network. It should have two static, publicly accessible, world routable IP addresses. It should be configured so that outgoing TCP/IP connections are from these same addresses, as well as incoming connections. + +Obtain IP addresses that meet this requirement. 
+ +#### Choose Port Numbers +The Validator node will also be required to have the following: + +- Node Port: TCP + - The Validators use this IP address and port combination to communicate with each other. +- Client Port: TCP + - Clients use this IP address and port combination to communicate with the Validator node, to interact with the ledger. + +By convention, please choose ports 9701 and 9702 for your Node and Client ports, respectively. + +#### Choose an Alias: +Your Validator node will need to have an alias. This will be used later when we create a key for the node. It can be any convenient, unique name that you don’t mind the world seeing. It need not reference your company name; however it should be distinguishable from the other Validator nodes on the network. Many Stewards choose a Validator alias that identifies their organization, for pride of their contribution to the cause of self-sovereign identity. + + +## 3. Setup and Configuration + +Some instructions must be executed on the Validator node, and others on the `indy-cli` machine. The command line prompts in the instructions will help remind you which machine should be used for each command. + +### 3.1. `indy-cli` Machine Installation + +The following instructions describe how to install and configure the `indy-cli` directly on a machine or VM. The other, possibly more convenient, option is to use a containerized `indy-cli` environment like the one included with [von-network](https://github.com/bcgov/von-network). For information on how to use the containerized `indy-cli` in `von-network`, refer to [Using the containerized indy-cli](https://github.com/bcgov/von-network/blob/main/docs/Indy-CLI.md) + +#### 3.1.1. Install the `indy-cli` +On the machine you’ve chosen for the `indy-cli`, open a terminal and run the following lines to install the `indy-cli` package. 
+ +``` +ubuntu@cli$ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 +ubuntu@cli$ sudo apt-get install -y software-properties-common python-software-properties +ubuntu@cli$ sudo add-apt-repository "deb https://repo.sovrin.org/sdk/deb xenial stable" +ubuntu@cli$ sudo add-apt-repository "deb https://repo.sovrin.org/deb xenial stable" +ubuntu@cli$ sudo apt-get update -y +ubuntu@cli$ sudo apt-get upgrade -y +ubuntu@cli$ sudo apt-get install -y indy-cli +``` + +#### 3.1.2. Add an Acceptance Mechanism +To write to an Indy Node Ledger, you’ll need to sign the Transaction Author Agreement (TAA). You can learn more about the TAA [here](https://github.com/hyperledger/indy-sdk/blob/master/docs/how-tos/transaction-author-agreement.md). This agreement is incorporated into the process of connecting to the node pool and requires an acceptance mechanism. For the `indy-cli`, the default mechanism is “For Session” and the following instructions are required to be able to use “For Session” for your `indy-cli`: + +Create a JSON config file containing your taaAcceptanceMechanism. (You can also add plugins to this config file, but for now just set it up as basic as possible.) + +This example cliconfig.json file contains the line that sets the AML: +```json +{ +"taaAcceptanceMechanism": "for_session" +} +``` + +To start the `indy-cli` using your new config file, run the following: +`ubuntu@cli$ indy-cli --config /cliconfig.json` + +Now all of the appropriate transactions will have an “Agreement Accepted” authorization attached to them during this `indy-cli` session. + +#### 3.1.3. Obtain the Genesis Files +Obtain the genesis transaction files for the Network with the following steps. For the sake of this documentation, we will use the genesis files from the Sovrin Networks. Information on how to create a genesis file can be found [here](./NewNetwork/NewNetwork.md). 
These files contain bootstrap information about some of the Validator nodes, which will be used by your `indy-cli` to connect to the networks. + +If you are at the `indy` prompt, please exit: + +`indy> exit` + +Most Stewards will currently be onboarded to the BuilderNet. Obtain the genesis transaction file for it: + +`ubuntu@cli$ cd ` + +`ubuntu@cli:~ $ curl -O https://raw.githubusercontent.com/sovrin-foundation/sovrin/master/sovrin/pool_transactions_builder_genesis` + +You will also want to obtain the genesis files for the StagingNet and MainNet, for the possibility of moving between networks: + +``` +ubuntu@cli:~ $ curl -O https://raw.githubusercontent.com/sovrin-foundation/sovrin/stable/sovrin/pool_transactions_sandbox_genesis +ubuntu@cli:~ $ curl -O https://raw.githubusercontent.com/sovrin-foundation/sovrin/stable/sovrin/pool_transactions_live_genesis +``` + +#### 3.1.4. Generate the Steward Key +Next, generate a Steward key using the `indy-cli` machine you just installed. This will be comprised of a public and private key pair, generated from a seed. Knowing your seed will allow you to regenerate the key on demand. To keep this secure, you will need to have a very secure seed that is not easy to guess. + +##### Generate a Seed +>WARNING: +You want to guard your seed well. The seed will be used to generate your public (verification) key as well as your secret private key. If your seed falls into the wrong hands, someone could regenerate your private key, and take over your identity on the ledger. Keys can be rotated, which can stop some of the damage, but damage will still have been done. + +>Note: +It is the same procedure as described in [CreateDID](./NewNetwork/CreateDID.md). 
+ +In the terminal, run the following to install a good random string generator, and then use it to generate your seed: +``` +ubuntu@cli$ sudo apt install pwgen +ubuntu@cli$ pwgen -s 32 1 +``` +EXAMPLE: +``` +ubuntu@cli$ pwgen -s 32 1 +ShahXae2ieG1uibeoraepa4eyu6mexei +``` + +>IMPORTANT: +Keep this seed in a safe place, such as an encrypted password manager or other secure location designated by your organization. You will need it later in this guide, as well as in the future for other Steward interactions with the ledger. + +##### Run the `indy-cli` and generate key +Next we run the `indy-cli` by entering: + +`ubuntu@cli$ indy-cli --config /cliconfig.json` + +In the command line, enter the following to create your pool configuration and your wallet locally. In these instructions, we use "buildernet" for the pool name and "buildernet_wallet" for the wallet name, although you may use other names of your choosing, if desired. The encrypted wallet will be used to store important information on this machine, such as your public and private keys. When creating your wallet, you will need to provide a "key" that is any string desired. It will be the encryption key of your local wallet. + +``` +indy> pool create buildernet gen_txn_file=pool_transactions_builder_genesis +indy> wallet create buildernet_wallet key +``` +Upon entering this command, you’ll see a prompt to enter your wallet key. Enter the key and hit enter. + +>IMPORTANT: +To be able to retain your wallet and not re-create it when you need it in the future, keep this wallet key in a secure location as well. + +Using the pool configuration and wallet you have created, connect to the pool and open the wallet: + +`indy> pool connect buildernet` + +When you connect to a Network with TAA enabled, you will be asked whether you want to view the Agreement. Type ‘y’ to see the Agreement, then select ‘y’ again to accept the Agreement displayed. 
If you do not accept the agreement, then you will not be allowed to write to the Network. + +`indy> wallet open buildernet_wallet key` + +`` + +Using the seed that you generated with pwgen, place your public and private keys into your wallet. + +`indy> did new seed` + +`` + +The result should look something like this: + +`Did "DIDDIDDIDDIDDIDDIDDID" has been created with "~VERKEYVERKEYVERKEYVERKEY" verkey` + +>IMPORTANT: Save the “DID” and “verkey” portions of this. They are not secret, but they will be used when you are prompted to supply your Steward verkey and DID. + +### 3.2 Validator Node Installation +#### 3.2.1. Perform Network Test +This test is to confirm that your Validator node can connect with external devices. + +Note that the communication protocol used for both node and client connections is ZMQ. If your firewall uses deep packet inspection, be sure to allow this protocol bi-directionally. + +The tests in this section are to ensure your node's networking is operational, and that firewalls will allow TCP traffic to and from your IP addresses and ports. The assumptions are that for this stage of testing, you will be able to reach both sets of IP address/port combinations from an arbitrary client, but that later you will implement rules on your firewall restricting access to your node (inter-validator) IP address/port. + +##### 3.2.1.1 Test the node (inter-validator) connection to your Validator +Use netcat to listen on the "node" IP address and port of your Validator + +>IMPORTANT: +Many providers, such as AWS, use local, non-routable IP addresses on their nodes and then use NAT to translate these to public, routable IP addresses. If you are on such a system, use the local address to listen on, and the public address to ping with. + +`ubuntu@validator$ nc -l ` + +The above command will wait for a connection. 
On a system that can be used as a client, such as your `indy-cli` machine, do a TCP ping of that IP address and port: + +`ubuntu@cli$ nc -vz ` + +If the test is successful, the ping will return a "succeeded" message and the commands on both nodes will exit. + +##### 3.2.1.2 Test the client (edge agent) connection to your Validator +Repeat the above test on your Validator and a test client but using the Validator's "client" IP address and port. + +>Important: The “client” IP address referred to here is not the `indy-cli` machine’s IP address. Reminder: The Validator node has a node IP address for communications with other Validators and a “client” IP address for communications with edge agents (anything outside the Network of Validators). + +On your Validator: + +`ubuntu@validator$ nc -l ` + +On your client: + +`ubuntu@cli$ nc -vz ` + +If the test is successful, the ping will return a "succeeded" message and the commands on both nodes will exit. + +>IMPORTANT: +If your system uses NAT, the same approach should be used as above. + +##### 3.2.1.3 Test the connection from your node to another Validator on the BuilderNet +One of the Validator nodes on the BuilderNet is named "FoundationBuilder", which has a node IP address and port of 50.112.53.5 and 9701, respectively. On your Validator, make sure that your node is able to connect to this node on BuilderNet by TCP pinging its node IP address and port: + +``` +ubuntu@validator$ nc -vz 50.112.53.5 9701 +Connection to 50.112.53.5 9701 port [tcp/*] succeeded! +``` +When the above three tests are successful, you may proceed. + +#### 3.2.2 Install the Validator Node +Continue on your Validator node machine. + +>Important: You must use a login user with sudo privileges (**not root or indy**) to run these commands, unless otherwise indicated. 
+ +``` +ubuntu@validator$ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 +ubuntu@validator$ sudo apt-get install -y software-properties-common +ubuntu@validator$ sudo add-apt-repository "deb https://repo.sovrin.org/deb xenial stable" +ubuntu@validator$ sudo apt update +ubuntu@validator$ sudo apt upgrade -y +ubuntu@validator$ sudo apt install -y sovrin +``` +#### 3.2.3 Create the Key for the Validator Node + +>IMPORTANT: +Many providers, such as AWS, use local, non-routable IP addresses on their nodes and then use NAT to translate these to public, routable IP addresses. If you are on such a system, use the local addresses for the init_indy_node command. + +Please run the following on the Validator before running `init_indy_node`. + +In the `/etc/indy/indy_config.py` file, change the Network name from “sandbox” (Sovrin StagingNet) to “net3” (Sovrin BuilderNet) (use sudo to edit the file or use `sudo sed -i -re "s/(NETWORK_NAME = ')\w+/\1net3/" /etc/indy/indy_config.py)` then run the following commands: +``` +sudo -i -u indy mkdir /var/lib/indy/net3 +cd /var/lib/indy/net3 +sudo curl -o domain_transactions_genesis https://raw.githubusercontent.com/sovrin-foundation/sovrin/master/sovrin/domain_transactions_builder_genesis +sudo curl -o pool_transactions_genesis https://raw.githubusercontent.com/sovrin-foundation/sovrin/master/sovrin/pool_transactions_builder_genesis +``` +Make sure that both genesis files are owned by `indy:indy` by running: + +`sudo chown indy:indy *` + +Enter the following where `` is the alias you chose for your Validator node machine and `, , and ` are the correct values for your Validator. + +>Note: The node IP and client IP addresses should be the LOCAL addresses for your node. 
+ +`ubuntu@validator$ sudo -i -u indy init_indy_node ` + +You will see something like this: +``` +Node-stack name is Node19 +Client-stack name is Node19C +Generating keys for random seed b'FA7b1cc42Da11B8F4BC83990cECF63aD' +Init local keys for client-stack +Public key is a9abcd497631de182bb6f767ffb4921cdf83ffdb20e9d22e252883b4fc34bf2f +Verification key is 3d604d22c4bbfd55508a5a7e0008847bdeccd98a41acd048b500030020629ee1 +Init local keys for node-stack +Public key is a9abcd497631de182bb6f767ffb4921cdf83ffdb20e9d22e252883b4fc34bf2f +Verification key is bfede8c4581f03d16eb053450d103477c6e840e5682adc67dc948a177ab8bc9b +BLS Public key is 4kCWXzcEEzdh93rf3zhhDEeybLij7AwcE4NDewTf3LRdn8eoKBwufFcUyyvSJ4GfPpTQLuX6iHjQwnCCQx4sSpfnptCWzvFEdJnhNSt4tJMQ2EzjcL9ewRWi24QxAaCnwbm2BBGJXF7JjqFgMzGfuFXXHhGPX3UtdfAphrojk3A1sgq +Proof of possession for BLS key is QqPuAnjnkYcE51H11Tub12i7Yri3ZLHmEYtJuaH1NFYKZBLi87SXgC3tMHxw3LMxErnbFwJCSdJKbTb2aCVmGzqXQtVWSpTVEQCsaSm4SUZLbzWVoHNQqDJASRYNbHH2CqpR2MtntA4YNb2WixNSZNXFSdHMbB1yMQ7XUcZqtGHhcb +``` + +**Store the original command, the random seed, the verification key, the BLS public key, and the BLS key proof-of-possession (POP).** These are the keys for your Validator node (not to be confused with the keys for you in your Steward role). The Validator verification key and BLS key (with its POP) are public and will be published on the ledger. + +>The random seed should be protected from disclosure. + +### 3.3 Run the Technical Verification Script +>Note: +These steps are only required for becoming a Steward in Sovrin Networks. +However, you could incorporate this process into your own Network registration procedures. + +Download this script and set the execution flag on it: + +``` +ubuntu@validator$ cd ~ +ubuntu@validator$ curl -O https://raw.githubusercontent.com/sovrin-foundation/steward-tools/master/steward_tech_check.py +``` +`ubuntu@validator$ chmod +x steward_tech_check.py` + +Execute it, answering the questions that it asks. 
There are no wrong answers; please be honest. Questions that can be answered by scripting are automatically completed for you. + +`ubuntu@validator$ sudo ./steward_tech_check.py` + +After the script completes, copy the output beginning at '== Results for "A Steward MUST" ==', and send the results to the support team of the related network for review. + +### 3.4. Provide Information to Trustees +At this point you should have the following data available: + +- Your Steward verkey and DID +- The Validator ‘node IP address’ +- The Validator ‘client IP address’ +- The Validator ‘node port’ +- The Validator ‘client port’ +- The Validator alias +- The Validator verkey +- The BLS key and Proof of possession (pop) + +### 3.5. Add Node to a Pool + +>DO NOT proceed further with this document until your Steward DID and verkey (the public key) is on the ledger. + +#### 3.5.1 Configuration +After you have been informed that your public key has been placed onto the ledger of the Network, you may complete the configuration steps to activate your Validator node on that network. + +Things to verify before activating the node: +- `cat /etc/indy/indy_config.py` + - Ensure the network configuration is correct. +- `cat /etc/indy/indy.env` + - Verify the node alias and IPs +- `cat /var/lib/indy/net3/domain_transactions_genesis` + - Verify the file contains the correct content. +- `cat /var/lib/indy/net3/pool_transactions_genesis` + - Verify the file contains correct content. + +##### Make Sure Your Version Is Current +In some cases, some time may have passed before reaching this point. You should ensure that you have the current version of indy software installed before proceeding. On the Validator node, execute the following. + +Verify Versions +``` +dpkg -l | grep indy +``` + +##### Add Validator Node to Ledger +On your `indy-cli` machine, if you are not still on the `indy-cli` prompt, you will need to return to it. 
To get back to where you were, type `indy-cli --config /cliconfig.json`, connect to the network pool, designate the wallet to use (using the same wallet key as before), and enter the DID that was returned earlier, when you typed `did new seed` (then enter your seed) for your Steward user: + +``` +ubuntu@cli$ indy-cli --config /cliconfig.json +indy> pool connect buildernet +indy> wallet open buildernet_wallet key= +indy> did use +``` + +>Note: You may need to create a new wallet and run `did new seed` then enter `` instead, if you did not save your wallet or forgot your wallet key. + +If the connection is successful, enter the following, substituting the correct data as appropriate. An example follows. + +>Suggestion: Edit this in a text editor first, then copy and paste it into the `indy-cli`. Some editors will insert 'smart quotes' in place of regular ones. This will cause the command to fail. + +>IMPORTANT: +Many providers, such as AWS, use local, non-routable IP addresses on their nodes and then use NAT to translate these to public, routable IP addresses. If you are on such a system, use the routable public addresses for the ledger node command. + +``` +indy> ledger node target= node_ip= node_port= client_ip= client_port= alias= services=VALIDATOR blskey= blskey_pop= +``` + +Example: +``` +indy> ledger node target=4Tn3wZMNCvhSTXPcLinQDnHyj56DTLQtL61ki4jo2Loc node_ip=18.136.178.42 client_ip=18.136.178.42 node_port=9701 client_port=9702 services=VALIDATOR alias=Node19 blskey=4kCWXzcEEzdh93rf3zhhDEeybLij7AwcE4NDewTf3LRdn8eoKBwufFcUyyvSJ4GfPpTQLuX6iHjQwnCCQx4sSpfnptCWzvFEdJnhNSt4tJMQ2EzjcL9ewRWi24QxAaCnwbm2BBGJXF7JjqFgMzGfuFXXHhGPX3UtdfAphrojk3A1sgq blskey_pop=QqPuAnjnkYcE51H11Tub12i7Yri3ZLHmEYtJuaH1NFYKZBLi87SXgC3tMHxw3LMxErnbFwJCSdJKbTb2aCVmGzqXQtVWSpTVEQCsaSm4SUZLbzWVoHNQqDJASRYNbHH2CqpR2MtntA4YNb2WixNSZNXFSdHMbB1yMQ7XUcZqtGHhcb +``` + +>Suggestion: Save this command. You will use it again if you later move to another Network. + +#### 3.5.2. 
Enable the Service +In the Validator node: + +Return to the Validator node machine. + +Start the Validator service: + +`ubuntu@validator$ sudo systemctl start indy-node` + +Verify the start: + +`ubuntu@validator$ sudo systemctl status indy-node.service` + +Enable the service so that it will auto-restart when your node reboots: + +`ubuntu@validator$ sudo systemctl enable indy-node.service` + +### 3.6. See if the Node Is Working +If the setup is successful, your Validator node now connects to the Validator pool. + +In the Validator node: +`ubuntu@validator$ sudo validator-info` + +If your node is configured properly, you should see several nodes being selected as the primary or its backups, as in this example: + + England (1) + Singapore (3) + Virginia (4) + RFCU (5) + Canada (0) + Korea (2) + + +>Note: A ledger with a lot of transactions on it, like what often exists on the BuilderNet, can take a lot of time to sync up a new Validator node. If you don't get the right results for this test right away, try it again in a few minutes. + +To check that messages and connections are occurring normally you can run the following commands to follow the log file: +In the Validator node: + +`ubuntu@validator$ sudo tail -f /var/log/indy/net3/.log` + diff --git a/docs/source/start-nodes.md b/docs/source/start-nodes.md index 24c85887a..d3d1be71c 100644 --- a/docs/source/start-nodes.md +++ b/docs/source/start-nodes.md @@ -1,6 +1,6 @@ # Create a Network and Start Nodes -Please be aware that recommended way of starting a pool is to [use Docker](https://github.com/hyperledger/indy-node/blob/master/environment/docker/pool/README.md). +Please be aware that recommended way of starting a pool for development is to [use Docker](https://github.com/hyperledger/indy-node/blob/master/environment/docker/pool/README.md). In order to run your own Network, you need to do the following for each Node: 1. 
Install Indy Node diff --git a/sample/Network/DIDs.txt b/sample/Network/DIDs.txt new file mode 100644 index 000000000..1ac98944d --- /dev/null +++ b/sample/Network/DIDs.txt @@ -0,0 +1,71 @@ +newNetwork:indy> did new seed=000000000000000000000000Trustee1 + Did "V4SGRU86Z58d6TV7PBUe6f" has been created with "~CoRER63DVYnWZtK8uAzNbx" verkey + newNetwork:indy> did new seed=000000000000000000000000Trustee2 + Did "LnXR1rPnncTPZvRdmJKhJQ" has been created with "~RTBtVN3iwcFhbWZzohFTMi" verkey + newNetwork:indy> did new seed=000000000000000000000000Trustee3 + Did "PNQm3CwyXbN5e39Rw3dXYx" has been created with "~AHtGeRXtGjVfXALtXP9WiX" verkey + + + newNetwork:indy> did new seed=000000000000000000000000Steward1 + Did "Th7MpTaRZVRYnPiabds81Y" has been created with "~7TYfekw4GUagBnBVCqPjiC" verkey + newNetwork:indy> did new seed=000000000000000000000000Steward2 + Did "EbP4aYNeTHL6q385GuVpRV" has been created with "~RHGNtfvkgPEUQzQNtNxLNu" verkey + newNetwork:indy> did new seed=000000000000000000000000Steward3 + Did "4cU41vWW82ArfxJxHkzXPG" has been created with "~EMoPA6HrpiExVihsVfxD3H" verkey + newNetwork:indy> did new seed=000000000000000000000000Steward4 + Did "TWwCRQRZ2ZHMJFn9TzLp7W" has been created with "~UhP7K35SAXbix1kCQV4Upx" verkey + + + + + $ init_indy_node Steward1 0.0.0.0 9701 0.0.0.0 9702 + Node-stack name is Steward1 + Client-stack name is Steward1C + Generating keys for random seed b'88c26d3C92bc33Be077Bf22FCBa60E2A' + Init local keys for client-stack + Public key is DGM7x7SQvjKjfUa7VLo5gGCZ6WfKASo6Cp5uP3aBRf7j + Verification key is DJrzRm3ahRkz2pesFVtmH8wA3S3z63XayZgDvV21b4BF + Init local keys for node-stack + Public key is DGM7x7SQvjKjfUa7VLo5gGCZ6WfKASo6Cp5uP3aBRf7j + Verification key is DJrzRm3ahRkz2pesFVtmH8wA3S3z63XayZgDvV21b4BF + BLS Public key is 3k9aPxmqMYY4QQ3MK88Pot5QmqxfaaxuzMeGnKYT8j1Ds1Rcmq2zmjQmLMtKvBzC89E7yCQyiQ9HEDcGAZi6zmarMCQNkY9oYCAUVJGrZgxBE4a1oj7VYKw7zuGpMwsKLPGLcTGwpmX9LS6f5ykbazEwEgQRTiWj2epRKxZC87DLwbH + Proof of possession for BLS key 
is RT5vLkN7639sXwYMBWkuFnzSM7ezEb49ZZExf6htH1WBWyuYgJsRTqT71HWaizfFLi1zp63eNGKKVzzyMaETYoj8QoV3GejHeZzP7LydJQpHQ5VPuLW3NUy5BGH4Xt7RkCT5pUbwhjz6mwxXfGAtQot7kiMH18QrpcazAmHrFPXKe7 + + $ init_indy_node Steward2 0.0.0.0 9703 0.0.0.0 9704 + Node-stack name is Steward2 + Client-stack name is Steward2C + Generating keys for random seed b'Fa4F5cd101f891ca0Cfa4E02C9Bf1769' + Init local keys for client-stack + Public key is FF3Aq98cJ2QT5EDmtshfVkgyjm9dxJV7xbtFrtMQbKeD + Verification key is EQJ92vJVaAihejc9N2Yqy59L7ixVKMx2FgaXxD8F6vs7 + Init local keys for node-stack + Public key is FF3Aq98cJ2QT5EDmtshfVkgyjm9dxJV7xbtFrtMQbKeD + Verification key is EQJ92vJVaAihejc9N2Yqy59L7ixVKMx2FgaXxD8F6vs7 + BLS Public key is XvFCAC84AjEzcLFfdNQq17rGxheUbd95MCTkg8Bw3CNRR61isy5uNiqaoxZgNZac2MEvZoXX7Wk27YUMB9mc4XFdAHRJiVVs3UcB3giBuhbv4om6GjouGcKWYsFkffA4tvWPyeDDn5ifxZaJBDHVR4AHcvUNxFipGnEptFSDzayzBG + Proof of possession for BLS key is RXfySA7HWDh57hm3GRKqj1DcMPq66fLJHMzaN76U1XqdUaRTRmBtxgSREtEvudSNFL8woXJzqS7VnJehZNd8hXf4bipdBhJ4J7hzBwhpbXfsuH2yH6XExBrxyPCwyQ9K9RAQraHz2RTLhs8r93HNzjauUARbw5ADv2F42FW69kWbdR + + $ init_indy_node Steward3 0.0.0.0 9705 0.0.0.0 9706 + Node-stack name is Steward3 + Client-stack name is Steward3C + Generating keys for random seed b'A59c0EFB9cD7Eccdd4483a3BFbd36EB5' + Init local keys for client-stack + Public key is H3oLLToN9Wy1Yb9R9EMZXot8xCTnQLWMiSHRCpQm9fRD + Verification key is EktpqGnexWaiQyr9vcXDTgNYwqT8cxmAfnX8N7qWwEcC + Init local keys for node-stack + Public key is H3oLLToN9Wy1Yb9R9EMZXot8xCTnQLWMiSHRCpQm9fRD + Verification key is EktpqGnexWaiQyr9vcXDTgNYwqT8cxmAfnX8N7qWwEcC + BLS Public key is 13U7tXXXRTLeavMEQk7MqECuKkuFrHPAwidf2cVqhaJoABmHc4SBMXHVJJkc1pJNvjLu894UZ6pSt3aAYZ5nQrfkuqbBUEToWb5vZSLHTTNnznkzx5PStPFSZkYUuA4bYNLk5b8GbwrHFKjrjqzCdjEWs2hDipAmXfd9NBh3BTEwAxS + Proof of possession for BLS key is 
RHeKLLefbDdgBMpZ9AUrS8EHPDRnFXNiJ1z8LUgqPa8eUGyeRkAR2ppPkYqcLc9ekzG8cYZMTGx8y52sZ1q2QWqs3BYBH2i3H2WxL4icRq9Kj4kqs3BQadtPWBSq4vEaWTwwieuUXFYqpvk1ALCSNmS9NmMYXYyTL8uzrstviomjXm + + $ init_indy_node Steward4 0.0.0.0 9707 0.0.0.0 9708 + Node-stack name is Steward4 + Client-stack name is Steward4C + Generating keys for random seed b'F20fc06eab86A896A6Ae5D8AfEA46B68' + Init local keys for client-stack + Public key is GJVyfv4XXGHYDYmiifu8XmXyTx9jGb39hACRU23rT9Ww + Verification key is 8XFTwX3rHVUBddyruNTzKnBdbFqWz8eZPRasyySLD7Sv + Init local keys for node-stack + Public key is GJVyfv4XXGHYDYmiifu8XmXyTx9jGb39hACRU23rT9Ww + Verification key is 8XFTwX3rHVUBddyruNTzKnBdbFqWz8eZPRasyySLD7Sv + BLS Public key is oy7vASnhYAYo9fV1MzFSeCHEmyd2dQze6dmwWd5unwoySsA2UauUaKpV6QqwL9WQzQYRXZAoDT9jXGWwGFgCKWKVinFPj2TU5qsqAFt6PcXxQ7ZpBMEiUhQreqQv9BQsb7Upx9cNZKm4wKRyjCryX3TELb3xzz51wwsdeY8hduAKvb + Proof of possession for BLS key is QsrUH1e5zsdiEGij1NeY9S7CwzUdU2rzjskHNGHCQ8rtgYZyBC99MgRPzgkJHP86nWQUo2fSRvyWLQdBwvWfNtSqUBQgVScQPHg9CJXWWohWnzSP4ViBo8EEeGXEoP2NPeRnFCCfuhYAC7stZgBATFyvdFRwG58ws76qQQQsfDDHBV diff --git a/sample/Network/README.md b/sample/Network/README.md new file mode 100644 index 000000000..15177b8ce --- /dev/null +++ b/sample/Network/README.md @@ -0,0 +1,132 @@ +# Hands on Walkthrough + +This walkthrough goes through some of the detailed steps mentioned in [Setting up a New Network](../../docs/source/NewNetwork/NewNetwork.md) + +For the sake of simplicity this walkthrough runs all of the nodes on the local machine. As a result it uses the local python install version of some of the commands rather than the production level Debian package install version of the commands documented in the [Setting up a New Network](../../docs/source/NewNetwork/NewNetwork.md) guide. + +1. Open indy-cli by executing `indy-cli` + + Note that the command prompt changed to `indy>`. + +2. Create and open a wallet in the indy-cli. 
+ + `indy>wallet create newNetwork key=key` + + If not already opened, open the wallet + `indy> wallet open newNetwork key=key` + +3. Create DIDs for Trustees. + + ``` + newNetwork:indy> did new seed=000000000000000000000000Trustee1 + Did "V4SGRU86Z58d6TV7PBUe6f" has been created with "~CoRER63DVYnWZtK8uAzNbx" verkey + newNetwork:indy> did new seed=000000000000000000000000Trustee2 + Did "LnXR1rPnncTPZvRdmJKhJQ" has been created with "~RTBtVN3iwcFhbWZzohFTMi" verkey + newNetwork:indy> did new seed=000000000000000000000000Trustee3 + Did "PNQm3CwyXbN5e39Rw3dXYx" has been created with "~AHtGeRXtGjVfXALtXP9WiX" verkey + ``` + + > Warning: Using the same seed twice will result in the same DID and keys! + +4. Create DIDs for Stewards + + For the sake of this tutorial all DIDs and Stewards are running on the same machine. Normally this would be independent machines and organizations. + + ``` + newNetwork:indy> did new seed=000000000000000000000000Steward1 + Did "Th7MpTaRZVRYnPiabds81Y" has been created with "~7TYfekw4GUagBnBVCqPjiC" verkey + newNetwork:indy> did new seed=000000000000000000000000Steward2 + Did "EbP4aYNeTHL6q385GuVpRV" has been created with "~RHGNtfvkgPEUQzQNtNxLNu" verkey + newNetwork:indy> did new seed=000000000000000000000000Steward3 + Did "4cU41vWW82ArfxJxHkzXPG" has been created with "~EMoPA6HrpiExVihsVfxD3H" verkey + newNetwork:indy> did new seed=000000000000000000000000Steward4 + Did "TWwCRQRZ2ZHMJFn9TzLp7W" has been created with "~UhP7K35SAXbix1kCQV4Upx" verkey + ``` +5. Create Validator Node keys + + > The seed will be randomly generated. As mentioned above, the seed can be used to recreate the keys! 
+ + ``` + $ init_indy_node Steward1 0.0.0.0 9701 0.0.0.0 9702 + Node-stack name is Steward1 + Client-stack name is Steward1C + Generating keys for random seed b'88c26d3C92bc33Be077Bf22FCBa60E2A' + Init local keys for client-stack + Public key is DGM7x7SQvjKjfUa7VLo5gGCZ6WfKASo6Cp5uP3aBRf7j + Verification key is DJrzRm3ahRkz2pesFVtmH8wA3S3z63XayZgDvV21b4BF + Init local keys for node-stack + Public key is DGM7x7SQvjKjfUa7VLo5gGCZ6WfKASo6Cp5uP3aBRf7j + Verification key is DJrzRm3ahRkz2pesFVtmH8wA3S3z63XayZgDvV21b4BF + BLS Public key is 3k9aPxmqMYY4QQ3MK88Pot5QmqxfaaxuzMeGnKYT8j1Ds1Rcmq2zmjQmLMtKvBzC89E7yCQyiQ9HEDcGAZi6zmarMCQNkY9oYCAUVJGrZgxBE4a1oj7VYKw7zuGpMwsKLPGLcTGwpmX9LS6f5ykbazEwEgQRTiWj2epRKxZC87DLwbH + Proof of possession for BLS key is RT5vLkN7639sXwYMBWkuFnzSM7ezEb49ZZExf6htH1WBWyuYgJsRTqT71HWaizfFLi1zp63eNGKKVzzyMaETYoj8QoV3GejHeZzP7LydJQpHQ5VPuLW3NUy5BGH4Xt7RkCT5pUbwhjz6mwxXfGAtQot7kiMH18QrpcazAmHrFPXKe7 + + $ init_indy_node Steward2 0.0.0.0 9703 0.0.0.0 9704 + Node-stack name is Steward2 + Client-stack name is Steward2C + Generating keys for random seed b'Fa4F5cd101f891ca0Cfa4E02C9Bf1769' + Init local keys for client-stack + Public key is FF3Aq98cJ2QT5EDmtshfVkgyjm9dxJV7xbtFrtMQbKeD + Verification key is EQJ92vJVaAihejc9N2Yqy59L7ixVKMx2FgaXxD8F6vs7 + Init local keys for node-stack + Public key is FF3Aq98cJ2QT5EDmtshfVkgyjm9dxJV7xbtFrtMQbKeD + Verification key is EQJ92vJVaAihejc9N2Yqy59L7ixVKMx2FgaXxD8F6vs7 + BLS Public key is XvFCAC84AjEzcLFfdNQq17rGxheUbd95MCTkg8Bw3CNRR61isy5uNiqaoxZgNZac2MEvZoXX7Wk27YUMB9mc4XFdAHRJiVVs3UcB3giBuhbv4om6GjouGcKWYsFkffA4tvWPyeDDn5ifxZaJBDHVR4AHcvUNxFipGnEptFSDzayzBG + Proof of possession for BLS key is RXfySA7HWDh57hm3GRKqj1DcMPq66fLJHMzaN76U1XqdUaRTRmBtxgSREtEvudSNFL8woXJzqS7VnJehZNd8hXf4bipdBhJ4J7hzBwhpbXfsuH2yH6XExBrxyPCwyQ9K9RAQraHz2RTLhs8r93HNzjauUARbw5ADv2F42FW69kWbdR + + $ init_indy_node Steward3 0.0.0.0 9705 0.0.0.0 9706 + Node-stack name is Steward3 + Client-stack name is Steward3C + Generating keys for random seed 
b'A59c0EFB9cD7Eccdd4483a3BFbd36EB5' + Init local keys for client-stack + Public key is H3oLLToN9Wy1Yb9R9EMZXot8xCTnQLWMiSHRCpQm9fRD + Verification key is EktpqGnexWaiQyr9vcXDTgNYwqT8cxmAfnX8N7qWwEcC + Init local keys for node-stack + Public key is H3oLLToN9Wy1Yb9R9EMZXot8xCTnQLWMiSHRCpQm9fRD + Verification key is EktpqGnexWaiQyr9vcXDTgNYwqT8cxmAfnX8N7qWwEcC + BLS Public key is 13U7tXXXRTLeavMEQk7MqECuKkuFrHPAwidf2cVqhaJoABmHc4SBMXHVJJkc1pJNvjLu894UZ6pSt3aAYZ5nQrfkuqbBUEToWb5vZSLHTTNnznkzx5PStPFSZkYUuA4bYNLk5b8GbwrHFKjrjqzCdjEWs2hDipAmXfd9NBh3BTEwAxS + Proof of possession for BLS key is RHeKLLefbDdgBMpZ9AUrS8EHPDRnFXNiJ1z8LUgqPa8eUGyeRkAR2ppPkYqcLc9ekzG8cYZMTGx8y52sZ1q2QWqs3BYBH2i3H2WxL4icRq9Kj4kqs3BQadtPWBSq4vEaWTwwieuUXFYqpvk1ALCSNmS9NmMYXYyTL8uzrstviomjXm + + $ init_indy_node Steward4 0.0.0.0 9707 0.0.0.0 9708 + Node-stack name is Steward4 + Client-stack name is Steward4C + Generating keys for random seed b'F20fc06eab86A896A6Ae5D8AfEA46B68' + Init local keys for client-stack + Public key is GJVyfv4XXGHYDYmiifu8XmXyTx9jGb39hACRU23rT9Ww + Verification key is 8XFTwX3rHVUBddyruNTzKnBdbFqWz8eZPRasyySLD7Sv + Init local keys for node-stack + Public key is GJVyfv4XXGHYDYmiifu8XmXyTx9jGb39hACRU23rT9Ww + Verification key is 8XFTwX3rHVUBddyruNTzKnBdbFqWz8eZPRasyySLD7Sv + BLS Public key is oy7vASnhYAYo9fV1MzFSeCHEmyd2dQze6dmwWd5unwoySsA2UauUaKpV6QqwL9WQzQYRXZAoDT9jXGWwGFgCKWKVinFPj2TU5qsqAFt6PcXxQ7ZpBMEiUhQreqQv9BQsb7Upx9cNZKm4wKRyjCryX3TELb3xzz51wwsdeY8hduAKvb + Proof of possession for BLS key is QsrUH1e5zsdiEGij1NeY9S7CwzUdU2rzjskHNGHCQ8rtgYZyBC99MgRPzgkJHP86nWQUo2fSRvyWLQdBwvWfNtSqUBQgVScQPHg9CJXWWohWnzSP4ViBo8EEeGXEoP2NPeRnFCCfuhYAC7stZgBATFyvdFRwG58ws76qQQQsfDDHBV + ``` + +6. Fill in the spreadsheet. + +7. Download the script from [https://github.com/sovrin-foundation/steward-tools/tree/master/create_genesis] and generate the genesis file. 
+ + ``` + $ python genesis_from_files.py --trustees Trustees.csv --stewards Stewards.csv + DEBUG:root:new line check for file: ./pool_transactions_genesis + INFO:root:Starting ledger... + INFO:root:Recovering tree from transaction log + INFO:root:Recovered tree in 0.002561586000410898 seconds + DEBUG:root:new line check for file: ./domain_transactions_genesis + INFO:root:Starting ledger... + INFO:root:Recovering tree from transaction log + INFO:root:Recovered tree in 0.000322740000228805 seconds + ``` +8. Edit `/etc/indy/indy_config.py` and change the network name. + `NETWORK_NAME = "newNetwork"` + +9. `mkdir /var/lib/indy/newNetwork` + +10. `cp domain_transactions_genesis /var/lib/indy/newNetwork/ && cp pool_transactions_genesis /var/lib/indy/newNetwork/` + +11. Start the nodes: + + ``` + start_indy_node Steward1 0.0.0.0 9701 0.0.0.0 9702 + start_indy_node Steward2 0.0.0.0 9703 0.0.0.0 9704 + start_indy_node Steward3 0.0.0.0 9705 0.0.0.0 9706 + start_indy_node Steward4 0.0.0.0 9707 0.0.0.0 9708 + ``` \ No newline at end of file diff --git a/sample/Network/Stewards.csv b/sample/Network/Stewards.csv new file mode 100644 index 000000000..2a47429f3 --- /dev/null +++ b/sample/Network/Stewards.csv @@ -0,0 +1,5 @@ +Steward name,Validator alias,Node IP address,Node port,Client IP address,Client port,Validator verkey,Validator BLS key,Validator BLS POP,Steward DID,Steward verkey +Steward1,Steward1,0.0.0.0,9701,0.0.0.0,9702,DJrzRm3ahRkz2pesFVtmH8wA3S3z63XayZgDvV21b4BF,3k9aPxmqMYY4QQ3MK88Pot5QmqxfaaxuzMeGnKYT8j1Ds1Rcmq2zmjQmLMtKvBzC89E7yCQyiQ9HEDcGAZi6zmarMCQNkY9oYCAUVJGrZgxBE4a1oj7VYKw7zuGpMwsKLPGLcTGwpmX9LS6f5ykbazEwEgQRTiWj2epRKxZC87DLwbH,RT5vLkN7639sXwYMBWkuFnzSM7ezEb49ZZExf6htH1WBWyuYgJsRTqT71HWaizfFLi1zp63eNGKKVzzyMaETYoj8QoV3GejHeZzP7LydJQpHQ5VPuLW3NUy5BGH4Xt7RkCT5pUbwhjz6mwxXfGAtQot7kiMH18QrpcazAmHrFPXKe7,Th7MpTaRZVRYnPiabds81Y,~7TYfekw4GUagBnBVCqPjiC 
+Steward2,Steward2,0.0.0.0,9703,0.0.0.0,9704,EQJ92vJVaAihejc9N2Yqy59L7ixVKMx2FgaXxD8F6vs7,XvFCAC84AjEzcLFfdNQq17rGxheUbd95MCTkg8Bw3CNRR61isy5uNiqaoxZgNZac2MEvZoXX7Wk27YUMB9mc4XFdAHRJiVVs3UcB3giBuhbv4om6GjouGcKWYsFkffA4tvWPyeDDn5ifxZaJBDHVR4AHcvUNxFipGnEptFSDzayzBG,RXfySA7HWDh57hm3GRKqj1DcMPq66fLJHMzaN76U1XqdUaRTRmBtxgSREtEvudSNFL8woXJzqS7VnJehZNd8hXf4bipdBhJ4J7hzBwhpbXfsuH2yH6XExBrxyPCwyQ9K9RAQraHz2RTLhs8r93HNzjauUARbw5ADv2F42FW69kWbdR,EbP4aYNeTHL6q385GuVpRV,~RHGNtfvkgPEUQzQNtNxLNu
+Steward3,Steward3,0.0.0.0,9705,0.0.0.0,9706,EktpqGnexWaiQyr9vcXDTgNYwqT8cxmAfnX8N7qWwEcC,13U7tXXXRTLeavMEQk7MqECuKkuFrHPAwidf2cVqhaJoABmHc4SBMXHVJJkc1pJNvjLu894UZ6pSt3aAYZ5nQrfkuqbBUEToWb5vZSLHTTNnznkzx5PStPFSZkYUuA4bYNLk5b8GbwrHFKjrjqzCdjEWs2hDipAmXfd9NBh3BTEwAxS,RHeKLLefbDdgBMpZ9AUrS8EHPDRnFXNiJ1z8LUgqPa8eUGyeRkAR2ppPkYqcLc9ekzG8cYZMTGx8y52sZ1q2QWqs3BYBH2i3H2WxL4icRq9Kj4kqs3BQadtPWBSq4vEaWTwwieuUXFYqpvk1ALCSNmS9NmMYXYyTL8uzrstviomjXm,4cU41vWW82ArfxJxHkzXPG,~EMoPA6HrpiExVihsVfxD3H
+Steward4,Steward4,0.0.0.0,9707,0.0.0.0,9708,8XFTwX3rHVUBddyruNTzKnBdbFqWz8eZPRasyySLD7Sv,oy7vASnhYAYo9fV1MzFSeCHEmyd2dQze6dmwWd5unwoySsA2UauUaKpV6QqwL9WQzQYRXZAoDT9jXGWwGFgCKWKVinFPj2TU5qsqAFt6PcXxQ7ZpBMEiUhQreqQv9BQsb7Upx9cNZKm4wKRyjCryX3TELb3xzz51wwsdeY8hduAKvb,QsrUH1e5zsdiEGij1NeY9S7CwzUdU2rzjskHNGHCQ8rtgYZyBC99MgRPzgkJHP86nWQUo2fSRvyWLQdBwvWfNtSqUBQgVScQPHg9CJXWWohWnzSP4ViBo8EEeGXEoP2NPeRnFCCfuhYAC7stZgBATFyvdFRwG58ws76qQQQsfDDHBV,TWwCRQRZ2ZHMJFn9TzLp7W,~UhP7K35SAXbix1kCQV4Upx \ No newline at end of file diff --git a/sample/Network/Trustees.csv b/sample/Network/Trustees.csv new file mode 100644 index 000000000..59604b6ea --- /dev/null +++ b/sample/Network/Trustees.csv @@ -0,0 +1,4 @@ +Trustee name,Trustee DID,Trustee verkey +Trustee1,V4SGRU86Z58d6TV7PBUe6f,~CoRER63DVYnWZtK8uAzNbx +Trustee2,LnXR1rPnncTPZvRdmJKhJQ,~RTBtVN3iwcFhbWZzohFTMi +Trustee3,PNQm3CwyXbN5e39Rw3dXYx,~AHtGeRXtGjVfXALtXP9WiX \ No newline at end of file diff --git a/sample/Network/domain_transactions_genesis 
b/sample/Network/domain_transactions_genesis new file mode 100644 index 000000000..8f1fbbd77 --- /dev/null +++ b/sample/Network/domain_transactions_genesis @@ -0,0 +1,7 @@ +{"reqSignature":{},"txn":{"data":{"alias":"Trustee1","dest":"V4SGRU86Z58d6TV7PBUe6f","role":"0","verkey":"~CoRER63DVYnWZtK8uAzNbx"},"metadata":{},"type":"1"},"txnMetadata":{"seqNo":1},"ver":"1"} +{"reqSignature":{},"txn":{"data":{"alias":"Trustee2","dest":"LnXR1rPnncTPZvRdmJKhJQ","role":"0","verkey":"~RTBtVN3iwcFhbWZzohFTMi"},"metadata":{},"type":"1"},"txnMetadata":{"seqNo":2},"ver":"1"} +{"reqSignature":{},"txn":{"data":{"alias":"Trustee3","dest":"PNQm3CwyXbN5e39Rw3dXYx","role":"0","verkey":"~AHtGeRXtGjVfXALtXP9WiX"},"metadata":{},"type":"1"},"txnMetadata":{"seqNo":3},"ver":"1"} +{"reqSignature":{},"txn":{"data":{"dest":"Th7MpTaRZVRYnPiabds81Y","role":"2","verkey":"~7TYfekw4GUagBnBVCqPjiC"},"metadata":{"from":"PNQm3CwyXbN5e39Rw3dXYx"},"type":"1"},"txnMetadata":{"seqNo":4},"ver":"1"} +{"reqSignature":{},"txn":{"data":{"dest":"EbP4aYNeTHL6q385GuVpRV","role":"2","verkey":"~RHGNtfvkgPEUQzQNtNxLNu"},"metadata":{"from":"PNQm3CwyXbN5e39Rw3dXYx"},"type":"1"},"txnMetadata":{"seqNo":5},"ver":"1"} +{"reqSignature":{},"txn":{"data":{"dest":"4cU41vWW82ArfxJxHkzXPG","role":"2","verkey":"~EMoPA6HrpiExVihsVfxD3H"},"metadata":{"from":"PNQm3CwyXbN5e39Rw3dXYx"},"type":"1"},"txnMetadata":{"seqNo":6},"ver":"1"} +{"reqSignature":{},"txn":{"data":{"dest":"TWwCRQRZ2ZHMJFn9TzLp7W","role":"2","verkey":"~UhP7K35SAXbix1kCQV4Upx"},"metadata":{"from":"PNQm3CwyXbN5e39Rw3dXYx"},"type":"1"},"txnMetadata":{"seqNo":7},"ver":"1"} diff --git a/sample/Network/pool_transactions_genesis b/sample/Network/pool_transactions_genesis new file mode 100644 index 000000000..7ba1f6f71 --- /dev/null +++ b/sample/Network/pool_transactions_genesis @@ -0,0 +1,4 @@ 
+{"reqSignature":{},"txn":{"data":{"data":{"alias":"Steward1","blskey":"3k9aPxmqMYY4QQ3MK88Pot5QmqxfaaxuzMeGnKYT8j1Ds1Rcmq2zmjQmLMtKvBzC89E7yCQyiQ9HEDcGAZi6zmarMCQNkY9oYCAUVJGrZgxBE4a1oj7VYKw7zuGpMwsKLPGLcTGwpmX9LS6f5ykbazEwEgQRTiWj2epRKxZC87DLwbH","blskey_pop":"RT5vLkN7639sXwYMBWkuFnzSM7ezEb49ZZExf6htH1WBWyuYgJsRTqT71HWaizfFLi1zp63eNGKKVzzyMaETYoj8QoV3GejHeZzP7LydJQpHQ5VPuLW3NUy5BGH4Xt7RkCT5pUbwhjz6mwxXfGAtQot7kiMH18QrpcazAmHrFPXKe7","client_ip":"0.0.0.0","client_port":"9702","node_ip":"0.0.0.0","node_port":"9701","services":["VALIDATOR"]},"dest":"DJrzRm3ahRkz2pesFVtmH8wA3S3z63XayZgDvV21b4BF"},"metadata":{"from":"Th7MpTaRZVRYnPiabds81Y"},"type":"0"},"txnMetadata":{"seqNo":1,"txnId":"7e5d5f22d8c3c68c4532e0a336ba2db3276d12acf7820334bee49b5581d62277"},"ver":"1"} +{"reqSignature":{},"txn":{"data":{"data":{"alias":"Steward2","blskey":"XvFCAC84AjEzcLFfdNQq17rGxheUbd95MCTkg8Bw3CNRR61isy5uNiqaoxZgNZac2MEvZoXX7Wk27YUMB9mc4XFdAHRJiVVs3UcB3giBuhbv4om6GjouGcKWYsFkffA4tvWPyeDDn5ifxZaJBDHVR4AHcvUNxFipGnEptFSDzayzBG","blskey_pop":"RXfySA7HWDh57hm3GRKqj1DcMPq66fLJHMzaN76U1XqdUaRTRmBtxgSREtEvudSNFL8woXJzqS7VnJehZNd8hXf4bipdBhJ4J7hzBwhpbXfsuH2yH6XExBrxyPCwyQ9K9RAQraHz2RTLhs8r93HNzjauUARbw5ADv2F42FW69kWbdR","client_ip":"0.0.0.0","client_port":"9704","node_ip":"0.0.0.0","node_port":"9703","services":["VALIDATOR"]},"dest":"EQJ92vJVaAihejc9N2Yqy59L7ixVKMx2FgaXxD8F6vs7"},"metadata":{"from":"EbP4aYNeTHL6q385GuVpRV"},"type":"0"},"txnMetadata":{"seqNo":2,"txnId":"de75d0424859596a865ed9dab1eed1403849dc930dc591ada94d8cad09ecc9be"},"ver":"1"} 
+{"reqSignature":{},"txn":{"data":{"data":{"alias":"Steward3","blskey":"13U7tXXXRTLeavMEQk7MqECuKkuFrHPAwidf2cVqhaJoABmHc4SBMXHVJJkc1pJNvjLu894UZ6pSt3aAYZ5nQrfkuqbBUEToWb5vZSLHTTNnznkzx5PStPFSZkYUuA4bYNLk5b8GbwrHFKjrjqzCdjEWs2hDipAmXfd9NBh3BTEwAxS","blskey_pop":"RHeKLLefbDdgBMpZ9AUrS8EHPDRnFXNiJ1z8LUgqPa8eUGyeRkAR2ppPkYqcLc9ekzG8cYZMTGx8y52sZ1q2QWqs3BYBH2i3H2WxL4icRq9Kj4kqs3BQadtPWBSq4vEaWTwwieuUXFYqpvk1ALCSNmS9NmMYXYyTL8uzrstviomjXm","client_ip":"0.0.0.0","client_port":"9706","node_ip":"0.0.0.0","node_port":"9705","services":["VALIDATOR"]},"dest":"EktpqGnexWaiQyr9vcXDTgNYwqT8cxmAfnX8N7qWwEcC"},"metadata":{"from":"4cU41vWW82ArfxJxHkzXPG"},"type":"0"},"txnMetadata":{"seqNo":3,"txnId":"bab82b59bb4368ea7c3f7e828759d65af76c04a752845135419bd55d32d9a1ee"},"ver":"1"} +{"reqSignature":{},"txn":{"data":{"data":{"alias":"Steward4","blskey":"oy7vASnhYAYo9fV1MzFSeCHEmyd2dQze6dmwWd5unwoySsA2UauUaKpV6QqwL9WQzQYRXZAoDT9jXGWwGFgCKWKVinFPj2TU5qsqAFt6PcXxQ7ZpBMEiUhQreqQv9BQsb7Upx9cNZKm4wKRyjCryX3TELb3xzz51wwsdeY8hduAKvb","blskey_pop":"QsrUH1e5zsdiEGij1NeY9S7CwzUdU2rzjskHNGHCQ8rtgYZyBC99MgRPzgkJHP86nWQUo2fSRvyWLQdBwvWfNtSqUBQgVScQPHg9CJXWWohWnzSP4ViBo8EEeGXEoP2NPeRnFCCfuhYAC7stZgBATFyvdFRwG58ws76qQQQsfDDHBV","client_ip":"0.0.0.0","client_port":"9708","node_ip":"0.0.0.0","node_port":"9707","services":["VALIDATOR"]},"dest":"8XFTwX3rHVUBddyruNTzKnBdbFqWz8eZPRasyySLD7Sv"},"metadata":{"from":"TWwCRQRZ2ZHMJFn9TzLp7W"},"type":"0"},"txnMetadata":{"seqNo":4,"txnId":"29f95b62b7d162a819ec6973909f5549178d3dc8ef849ea39b440b45842fd177"},"ver":"1"} From 36c3f88354e3679c8b41daeea70e903e614b0b42 Mon Sep 17 00:00:00 2001 From: udosson Date: Thu, 13 Jan 2022 11:44:05 +0100 Subject: [PATCH 14/31] Indy-Test-Automation workflow Signed-off-by: udosson --- .github/workflows/build.yaml | 40 +- .github/workflows/indy_test_automation.yml | 1672 ++++++++++++++++++++ 2 files changed, 1711 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/indy_test_automation.yml diff --git 
a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 5f6ed3e0f..2ef2298e3 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -275,6 +275,8 @@ jobs: runs-on: ubuntu-20.04 container: image: ghcr.io/${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }}/node-build:ubuntu-16-04 + outputs: + INDY_NODE_PACKAGE_VERSION: ${{ steps.cache.outputs.pkgVer }} steps: - name: Check out code uses: actions/checkout@v1 @@ -422,4 +424,40 @@ jobs: with: user: __token__ password: ${{ secrets.PYPI_API_TOKEN }} - skip_existing: true \ No newline at end of file + skip_existing: true + + trigger_indy-test-automation: + name: Trigger Indy Test Automation + runs-on: ubuntu-20.04 + needs: [ workflow-setup, publish_artifacts ] + ### run at the moment on ever push for testing purposes + if: needs.workflow-setup.outputs.isRC == 'true' + env: + GITHUB_REF: ${{ needs.workflow-setup.outputs.GITHUB_REF }} + INDY_NODE_PACKAGE_VERSION: ${{ needs.workflow-setup.outputs.INDY_NODE_PACKAGE_VERSION }} + steps: + - name: Check out code + uses: actions/checkout@v1 + + # TODO: Set ursaVersion, pyzmqVersion, ubuntuVersion, nodeRepoComponent, nodeSovrinRepoComponent, and clientSovrinRepoComponent dynamically + - name: Set versions + id: version + run: | + echo "Set version of Indy Plenum" + sed -i -r "s~indy-plenum==([0-9\.]+[0-9])(\.)?([a-z]+)~indy-plenum==\1\~\3~" setup.py + plenumVersion=$(grep -oP "(?<=indy-plenum==).*?(?=')" <<< "$(cat setup.py)") + echo "::set-output name=plenumVersion::${plenumVersion}" + + ### TODO: Needs do be adjusted to work with libindy-dev (not used at the moment) + echo "Set version of Indy SDK / libindy" + sed -i -r "s~python3-indy==([0-9\.]+[0-9])(\-)?([a-z]+)~python3-indy==\1\~\3~" setup.py + libIndyVersion=$(grep -oP "(?<=python3-indy==).*?(?=')" <<< "$(cat setup.py)")-xenial + echo "::set-output name=libIndyVersion::${libIndyVersion}" + + - name: Invoke workflow with inputs + uses: benc-uk/workflow-dispatch@v1 + with: + 
workflow: indy-test-automation + token: ${{ secrets.WORKFLOW_DISPATCH_TOKEN }} + inputs: '{ "nodeVersion": "1.13.0~dev197", "plenumVersion": "${{ steps.version.outputs.plenumVersion }}", "ursaVersion": "0.3.2-2", "pyzmqVersion": "18.1.0", "libIndyVersion": "1.15.0~1625-xenial", "ubuntuVersion": "ubuntu-1604", "nodeRepoComponent": "main", "nodeSovrinRepoComponent": "master", "clientSovrinRepoComponent": "master" }' + # inputs: '{ "nodeVersion": "${{ env.INDY_NODE_PACKAGE_VERSION }}", "plenumVersion": {{ steps.version.outputs.plenumVersion }}, "ursaVersion": "0.3.2-2", "pyzmqVersion": "18.1.0", "libIndyVersion": {{ steps.version.outputs.libIndyVersion }},, "ubuntuVersion": "ubuntu-1604", "nodeRepoComponent": "main", "nodeSovrinRepoComponent": "master", "clientSovrinRepoComponent": "master" }' diff --git a/.github/workflows/indy_test_automation.yml b/.github/workflows/indy_test_automation.yml new file mode 100644 index 000000000..3b0fd7d4f --- /dev/null +++ b/.github/workflows/indy_test_automation.yml @@ -0,0 +1,1672 @@ +name: indy-test-automation +on: + workflow_dispatch: + inputs: + nodeVersion: + description: 'Version of Indy Node' + required: true + plenumVersion: + description: 'Version of Indy Plenum' + required: true + ursaVersion: + description: 'Version of Ursa' + required: true + pyzmqVersion: + description: 'Version of PYZMQ' + required: true + default: "18.1.0" + libIndyVersion: + description: 'Version of Libindy' + required: true + ubuntuVersion: + description: 'Version of Ubuntu base image' + required: true + default: 'ubuntu-1604' + nodeRepoComponent: + description: 'Hyperledger Artifactory repository component of Indy-Node' + required: true + default: 'main' + nodeSovrinRepoComponent: + description: 'Sovrin repository component of dependcies of Indy-Node artifacts' + required: true + default: 'master' + clientSovrinRepoComponent: + description: 'Sovrin repository component of Indy SDK artifacts' + required: true + default: 'master' + + +env: + 
INPUT_NODEVERSION: ${{ github.event.inputs.nodeVersion }} + INPUT_PLENUMVERSION: ${{ github.event.inputs.plenumVersion }} + INPUT_URSAVERSION: ${{ github.event.inputs.ursaVersion }} + INPUT_PYZMQVERSION: ${{ github.event.inputs.pyzmqVersion }} + INPUT_LIBINDYVERSION: ${{ github.event.inputs.libIndyVersion}} + INPUT_UBUNTUVERSION: ${{ github.event.inputs.ubuntuVersion }} + INPUT_NODEREPOCOMPONENT: ${{ github.event.inputs.nodeRepoComponent }} + INPUT_NODESOVRINREPOCOMPONENT: ${{ github.event.inputs.nodeSovrinRepoComponent }} + INPUT_CLIENTSOVRINREPOCOMPONENT: ${{ github.event.inputs.clientSovrinRepoComponent }} + TEST_AUTOMATION_BRANCH: "main" + + +jobs: + workflow-setup: + name: Initialize Workflow + runs-on: ubuntu-latest + outputs: + GITHUB_REPOSITORY_NAME: ${{ steps.repository-name.outputs.lowercase }} + DIND_BUILD_ARG: ${{ steps.cache.outputs.DIND_BUILD_ARG}} + steps: + - name: Convert the GitHub repository name to lowercase + id: repository-name + uses: ASzc/change-string-case-action@v1 + with: + string: ${{ github.repository }} + + - name: Set outputs + id: cache + run: | + # Set variables according to version of ubuntu + if [[ "${{ env.INPUT_UBUNTUVERSION }}" == "ubuntu-1604" ]]; then + echo "::set-output name=DIND_BUILD_ARG::16.04" + echo "::set-output name=distribution::xenial" + fi + if [[ "${{ env.INPUT_UBUNTUVERSION }}" == "ubuntu-2004" ]]; then + echo "::set-output name=DIND_BUILD_ARG::20.04" + echo "::set-output name=distribution::focal" + fi + + dind-image: + name: Create DinD Image + needs: workflow-setup + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + DIND_BUILD_ARG: ${{ needs.workflow-setup.outputs.DIND_BUILD_ARG }} + steps: + - name: Git checkout teracyhq/docker-files + uses: actions/checkout@v2 + with: + repository: teracyhq/docker-files + + - name: Prepare image labels and tags + id: prep + shell: bash + run: | + DOCKER_IMAGE=ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/dind + 
TAGS="${DOCKER_IMAGE}:latest,${DOCKER_IMAGE}:${{ env.INPUT_UBUNTUVERSION }}" + echo ::set-output name=tags::${TAGS} + echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ') + + - name: Log into the GitHub Container Registry + if: steps.cache-image.outputs.cache-hit != 'true' + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx + + - name: Build and push image + uses: docker/build-push-action@v2 + with: + context: ./ubuntu/base + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.prep.outputs.tags }} + build-args: | + UBUNTU_VERSION=${{ env.DIND_BUILD_ARG }} + labels: | + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.created=${{ steps.prep.outputs.created }} + org.opencontainers.image.revision=${{ github.sha }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + + node-image: + name: Create Node Image + needs: workflow-setup + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Prepare image labels and tags + if: steps.cache-image.outputs.cache-hit != 'true' + id: prep + shell: bash + run: | + DOCKER_IMAGE=ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/node-${{ env.INPUT_UBUNTUVERSION }} + # TAGS="${DOCKER_IMAGE}:latest,${DOCKER_IMAGE}:${{ 
env.INPUT_UBUNTUVERSION }}" + TAGS="${DOCKER_IMAGE}:latest" + echo ::set-output name=tags::${TAGS} + echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ') + + - name: Set up Docker Buildx + if: steps.cache-image.outputs.cache-hit != 'true' + uses: docker/setup-buildx-action@v1 + + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx + + - name: Build and cache image + if: steps.cache-image.outputs.cache-hit != 'true' + uses: docker/build-push-action@v2 + with: + context: ./system_node_only/docker/node + file: ./system_node_only/docker/node/Dockerfile.${{ env.INPUT_UBUNTUVERSION }} + push: false + tags: ${{ steps.prep.outputs.tags }} + labels: | + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.created=${{ steps.prep.outputs.created }} + org.opencontainers.image.revision=${{ github.sha }} + build-args: | + NODE_REPO_COMPONENT=${{ env.INPUT_NODEREPOCOMPONENT }} + NODE_SOVRIN_REPO_COMPONENT=${{ env.INPUT_NODESOVRINREPOCOMPONENT}} + INDY_NODE_VERSION=${{ env.INPUT_NODEVERSION }} + INDY_PLENUM_VERSION=${{ env.INPUT_PLENUMVERSION }} + URSA_VERSION=${{ env.INPUT_URSAVERSION }} + PYTHON3_PYZMQ_VERSION=${{ env.INPUT_PYZMQVERSION }} + + outputs: type=docker,dest=/tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Upload node docker image artifacts + uses: actions/upload-artifact@v2 + with: + name: node_image + path: /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + retention-days: 1 + + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + + client-image: + name: Create Client Image + needs: [ workflow-setup, dind-image ] + runs-on: ubuntu-latest + env: + CACHE_KEY_CLIENT: ${{ 
needs.workflow-setup.outputs.CACHE_KEY_CLIENT }} + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + DIND_BUILD_ARG: ${{ needs.workflow-setup.outputs.DIND_BUILD_ARG }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Prepare image labels and tags + id: prep + shell: bash + run: | + DOCKER_IMAGE=ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/client + TAGS="${DOCKER_IMAGE}:latest,${DOCKER_IMAGE}:${{ env.INPUT_UBUNTUVERSION }}" + echo ::set-output name=tags::${TAGS} + echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ') + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx + + - name: Build and cache image + uses: docker/build-push-action@v2 + with: + context: ./system_node_only/docker/client/ + file: ./system_node_only/docker/client/Dockerfile.${{ env.INPUT_UBUNTUVERSION }} + push: false + tags: ${{ steps.prep.outputs.tags }} + labels: | + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.created=${{ steps.prep.outputs.created }} + org.opencontainers.image.revision=${{ github.sha }} + build-args: | + CLIENT_SOVRIN_REPO_COMPONENT=${{ env.INPUT_CLIENTSOVRINREPOCOMPONENT}} + LIBINDY_VERSION=${{ env.INPUT_LIBINDYVERSION}} + DIND_CONTAINER_REGISTRY=ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }} + DIND_IMAGE_NAME=dind:${{ env.INPUT_UBUNTUVERSION }} + outputs: type=docker,dest=/tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max + + - name: Upload client docker image artifacts + uses: actions/upload-artifact@v2 + with: + name: 
client_image + path: /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + retention-days: 1 + + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + + ### 1 step in workflow per test + ### The tests rely on docker in docker with the fixed network name and fixed IP addresses. + ### That's why the tests cannot be run in matrix mode because all tests would share the same host and same docker engine. + test_consensus: + name: test_consensus + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: test_consensus + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/test_consensus.py "-l -v --junit-xml=test_consensus-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: test_consensus 
Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.test_consensus.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-test_consensus + path: test-result-indy-test-autmation-test_consensus.txt + retention-days: 5 + + test_freshness: + name: test_freshness + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: test_freshness + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/test_freshness.py "-l -v --junit-xml=test_freshness-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: test_freshness Test 
Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.test_freshness.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-test_freshness + path: test-result-indy-test-autmation-test_freshness.txt + retention-days: 5 + + test_ledger: + name: test_ledger + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: test_ledger + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/test_ledger.py "-l -v --junit-xml=test_ledger-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: test_ledger Test Report + github_token: ${{ 
secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.test_ledger.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-test_ledger + path: test-result-indy-test-autmation-test_ledger.txt + retention-days: 5 + + test_off_ledger_signature: + name: test_off_ledger_signature + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: test_off_ledger_signature + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/test_off_ledger_signature.py "-l -v --junit-xml=test_off_ledger_signature-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: 
test_off_ledger_signature Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.test_off_ledger_signature.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-test_off_ledger_signature + path: test-result-indy-test-autmation-test_off_ledger_signature.txt + retention-days: 5 + + test_roles: + name: Test Roles + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: test_roles + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/test_roles.py "-l -v --junit-xml=test_roles-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: 
+ check_name: test_roles Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.test_roles.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-test_roles + path: test-result-indy-test-autmation-test_roles.txt + retention-days: 5 + + test_state_proof: + name: test_state_proof + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: test_state_proof + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/test_state_proof.py "-l -v --junit-xml=test_state_proof-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: 
test_state_proof Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.test_state_proof.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-test_state_proof + path: test-result-indy-test-autmation-test_state_proof.txt + retention-days: 5 + + test_vc: + name: test_vc + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: test_vc + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/test_vc.py "-l -v --junit-xml=test_vc-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: test_vc Test Report + github_token: 
${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.test_vc.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-test_vc + path: test-result-indy-test-autmation-test_vc.txt + retention-days: 5 + + TestAdHocSuite: + name: TestAdHocSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAdHocSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAdHocSuite.py "-l -v --junit-xml=TestAdHocSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAdHocSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + 
report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAdHocSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAdHocSuite + path: test-result-indy-test-autmation-TestAdHocSuite.txt + retention-days: 5 + + TestAuditSuite: + name: TestAuditSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuditSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuditSuite.py "-l -v --junit-xml=TestAuditSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAuditSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + 
report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuditSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuditSuite + path: test-result-indy-test-autmation-TestAuditSuite.txt + retention-days: 5 + + TestAuthMapAttribSuite: + name: TestAuthMapAttribSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuthMapAttribSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuthMapAttribSuite.py "-l -v --junit-xml=TestAuthMapAttribSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAuthMapAttribSuite Test Report + 
github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuthMapAttribSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuthMapAttribSuite + path: test-result-indy-test-autmation-TestAuthMapAttribSuite.txt + retention-days: 5 + + TestAuthMapCredDefSuite: + name: TestAuthMapCredDefSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuthMapCredDefSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuthMapCredDefSuite.py "-l -v --junit-xml=TestAuthMapCredDefSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + 
continue-on-error: true + with: + check_name: TestAuthMapCredDefSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuthMapCredDefSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuthMapCredDefSuite + path: test-result-indy-test-autmation-TestAuthMapCredDefSuite.txt + retention-days: 5 + + TestAuthMapMiscSuite: + name: TestAuthMapMiscSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuthMapMiscSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuthMapMiscSuite.py "-l -v --junit-xml=TestAuthMapMiscSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || 
failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAuthMapMiscSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuthMapMiscSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuthMapMiscSuite + path: test-result-indy-test-autmation-TestAuthMapMiscSuite.txt + retention-days: 5 + + TestAuthMapNymSuite: + name: TestAuthMapNymSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuthMapNymSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuthMapNymSuite.py "-l -v --junit-xml=TestAuthMapNymSuite-report.xml" indy-test-automation-network + + - name: 
Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAuthMapNymSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuthMapNymSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuthMapNymSuite + path: test-result-indy-test-autmation-TestAuthMapNymSuite.txt + retention-days: 5 + + TestAuthMapRevocRegDefSuite: + name: TestAuthMapRevocRegDefSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuthMapRevocRegDefSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuthMapRevocRegDefSuite.py "-l -v 
--junit-xml=TestAuthMapRevocRegDefSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAuthMapRevocRegDefSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuthMapRevocRegDefSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuthMapRevocRegDefSuite + path: test-result-indy-test-autmation-TestAuthMapRevocRegDefSuite.txt + retention-days: 5 + + TestAuthMapRevocRegEntrySuite: + name: TestAuthMapRevocRegEntrySuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuthMapRevocRegEntrySuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" 
NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuthMapRevocRegEntrySuite.py "-l -v --junit-xml=TestAuthMapRevocRegEntrySuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAuthMapRevocRegEntrySuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuthMapRevocRegEntrySuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuthMapRevocRegEntrySuite + path: test-result-indy-test-autmation-TestAuthMapRevocRegEntrySuite.txt + retention-days: 5 + + TestAuthMapSchemaSuite: + name: TestAuthMapSchemaSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuthMapSchemaSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" 
IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuthMapSchemaSuite.py "-l -v --junit-xml=TestAuthMapSchemaSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAuthMapSchemaSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuthMapSchemaSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuthMapSchemaSuite + path: test-result-indy-test-autmation-TestAuthMapSchemaSuite.txt + retention-days: 5 + + TestAuthMapUpgradeSuite: + name: TestAuthMapUpgradeSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestAuthMapUpgradeSuite + run: | + cd 
./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestAuthMapUpgradeSuite.py "-l -v --junit-xml=TestAuthMapUpgradeSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestAuthMapUpgradeSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestAuthMapUpgradeSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestAuthMapUpgradeSuite + path: test-result-indy-test-autmation-TestAuthMapUpgradeSuite.txt + retention-days: 5 + + TestCatchUpSuite: + name: TestCatchUpSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + 
- name: test + id: TestCatchUpSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestCatchUpSuite.py "-l -v --junit-xml=TestCatchUpSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestCatchUpSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestCatchUpSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestCatchUpSuite + path: test-result-indy-test-autmation-TestCatchUpSuite.txt + retention-days: 5 + + TestCatchUpSuiteExtended: + name: TestCatchUpSuiteExtended + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ 
env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestCatchUpSuiteExtended + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestCatchUpSuiteExtended.py "-l -v --junit-xml=TestCatchUpSuiteExtended-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestCatchUpSuiteExtended Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestCatchUpSuiteExtended.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestCatchUpSuiteExtended + path: test-result-indy-test-autmation-TestCatchUpSuiteExtended.txt + retention-days: 5 + + TestConsensusSuite: + name: TestConsensusSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load --input /tmp/client_image_${{ 
env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestConsensusSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestConsensusSuite.py "-l -v --junit-xml=TestConsensusSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestConsensusSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestConsensusSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestConsensusSuite + path: test-result-indy-test-autmation-TestConsensusSuite.txt + retention-days: 5 + + TestMultiSigSuite: + name: TestMultiSigSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + run: | + docker load 
--input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestMultiSigSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestMultiSigSuite.py "-l -v --junit-xml=TestMultiSigSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestMultiSigSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestMultiSigSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestMultiSigSuite + path: test-result-indy-test-autmation-TestMultiSigSuite.txt + retention-days: 5 + + TestProductionSuite: + name: TestProductionSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and node image + 
run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestProductionSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestProductionSuite.py "-l -v --junit-xml=TestProductionSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestProductionSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestProductionSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestProductionSuite + path: test-result-indy-test-autmation-TestProductionSuite.txt + retention-days: 5 + + TestTAASuite: + name: TestTAASuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load 
client and node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestTAASuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestTAASuite.py "-l -v --junit-xml=TestTAASuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestTAASuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestTAASuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestTAASuite + path: test-result-indy-test-autmation-TestTAASuite.txt + retention-days: 5 + + TestViewChangeSuite: + name: TestViewChangeSuite + needs: [ workflow-setup, node-image, client-image] + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_NAME: ${{ needs.workflow-setup.outputs.GITHUB_REPOSITORY_NAME }} + steps: + - name: Git checkout hyperledger/indy-test-automation + uses: actions/checkout@v2 + with: + repository: hyperledger/indy-test-automation + ref: ${{ env.TEST_AUTOMATION_BRANCH }} + + - name: Create docker network + run: | + docker network create --subnet="10.0.0.0/24" "indy-test-automation-network" + + - name: Download client artifact + uses: actions/download-artifact@v2 + with: + name: client_image + path: /tmp + + - name: Download node artifact + uses: actions/download-artifact@v2 + with: + name: node_image + path: /tmp + + - name: Load client and 
node image + run: | + docker load --input /tmp/client_image_${{ env.INPUT_UBUNTUVERSION }}.tar + docker load --input /tmp/node_image_${{ env.INPUT_UBUNTUVERSION }}.tar + + - name: test + id: TestViewChangeSuite + run: | + cd ./system_node_only/docker + sudo UBUNTU_VERSION="${{ env.INPUT_UBUNTUVERSION }}" IMAGE_REPOSITORY="ghcr.io/${{ env.GITHUB_REPOSITORY_NAME }}/" CLIENT_IMAGE="client:${{ env.INPUT_UBUNTUVERSION }}" NODE_IMAGE="node-${{ env.INPUT_UBUNTUVERSION }}" ./run.sh system_node_only/indy-node-tests/TestViewChangeSuite.py "-l -v --junit-xml=TestViewChangeSuite-report.xml" indy-test-automation-network + + - name: Publish Test Report + if: success() || failure() + uses: scacap/action-surefire-report@v1.0.7 + continue-on-error: true + with: + check_name: TestViewChangeSuite Test Report + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: "*-report.xml" + + - name: Upload Detailed Test Failure Results + # The test runner only emits the detailed test results if the tests fail. + if: (steps.TestViewChangeSuite.outcome == 'failure') && failure() + uses: actions/upload-artifact@v2 + with: + name: detailed-test-result-TestViewChangeSuite + path: test-result-indy-test-autmation-TestViewChangeSuite.txt + retention-days: 5 From 25f281410a30ec229f2a34f7862466f891913ed3 Mon Sep 17 00:00:00 2001 From: udosson Date: Fri, 14 Jan 2022 15:04:14 +0100 Subject: [PATCH 15/31] fixed typo in uploading detailed test failure results Signed-off-by: udosson --- .github/workflows/indy_test_automation.yml | 48 +++++++++++----------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/.github/workflows/indy_test_automation.yml b/.github/workflows/indy_test_automation.yml index 3b0fd7d4f..26547f1ea 100644 --- a/.github/workflows/indy_test_automation.yml +++ b/.github/workflows/indy_test_automation.yml @@ -334,7 +334,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-test_consensus - path: test-result-indy-test-autmation-test_consensus.txt + path: 
test-result-indy-test-automation-test_consensus.txt retention-days: 5 test_freshness: @@ -392,7 +392,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-test_freshness - path: test-result-indy-test-autmation-test_freshness.txt + path: test-result-indy-test-automation-test_freshness.txt retention-days: 5 test_ledger: @@ -450,7 +450,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-test_ledger - path: test-result-indy-test-autmation-test_ledger.txt + path: test-result-indy-test-automation-test_ledger.txt retention-days: 5 test_off_ledger_signature: @@ -508,7 +508,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-test_off_ledger_signature - path: test-result-indy-test-autmation-test_off_ledger_signature.txt + path: test-result-indy-test-automation-test_off_ledger_signature.txt retention-days: 5 test_roles: @@ -566,7 +566,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-test_roles - path: test-result-indy-test-autmation-test_roles.txt + path: test-result-indy-test-automation-test_roles.txt retention-days: 5 test_state_proof: @@ -624,7 +624,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-test_state_proof - path: test-result-indy-test-autmation-test_state_proof.txt + path: test-result-indy-test-automation-test_state_proof.txt retention-days: 5 test_vc: @@ -682,7 +682,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-test_vc - path: test-result-indy-test-autmation-test_vc.txt + path: test-result-indy-test-automation-test_vc.txt retention-days: 5 TestAdHocSuite: @@ -740,7 +740,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAdHocSuite - path: test-result-indy-test-autmation-TestAdHocSuite.txt + path: test-result-indy-test-automation-TestAdHocSuite.txt retention-days: 5 TestAuditSuite: @@ -798,7 +798,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: 
detailed-test-result-TestAuditSuite - path: test-result-indy-test-autmation-TestAuditSuite.txt + path: test-result-indy-test-automation-TestAuditSuite.txt retention-days: 5 TestAuthMapAttribSuite: @@ -856,7 +856,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAuthMapAttribSuite - path: test-result-indy-test-autmation-TestAuthMapAttribSuite.txt + path: test-result-indy-test-automation-TestAuthMapAttribSuite.txt retention-days: 5 TestAuthMapCredDefSuite: @@ -914,7 +914,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAuthMapCredDefSuite - path: test-result-indy-test-autmation-TestAuthMapCredDefSuite.txt + path: test-result-indy-test-automation-TestAuthMapCredDefSuite.txt retention-days: 5 TestAuthMapMiscSuite: @@ -972,7 +972,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAuthMapMiscSuite - path: test-result-indy-test-autmation-TestAuthMapMiscSuite.txt + path: test-result-indy-test-automation-TestAuthMapMiscSuite.txt retention-days: 5 TestAuthMapNymSuite: @@ -1030,7 +1030,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAuthMapNymSuite - path: test-result-indy-test-autmation-TestAuthMapNymSuite.txt + path: test-result-indy-test-automation-TestAuthMapNymSuite.txt retention-days: 5 TestAuthMapRevocRegDefSuite: @@ -1088,7 +1088,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAuthMapRevocRegDefSuite - path: test-result-indy-test-autmation-TestAuthMapRevocRegDefSuite.txt + path: test-result-indy-test-automation-TestAuthMapRevocRegDefSuite.txt retention-days: 5 TestAuthMapRevocRegEntrySuite: @@ -1146,7 +1146,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAuthMapRevocRegEntrySuite - path: test-result-indy-test-autmation-TestAuthMapRevocRegEntrySuite.txt + path: test-result-indy-test-automation-TestAuthMapRevocRegEntrySuite.txt retention-days: 5 TestAuthMapSchemaSuite: @@ -1204,7 
+1204,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAuthMapSchemaSuite - path: test-result-indy-test-autmation-TestAuthMapSchemaSuite.txt + path: test-result-indy-test-automation-TestAuthMapSchemaSuite.txt retention-days: 5 TestAuthMapUpgradeSuite: @@ -1262,7 +1262,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestAuthMapUpgradeSuite - path: test-result-indy-test-autmation-TestAuthMapUpgradeSuite.txt + path: test-result-indy-test-automation-TestAuthMapUpgradeSuite.txt retention-days: 5 TestCatchUpSuite: @@ -1320,7 +1320,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestCatchUpSuite - path: test-result-indy-test-autmation-TestCatchUpSuite.txt + path: test-result-indy-test-automation-TestCatchUpSuite.txt retention-days: 5 TestCatchUpSuiteExtended: @@ -1378,7 +1378,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestCatchUpSuiteExtended - path: test-result-indy-test-autmation-TestCatchUpSuiteExtended.txt + path: test-result-indy-test-automation-TestCatchUpSuiteExtended.txt retention-days: 5 TestConsensusSuite: @@ -1436,7 +1436,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestConsensusSuite - path: test-result-indy-test-autmation-TestConsensusSuite.txt + path: test-result-indy-test-automation-TestConsensusSuite.txt retention-days: 5 TestMultiSigSuite: @@ -1494,7 +1494,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestMultiSigSuite - path: test-result-indy-test-autmation-TestMultiSigSuite.txt + path: test-result-indy-test-automation-TestMultiSigSuite.txt retention-days: 5 TestProductionSuite: @@ -1552,7 +1552,7 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestProductionSuite - path: test-result-indy-test-autmation-TestProductionSuite.txt + path: test-result-indy-test-automation-TestProductionSuite.txt retention-days: 5 TestTAASuite: @@ -1610,7 +1610,7 @@ 
jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestTAASuite - path: test-result-indy-test-autmation-TestTAASuite.txt + path: test-result-indy-test-automation-TestTAASuite.txt retention-days: 5 TestViewChangeSuite: @@ -1668,5 +1668,5 @@ jobs: uses: actions/upload-artifact@v2 with: name: detailed-test-result-TestViewChangeSuite - path: test-result-indy-test-autmation-TestViewChangeSuite.txt + path: test-result-indy-test-automation-TestViewChangeSuite.txt retention-days: 5 From 2afb24a440bb23e0e9b0a4919f8611c4b2016d85 Mon Sep 17 00:00:00 2001 From: pSchlarb Date: Mon, 29 Nov 2021 14:48:18 +0100 Subject: [PATCH 16/31] Removed pip imports Signed-off-by: pSchlarb --- .github/workflows/build/Dockerfile | 3 ++- .github/workflows/lint/Dockerfile | 1 - Jenkinsfile.ci | 2 +- ci/pipeline.groovy | 2 +- indy_node/__init__.py | 4 ++-- setup.py | 7 +++++-- 6 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build/Dockerfile b/.github/workflows/build/Dockerfile index a3496e381..13b0c043f 100644 --- a/.github/workflows/build/Dockerfile +++ b/.github/workflows/build/Dockerfile @@ -1,4 +1,5 @@ -FROM hyperledger/indy-core-baseci:0.0.3-master +FROM hyperledger/indy-core-baseci:0.0.4 + LABEL maintainer="Hyperledger " RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 \ diff --git a/.github/workflows/lint/Dockerfile b/.github/workflows/lint/Dockerfile index 4585a03eb..973b3e77b 100644 --- a/.github/workflows/lint/Dockerfile +++ b/.github/workflows/lint/Dockerfile @@ -14,7 +14,6 @@ RUN apt-get install -y \ python3-nacl RUN pip3 install -U \ - 'pip<10.0.0' \ setuptools \ pep8==1.7.1 \ pep8-naming==0.6.1 \ diff --git a/Jenkinsfile.ci b/Jenkinsfile.ci index 0e494780b..16479d1c0 100644 --- a/Jenkinsfile.ci +++ b/Jenkinsfile.ci @@ -73,7 +73,7 @@ def withTestEnv(body) { buildDocker("hyperledger/indy-node-ci", "ci/ubuntu.dockerfile ci").inside { echo 'Test: Install dependencies' - sh "pip install 'pip<10.0.0' 
'pyzmq==18.1.0'" + sh "pip install 'pyzmq==18.1.0'" install() body.call('python') } diff --git a/ci/pipeline.groovy b/ci/pipeline.groovy index efeed1486..6cae713f5 100644 --- a/ci/pipeline.groovy +++ b/ci/pipeline.groovy @@ -148,7 +148,7 @@ def systemTests(Closure body) { def uid = sh(returnStdout: true, script: 'id -u').trim() docker.build("hyperledger/indy-node-ci", "--build-arg uid=$uid -f ci/ubuntu.dockerfile ci").inside { sh """ - pip install 'pip<10.0.0' 'pyzmq==18.1.0' + pip install 'pyzmq==18.1.0' pip install .[tests] >$pipLogName """ diff --git a/indy_node/__init__.py b/indy_node/__init__.py index 2d5ab7d62..e231f2a84 100644 --- a/indy_node/__init__.py +++ b/indy_node/__init__.py @@ -14,7 +14,7 @@ def setup_plugins(): import sys import os - import pip + import importlib_metadata import importlib # noqa from importlib.util import module_from_spec, spec_from_file_location # noqa: E402 from indy_common.config_util import getConfigOnce # noqa: E402 @@ -50,7 +50,7 @@ def find_and_load_plugin(plugin_name, plugin_root, installed_packages): format(plugin_root)) sys.path.insert(0, plugin_root.__path__[0]) enabled_plugins = config.ENABLED_PLUGINS - installed_packages = {p.project_name: p for p in pip.get_installed_distributions()} + installed_packages = set(p.metadata["Name"] for p in importlib_metadata.distributions()) for plugin_name in enabled_plugins: plugin = find_and_load_plugin(plugin_name, plugin_root, installed_packages) plugin_globals = plugin.__dict__ diff --git a/setup.py b/setup.py index ce64b29ac..38f9eccf5 100644 --- a/setup.py +++ b/setup.py @@ -27,8 +27,10 @@ BASE_DIR = os.path.join(os.path.expanduser("~"), ".indy") + tests_require = ['attrs==19.1.0', 'pytest==3.3.1', 'pytest-xdist==1.22.1', 'pytest-forked==0.2', - 'python3-indy==1.15.0-dev-1625', 'pytest-asyncio==0.8.0'] + 'python3-indy==1.16.0-dev-1636', 'pytest-asyncio==0.8.0'] + setup( name=metadata['__title__'], @@ -54,7 +56,8 @@ data_files=[( (BASE_DIR, ['data/nssm_original.exe']) )], - 
install_requires=['indy-plenum==1.13.0.dev169', + install_requires=['indy-plenum==1.13.0.dev178', + 'importlib-metadata<3.0', 'timeout-decorator==0.4.0', 'distro==1.3.0'], setup_requires=['pytest-runner'], From 0a1b2f8b7abecf3eccdbd6f2e2a824e42133c77a Mon Sep 17 00:00:00 2001 From: Lynn Bendixsen Date: Thu, 10 Feb 2022 11:50:30 -0700 Subject: [PATCH 17/31] Moved some steps into the correct order in the Hand on Walkthrough and added clarifying text to 2 other documents to help people with the steps. Signed-off-by: Lynn Bendixsen --- docs/source/NewNetwork/NewNetwork.md | 2 +- docs/source/installation-and-configuration.md | 2 ++ sample/Network/README.md | 18 +++++++++--------- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/docs/source/NewNetwork/NewNetwork.md b/docs/source/NewNetwork/NewNetwork.md index 821b03a5a..67b1c82fc 100644 --- a/docs/source/NewNetwork/NewNetwork.md +++ b/docs/source/NewNetwork/NewNetwork.md @@ -37,7 +37,7 @@ The Stewards must: 1. Generate Steward DIDs as described in [Creating DID](./CreateDID.md). 1. Install their node as described in [Installation and configuration of Indy-Node](../installation-and-configuration.md) (with some small adjustments): - 1. Determine a name for the new network and have the stewards substitute it in the appropriate places in the guide, such as when setting the network name and creating the directory when creating the keys for the node. + 1. Determine a name for the new network and have the stewards substitute it in the appropriate places in the guide, such as when setting the network name and creating the directory when creating the keys for the node. This step MUST be completed before running init_indy_node as part of step [3.2.3 Create theKey for the Validator Node](https://github.com/lynnbendixsen/indy-node/blob/master/docs/source/installation-and-configuration.md#323-create-the-key-for-the-validator-node). 1. They all need to stop at the normal place ([3.5. 
Add Node to a Pool](../installation-and-configuration.md#3.5.-Add-Node-to-a-Pool)) as instructed in the guide as the steps that follow differ when creating a new network. The following sections of this guide describe the steps required to start the new network. Once the Stewards have created their DID and Verkey, and performed the initial setup of they node, give the Stewards access to a spreadsheet like [this one](https://docs.google.com/spreadsheets/d/1LDduIeZp7pansd9deXeVSqGgdf0VdAHNMc7xYli3QAY/edit#gid=0) and have them fill out their own row of the Stewards sheet. The completed sheet will be used to generate the genesis transaction files for the network. diff --git a/docs/source/installation-and-configuration.md b/docs/source/installation-and-configuration.md index bdb124505..f2f6808a3 100644 --- a/docs/source/installation-and-configuration.md +++ b/docs/source/installation-and-configuration.md @@ -255,6 +255,7 @@ Many providers, such as AWS, use local, non-routable IP addresses on their nodes Please run the following on the Validator before running `init_indy_node`. In the `/etc/indy/indy_config.py` file, change the Network name from “sandbox” (Sovrin StagingNet) to “net3” (Sovrin BuilderNet) (use sudo to edit the file or use `sudo sed -i -re "s/(NETWORK_NAME = ')\w+/\1net3/" /etc/indy/indy_config.py)` then run the following commands: +NOTE: **This is where you would substitute the directory name of the new network if you were setting up a new network.** ``` sudo -i -u indy mkdir /var/lib/indy/net3 cd /var/lib/indy/net3 @@ -329,6 +330,7 @@ At this point you should have the following data available: After you have been informed that your public key has been placed onto the ledger of the Network, you may complete the configuration steps to activate your Validator node on that network. Things to verify before activating the node: +Note: If you are creating a new network, substitute the new networks directory name for 'net3' below. 
- `cat /etc/indy/indy_config.py` - Ensure the network configuration is correct. - `cat /etc/indy/indy.env` diff --git a/sample/Network/README.md b/sample/Network/README.md index 15177b8ce..a4699103d 100644 --- a/sample/Network/README.md +++ b/sample/Network/README.md @@ -42,7 +42,12 @@ For the sake of simplicity this walkthrough runs all of the nodes on the local m newNetwork:indy> did new seed=000000000000000000000000Steward4 Did "TWwCRQRZ2ZHMJFn9TzLp7W" has been created with "~UhP7K35SAXbix1kCQV4Upx" verkey ``` -5. Create Validator Node keys +5. Edit `/etc/indy/indy_config.py` and change the network name. + `NETWORK_NAME = "newNetwork"` + +6. `mkdir /var/lib/indy/newNetwork` + +7. Create Validator Node keys > The seed will be randomly generated. As mentioned above with the seed you can recreate the key! @@ -100,9 +105,9 @@ For the sake of simplicity this walkthrough runs all of the nodes on the local m Proof of possession for BLS key is QsrUH1e5zsdiEGij1NeY9S7CwzUdU2rzjskHNGHCQ8rtgYZyBC99MgRPzgkJHP86nWQUo2fSRvyWLQdBwvWfNtSqUBQgVScQPHg9CJXWWohWnzSP4ViBo8EEeGXEoP2NPeRnFCCfuhYAC7stZgBATFyvdFRwG58ws76qQQQsfDDHBV ``` -6. Fill in the spreadsheet. +8. Fill in the spreadsheet. -7. Download the script from [https://github.com/sovrin-foundation/steward-tools/tree/master/create_genesis] and generate the genesis file. +9. Download the script from [https://github.com/sovrin-foundation/steward-tools/tree/master/create_genesis] and generate the genesis file. ``` $ python genesis_from_files.py --trustees Trustees.csv --stewards Stewards.csv @@ -115,11 +120,6 @@ For the sake of simplicity this walkthrough runs all of the nodes on the local m INFO:root:Recovering tree from transaction log INFO:root:Recovered tree in 0.000322740000228805 seconds ``` -8. Edit `/etc/indy/indy_config.py` and change the network name. - `NETWORK_NAME = "newNetwork"` - -9. `mkdir /var/lib/indy/newNetwork` - 10. 
`cp domain_transactions_genesis /var/lib/indy/newNetwork/ && cp pool_transactions_genesis /var/lib/indy/newNetwork/` 11. Start the nodes: @@ -129,4 +129,4 @@ For the sake of simplicity this walkthrough runs all of the nodes on the local m start_indy_node Steward2 0.0.0.0 9703 0.0.0.0 9704 start_indy_node Steward3 0.0.0.0 9705 0.0.0.0 9706 start_indy_node Steward4 0.0.0.0 9707 0.0.0.0 9708 - ``` \ No newline at end of file + ``` From 3194c1f5ea6492a275fd41855d4425c80f1425f5 Mon Sep 17 00:00:00 2001 From: Lynn Bendixsen Date: Thu, 10 Feb 2022 12:14:30 -0700 Subject: [PATCH 18/31] Corrected initial checkin with consistency and typo and line feeds. Signed-off-by: Lynn Bendixsen --- docs/source/NewNetwork/NewNetwork.md | 2 +- docs/source/installation-and-configuration.md | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/NewNetwork/NewNetwork.md b/docs/source/NewNetwork/NewNetwork.md index 67b1c82fc..9e74fc546 100644 --- a/docs/source/NewNetwork/NewNetwork.md +++ b/docs/source/NewNetwork/NewNetwork.md @@ -37,7 +37,7 @@ The Stewards must: 1. Generate Steward DIDs as described in [Creating DID](./CreateDID.md). 1. Install their node as described in [Installation and configuration of Indy-Node](../installation-and-configuration.md) (with some small adjustments): - 1. Determine a name for the new network and have the stewards substitute it in the appropriate places in the guide, such as when setting the network name and creating the directory when creating the keys for the node. This step MUST be completed before running init_indy_node as part of step [3.2.3 Create theKey for the Validator Node](https://github.com/lynnbendixsen/indy-node/blob/master/docs/source/installation-and-configuration.md#323-create-the-key-for-the-validator-node). + 1. 
Determine a name for the new network and have the stewards substitute it in the appropriate places in the guide, such as when setting the network name and creating the directory when creating the keys for the node. This step MUST be completed before running init_indy_node as part of step [3.2.3 Create the Key for the Validator Node](https://github.com/lynnbendixsen/indy-node/blob/master/docs/source/installation-and-configuration.md#323-create-the-key-for-the-validator-node). 1. They all need to stop at the normal place ([3.5. Add Node to a Pool](../installation-and-configuration.md#3.5.-Add-Node-to-a-Pool)) as instructed in the guide as the steps that follow differ when creating a new network. The following sections of this guide describe the steps required to start the new network. Once the Stewards have created their DID and Verkey, and performed the initial setup of they node, give the Stewards access to a spreadsheet like [this one](https://docs.google.com/spreadsheets/d/1LDduIeZp7pansd9deXeVSqGgdf0VdAHNMc7xYli3QAY/edit#gid=0) and have them fill out their own row of the Stewards sheet. The completed sheet will be used to generate the genesis transaction files for the network. diff --git a/docs/source/installation-and-configuration.md b/docs/source/installation-and-configuration.md index f2f6808a3..17793fa8f 100644 --- a/docs/source/installation-and-configuration.md +++ b/docs/source/installation-and-configuration.md @@ -254,8 +254,8 @@ Many providers, such as AWS, use local, non-routable IP addresses on their nodes Please run the following on the Validator before running `init_indy_node`. 
-In the `/etc/indy/indy_config.py` file, change the Network name from “sandbox” (Sovrin StagingNet) to “net3” (Sovrin BuilderNet) (use sudo to edit the file or use `sudo sed -i -re "s/(NETWORK_NAME = ')\w+/\1net3/" /etc/indy/indy_config.py)` then run the following commands: -NOTE: **This is where you would substitute the directory name of the new network if you were setting up a new network.** +In the `/etc/indy/indy_config.py` file, change the Network name from “sandbox” (Sovrin StagingNet) to “net3” (Sovrin BuilderNet) (use sudo to edit the file or use `sudo sed -i -re "s/(NETWORK_NAME = ')\w+/\1net3/" /etc/indy/indy_config.py)` then run the following commands: +Note: **This is where you would substitute the directory name of the new network if you were setting up a new network.** ``` sudo -i -u indy mkdir /var/lib/indy/net3 cd /var/lib/indy/net3 @@ -329,7 +329,7 @@ At this point you should have the following data available: #### 3.5.1 Configuration After you have been informed that your public key has been placed onto the ledger of the Network, you may complete the configuration steps to activate your Validator node on that network. -Things to verify before activating the node: +Things to verify before activating the node: Note: If you are creating a new network, substitute the new networks directory name for 'net3' below. - `cat /etc/indy/indy_config.py` - Ensure the network configuration is correct. From 7229c7a8dd9036d26e44e5072f6cbbac9385186a Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Fri, 27 May 2022 13:27:06 -0700 Subject: [PATCH 19/31] Update setup_iptables script - Retain overall connection limit - Limit the number of connections per IP address - Rate limit the connections per IP address. - Add support for deleting (-d) the rules added by the script. - Update usage and warning documentation. 
Signed-off-by: Wade Barnes --- scripts/setup_iptables | 185 ++++++++++++++++++++++++++++++++++------- 1 file changed, 157 insertions(+), 28 deletions(-) diff --git a/scripts/setup_iptables b/scripts/setup_iptables index ea6e8c326..a94428f46 100755 --- a/scripts/setup_iptables +++ b/scripts/setup_iptables @@ -1,49 +1,178 @@ #!/bin/bash -if [ $# -lt 2 ]; then - echo "" - echo "Usage: $0 client_port connlimit"; - echo " client_port - node client port"; - echo " connlimit - clients connections limit"; - echo "" - exit 1; -fi +usage () { + cat <<-EOF + + Usage: + + $0 [-d] [per_ip_connlimit] [conn_rate_period] [conn_rate_limit] + + Options: + -d - Delete the corresponding rules. + Removes the rules corresponding to the supplied input parameters. + + Input Parameters: + client_port - Required. The node's client port. + overall_connlimit - Required. The overall connection limit for all clients. + per_ip_connlimit - Optional. The connection limit per IP address; defaults to 3. + conn_rate_period - Optional. The period for connection rate limiting; defaults to 60 seconds. + conn_rate_limit - Optional. The connection limit for connection rate limiting; default to 10. + + Example: + $0 9701 300 + +EOF + exit 1 +} + +check_setup () { + cat <<-EOF + + Warning: iptables and/or iptables-persistent is not installed, or permission denied. Client connections limit is not set. + + Please ensure iptables and iptables-persistent are both installed and iptables-persistent is enabled, and try running with sudo. 
+ # To install iptables-persistent: + sudo apt-get install -y iptables-persistent + + # Make sure services are enabled on Debian or Ubuntu using the systemctl command: + sudo systemctl is-enabled netfilter-persistent.service + + # If not enable it: + sudo systemctl enable netfilter-persistent.service + + # Get status: + sudo systemctl status netfilter-persistent.service + +EOF + exit 1 +} -DPORT=$1 -CONN_LIMIT=$2 LOG_CHAIN=LOG_CONN_REJECT +OPERATION="add_rule" + +while getopts dh FLAG; do + case $FLAG in + d) + OPERATION="delete_rule" + DELETE=1 + ;; + h) + usage + ;; + \?) + usage + ;; + esac +done +shift $((OPTIND-1)) -add_rule_if_not_exist() -{ - RULE="$1" +DPORT=${1} +OVER_ALL_CONN_LIMIT=${2} + +# Default to 3 connections per IP. +CONN_LIMIT_PER_IP=${3:-3} + +# Default: Allow an IP to make up to 10 connection attempts every 100 seconds. +CONN_RATE_LIMIT_PERIOD=${4:-60} +CONN_RATE_LIMIT_LIMIT=${5:-10} + +add() { + if [ -z ${DELETE} ]; then + return 0 + else + return 1 + fi +} - cmd="iptables -C $RULE 2>/dev/null 1>&2" +delete() { + if [ ! -z ${DELETE} ]; then + return 0 + else + return 1 + fi +} + +rule_exists() { + RULE="${1}" + cmd="iptables -C ${RULE} 2>/dev/null 1>&2" + # echo $cmd eval $cmd + rtnCd=$? + if (( ${rtnCd} == 0 )); then + return 0 + else + return 1 + fi +} - if [ $? -eq 1 ]; then - cmd="iptables -A $RULE" +add_rule() { + RULE="${1}" + if ! rule_exists "${RULE}"; then + cmd="iptables -A ${RULE}" + # echo $cmd eval $cmd fi } +delete_rule() { + RULE="${1}" + if rule_exists "${RULE}"; then + cmd="iptables -D ${RULE}" + # echo $cmd + eval $cmd + fi +} + +save_rules() { + su -c "iptables-save > /etc/iptables/rules.v4 && ip6tables-save > /etc/iptables/rules.v6" +} + +if [ $# -lt 2 ]; then + usage +fi + # Check whether iptables installed and works -dpkg -s iptables 2>/dev/null 1>&2 && iptables -nL 2>/dev/null 1>&2 +dpkg -s iptables 2>/dev/null 1>&2 && iptables -nL 2>/dev/null 1>&2 && dpkg -s iptables-persistent 2>/dev/null 1>&2 if [ $? 
-eq 0 ]; then - # Create logging chain for rejected connections - iptables -N $LOG_CHAIN 2>/dev/null 1>&2 + + if add; then + echo "Adding iptable rules ..." + # Create logging chain for rejected connections + iptables -N ${LOG_CHAIN} 2>/dev/null 1>&2 + else + echo "Removing iptable rules ..." + fi # Append a rule that sets log level and log prefix - RULE="$LOG_CHAIN -j LOG --log-level warning --log-prefix \"connlimit: \"" - add_rule_if_not_exist "$RULE" + RULE="${LOG_CHAIN} -j LOG --log-level warning --log-prefix \"connlimit: \"" + ${OPERATION} "${RULE}" # Append a rule that finally rejects connection - RULE="$LOG_CHAIN -p tcp -j REJECT --reject-with tcp-reset" - add_rule_if_not_exist "$RULE" + RULE="${LOG_CHAIN} -p tcp -j REJECT --reject-with tcp-reset" + ${OPERATION} "${RULE}" + + # Append a rule to limit the total number of simultaneous client connections + RULE="INPUT -p tcp --syn --dport ${DPORT} -m connlimit --connlimit-above ${OVER_ALL_CONN_LIMIT} --connlimit-mask 0 -j ${LOG_CHAIN}" + ${OPERATION} "${RULE}" - # Append a rule to limit the number of simultaneous clients connections - RULE="INPUT -p tcp --syn --dport $DPORT -m connlimit --connlimit-above $CONN_LIMIT --connlimit-mask 0 -j $LOG_CHAIN" - add_rule_if_not_exist "$RULE" + # Append a rule to limit the number connections per IP address + RULE="INPUT -p tcp -m tcp --dport ${DPORT} --tcp-flags FIN,SYN,RST,ACK SYN -m connlimit --connlimit-above ${CONN_LIMIT_PER_IP} --connlimit-mask 32 --connlimit-saddr -j ${LOG_CHAIN}" + ${OPERATION} "${RULE}" + + # Append rules to rate limit connections + RULE="INPUT -p tcp -m tcp --dport ${DPORT} -m conntrack --ctstate NEW -m recent --set --name DEFAULT --mask 255.255.255.255 --rsource" + ${OPERATION} "${RULE}" + RULE="INPUT -p tcp -m tcp --dport ${DPORT} -m conntrack --ctstate NEW -m recent --update --seconds ${CONN_RATE_LIMIT_PERIOD} --hitcount ${CONN_RATE_LIMIT_LIMIT} --name DEFAULT --mask 255.255.255.255 --rsource -j ${LOG_CHAIN}" + ${OPERATION} "${RULE}" + + if 
delete; then + # Remove logging chain for rejected connections + iptables -X ${LOG_CHAIN} 2>/dev/null 1>&2 + fi + + # Save the rules + save_rules else - echo "Warning: iptables is not installed or permission denied, clients connections limit is not set." -fi + check_setup +fi \ No newline at end of file From 656b797d17ee679d9793027f343813bc6dedf40d Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Mon, 13 Jun 2022 12:34:43 -0700 Subject: [PATCH 20/31] Update setup_iptables - Add support for disabling IPv6. Signed-off-by: Wade Barnes --- scripts/setup_iptables | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/scripts/setup_iptables b/scripts/setup_iptables index a94428f46..e453a96d9 100755 --- a/scripts/setup_iptables +++ b/scripts/setup_iptables @@ -128,6 +128,33 @@ save_rules() { su -c "iptables-save > /etc/iptables/rules.v4 && ip6tables-save > /etc/iptables/rules.v6" } +disable_ipv6() { + echo "Disabling IPv6 ..." + ip6_conf_file="/etc/sysctl.d/60-custom-disable-ipv6.conf" + mkdir -p ${ip6_conf_file%/*} + + cat <<-EOF > ${ip6_conf_file} +net.ipv6.conf.all.disable_ipv6 = 1 +net.ipv6.conf.default.disable_ipv6 = 1 +net.ipv6.conf.lo.disable_ipv6 = 1 +EOF + + sysctl -p + systemctl restart procps +} + +enable_ipv6() { + echo "Enabling IPv6 ..." + ip6_conf_file="/etc/sysctl.d/60-custom-disable-ipv6.conf" + + if [ -f ${ip6_conf_file} ]; then + rm ${ip6_conf_file} + fi + sysctl -p + systemctl restart procps +} + + if [ $# -lt 2 ]; then usage fi @@ -173,6 +200,12 @@ if [ $? -eq 0 ]; then # Save the rules save_rules + + if add; then + disable_ipv6 + else + enable_ipv6 + fi else check_setup fi \ No newline at end of file From 69374ee9f9cc05fc31ddd3b5aa4d19a2194cafca Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Wed, 3 Aug 2022 14:08:29 -0700 Subject: [PATCH 21/31] Update setup_iptables script - Adjust defaults based on testing. - Remove default logging rule. - Add support for setting an explicit logging level. Recommend debug. 
Signed-off-by: Wade Barnes --- scripts/setup_iptables | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/scripts/setup_iptables b/scripts/setup_iptables index e453a96d9..3c387b688 100755 --- a/scripts/setup_iptables +++ b/scripts/setup_iptables @@ -14,12 +14,15 @@ usage () { Input Parameters: client_port - Required. The node's client port. overall_connlimit - Required. The overall connection limit for all clients. - per_ip_connlimit - Optional. The connection limit per IP address; defaults to 3. + per_ip_connlimit - Optional. The connection limit per IP address; defaults to 10. conn_rate_period - Optional. The period for connection rate limiting; defaults to 60 seconds. - conn_rate_limit - Optional. The connection limit for connection rate limiting; default to 10. + conn_rate_limit - Optional. The connection limit for connection rate limiting; default to 20. + logging_level - Optional. If used, this should be set to a level such as 'debug' so they can + easily be filtered from the logs and included only as needed. + Default is no logging. Example: - $0 9701 300 + $0 9702 15000 EOF exit 1 @@ -70,12 +73,14 @@ shift $((OPTIND-1)) DPORT=${1} OVER_ALL_CONN_LIMIT=${2} -# Default to 3 connections per IP. -CONN_LIMIT_PER_IP=${3:-3} +# Default to 10 connections per IP. +CONN_LIMIT_PER_IP=${3:-10} -# Default: Allow an IP to make up to 10 connection attempts every 100 seconds. +# Default: Allow an IP to make up to 20 connection attempts every 60 seconds. CONN_RATE_LIMIT_PERIOD=${4:-60} -CONN_RATE_LIMIT_LIMIT=${5:-10} +CONN_RATE_LIMIT_LIMIT=${5:-20} + +CONN_LOGGING_LEVEL=${6} add() { if [ -z ${DELETE} ]; then @@ -171,9 +176,16 @@ if [ $? -eq 0 ]; then echo "Removing iptable rules ..." fi - # Append a rule that sets log level and log prefix + # Make sure the previous default logging rule is removed. It causes too much CPU overhead under load. 
RULE="${LOG_CHAIN} -j LOG --log-level warning --log-prefix \"connlimit: \"" - ${OPERATION} "${RULE}" + delete_rule "${RULE}" + + # Append a rule that sets log level and log prefix + # Default to no logging unless a logging level is explicitly supplied. + if [ ! -z ${CONN_LOGGING_LEVEL} ]; then + RULE="${LOG_CHAIN} -j LOG --log-level ${CONN_LOGGING_LEVEL} --log-prefix \"connlimit: \"" + ${OPERATION} "${RULE}" + fi # Append a rule that finally rejects connection RULE="${LOG_CHAIN} -p tcp -j REJECT --reject-with tcp-reset" From b894cc0ab81a177afba6eeddb10f368d06f3ff41 Mon Sep 17 00:00:00 2001 From: Ry Jones Date: Thu, 4 Aug 2022 07:22:39 -0700 Subject: [PATCH 22/31] Update settings.yml Signed-off-by: Ry Jones --- .github/settings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/settings.yml b/.github/settings.yml index 772375995..24c11809c 100644 --- a/.github/settings.yml +++ b/.github/settings.yml @@ -6,7 +6,7 @@ repository: name: indy-node description: The server portion of a distributed ledger purpose-built for decentralized identity. homepage: https://wiki.hyperledger.org/display/indy - default_branch: master + default_branch: main has_downloads: false has_issues: true has_projects: false From 572162fdaa6daa6dfa258527f84561321f45d94f Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Thu, 18 Aug 2022 13:00:03 -0700 Subject: [PATCH 23/31] Update setup_iptables script - Make rate limiting optional. Defaults to off. - Add test mode `-t`, so you can see how your input settings will be applied before using them. 
Signed-off-by: Wade Barnes --- scripts/setup_iptables | 57 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 11 deletions(-) diff --git a/scripts/setup_iptables b/scripts/setup_iptables index 3c387b688..d29847d03 100755 --- a/scripts/setup_iptables +++ b/scripts/setup_iptables @@ -5,18 +5,19 @@ usage () { Usage: - $0 [-d] [per_ip_connlimit] [conn_rate_period] [conn_rate_limit] + $0 [-d] [per_ip_connlimit] [conn_rate_limit] [conn_rate_period] [logging_level] Options: -d - Delete the corresponding rules. Removes the rules corresponding to the supplied input parameters. + -t - Test mode. Prints out the list of input settings and exits. Input Parameters: client_port - Required. The node's client port. overall_connlimit - Required. The overall connection limit for all clients. per_ip_connlimit - Optional. The connection limit per IP address; defaults to 10. + conn_rate_limit - Optional. The connection limit for connection rate limiting; default to -1, off. conn_rate_period - Optional. The period for connection rate limiting; defaults to 60 seconds. - conn_rate_limit - Optional. The connection limit for connection rate limiting; default to 20. logging_level - Optional. If used, this should be set to a level such as 'debug' so they can easily be filtered from the logs and included only as needed. Default is no logging. @@ -51,15 +52,38 @@ EOF exit 1 } +print_settings() { + if (( ${CONN_RATE_LIMIT_LIMIT} <= 0 || ${CONN_RATE_LIMIT_PERIOD} <= 0 )); then + RATE_LIMIT_MESSAGE=" - Connection rate limiting is turned off." 
+ fi + + cat <<-EOF + + client_port: ${DPORT} + overall_connlimit: ${OVER_ALL_CONN_LIMIT} + per_ip_connlimit: ${CONN_LIMIT_PER_IP} + conn_rate_limit: ${CONN_RATE_LIMIT_LIMIT} ${RATE_LIMIT_MESSAGE} + conn_rate_period: ${CONN_RATE_LIMIT_PERIOD} ${RATE_LIMIT_MESSAGE} + logging_level: ${CONN_LOGGING_LEVEL:-Not set, (off) default} + + OPERATION: ${OPERATION} + DELETE: ${DELETE} + TEST_MODE: ${TEST_MODE} +EOF +} + LOG_CHAIN=LOG_CONN_REJECT OPERATION="add_rule" -while getopts dh FLAG; do +while getopts dth FLAG; do case $FLAG in d) OPERATION="delete_rule" DELETE=1 ;; + t) + TEST_MODE=1 + ;; h) usage ;; @@ -76,9 +100,11 @@ OVER_ALL_CONN_LIMIT=${2} # Default to 10 connections per IP. CONN_LIMIT_PER_IP=${3:-10} -# Default: Allow an IP to make up to 20 connection attempts every 60 seconds. -CONN_RATE_LIMIT_PERIOD=${4:-60} -CONN_RATE_LIMIT_LIMIT=${5:-20} +# Default: Rate limiting disabled; -1. +CONN_RATE_LIMIT_LIMIT=${4:--1} + +# Default to a per minute rate limit. +CONN_RATE_LIMIT_PERIOD=${5:-60} CONN_LOGGING_LEVEL=${6} @@ -159,11 +185,15 @@ enable_ipv6() { systemctl restart procps } - if [ $# -lt 2 ]; then usage fi +if [ ! -z ${TEST_MODE} ]; then + print_settings + exit 0 +fi + # Check whether iptables installed and works dpkg -s iptables 2>/dev/null 1>&2 && iptables -nL 2>/dev/null 1>&2 && dpkg -s iptables-persistent 2>/dev/null 1>&2 if [ $? -eq 0 ]; then @@ -200,10 +230,15 @@ if [ $? 
-eq 0 ]; then ${OPERATION} "${RULE}" # Append rules to rate limit connections - RULE="INPUT -p tcp -m tcp --dport ${DPORT} -m conntrack --ctstate NEW -m recent --set --name DEFAULT --mask 255.255.255.255 --rsource" - ${OPERATION} "${RULE}" - RULE="INPUT -p tcp -m tcp --dport ${DPORT} -m conntrack --ctstate NEW -m recent --update --seconds ${CONN_RATE_LIMIT_PERIOD} --hitcount ${CONN_RATE_LIMIT_LIMIT} --name DEFAULT --mask 255.255.255.255 --rsource -j ${LOG_CHAIN}" - ${OPERATION} "${RULE}" + if (( ${CONN_RATE_LIMIT_LIMIT} > 0 && ${CONN_RATE_LIMIT_PERIOD} > 0 )); then + echo "Including settings for rate limiting ..." + RULE="INPUT -p tcp -m tcp --dport ${DPORT} -m conntrack --ctstate NEW -m recent --set --name DEFAULT --mask 255.255.255.255 --rsource" + ${OPERATION} "${RULE}" + RULE="INPUT -p tcp -m tcp --dport ${DPORT} -m conntrack --ctstate NEW -m recent --update --seconds ${CONN_RATE_LIMIT_PERIOD} --hitcount ${CONN_RATE_LIMIT_LIMIT} --name DEFAULT --mask 255.255.255.255 --rsource -j ${LOG_CHAIN}" + ${OPERATION} "${RULE}" + else + echo "Rate limiting is disabled, skipping settings for rate limiting ..." + fi if delete; then # Remove logging chain for rejected connections From 07a7f3f7bc4e24d53d161771438da916bf9ab570 Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Fri, 6 May 2022 11:45:48 -0700 Subject: [PATCH 24/31] Refactor pool upgrade handler - Switch order of operations. - Clean up messaging. 
- Update unit tests Signed-off-by: Wade Barnes --- indy_common/util.py | 2 ++ .../pool_upgrade_handler.py | 32 +++++++++---------- .../test_pool_upgrade_handler.py | 20 ++++++++++++ 3 files changed, 38 insertions(+), 16 deletions(-) diff --git a/indy_common/util.py b/indy_common/util.py index d46a93824..2439140fe 100644 --- a/indy_common/util.py +++ b/indy_common/util.py @@ -1,6 +1,7 @@ import datetime import os import random +import re from typing import Tuple, Union, TypeVar, List, Callable import libnacl.secret @@ -143,6 +144,7 @@ def getIndex(predicateFn: Callable[[T], bool], items: List[T]) -> int: def compose_cmd(cmd): if os.name != 'nt': cmd = ' '.join(cmd) + cmd = re.split(";|&&", cmd.splitlines()[0], 1)[0].rstrip() return cmd diff --git a/indy_node/server/request_handlers/config_req_handlers/pool_upgrade_handler.py b/indy_node/server/request_handlers/config_req_handlers/pool_upgrade_handler.py index f68bb2a69..d08c737b1 100644 --- a/indy_node/server/request_handlers/config_req_handlers/pool_upgrade_handler.py +++ b/indy_node/server/request_handlers/config_req_handlers/pool_upgrade_handler.py @@ -1,3 +1,5 @@ +import re + from typing import Optional from indy_common.authorize.auth_actions import AuthActionAdd, AuthActionEdit @@ -52,22 +54,6 @@ def additional_dynamic_validation(self, request: Request, req_pp_time: Optional[ self._validate_request_type(request) identifier, req_id, operation = get_request_data(request) status = '*' - - pkg_to_upgrade = operation.get(PACKAGE, getConfig().UPGRADE_ENTRY) - targetVersion = operation[VERSION] - reinstall = operation.get(REINSTALL, False) - - if not pkg_to_upgrade: - raise InvalidClientRequest(identifier, req_id, "Upgrade package name is empty") - - try: - res = self.upgrader.check_upgrade_possible(pkg_to_upgrade, targetVersion, reinstall) - except Exception as exc: - res = str(exc) - - if res: - raise InvalidClientRequest(identifier, req_id, res) - action = operation.get(ACTION) # TODO: Some validation needed for 
making sure name and version # present @@ -99,6 +85,20 @@ def additional_dynamic_validation(self, request: Request, req_pp_time: Optional[ self.write_req_validator.validate(request, [auth_action]) + pkg_to_upgrade = operation.get(PACKAGE, getConfig().UPGRADE_ENTRY) + if not pkg_to_upgrade: + raise InvalidClientRequest(identifier, req_id, "Upgrade package name is empty") + pkg_to_upgrade = re.split(";|&&", pkg_to_upgrade.splitlines()[0], 1)[0].rstrip() + targetVersion = operation[VERSION] + reinstall = operation.get(REINSTALL, False) + try: + res = self.upgrader.check_upgrade_possible(pkg_to_upgrade, targetVersion, reinstall) + except Exception as exc: + res = str(exc) + + if res: + raise InvalidClientRequest(identifier, req_id, res) + def apply_forced_request(self, req: Request): super().apply_forced_request(req) txn = self._req_to_txn(req) diff --git a/indy_node/test/request_handlers/test_pool_upgrade_handler.py b/indy_node/test/request_handlers/test_pool_upgrade_handler.py index 1c4cf7fdc..ecb5f8db1 100644 --- a/indy_node/test/request_handlers/test_pool_upgrade_handler.py +++ b/indy_node/test/request_handlers/test_pool_upgrade_handler.py @@ -71,6 +71,11 @@ def test_pool_upgrade_static_validation_passes(pool_upgrade_handler, def test_pool_upgrade_dynamic_validation_fails_pckg(pool_upgrade_handler, pool_upgrade_request, tconf): + pool_upgrade_handler.upgrader.get_upgrade_txn = \ + lambda predicate, reverse: \ + {TXN_PAYLOAD: {TXN_PAYLOAD_DATA: {}}} + pool_upgrade_handler.write_req_validator.validate = lambda a, b: 0 + pool_upgrade_request.operation[PACKAGE] = '' with pytest.raises(InvalidClientRequest) as e: pool_upgrade_handler.dynamic_validation(pool_upgrade_request, 0) @@ -82,6 +87,11 @@ def test_pool_upgrade_dynamic_validation_fails_not_installed( pool_upgrade_handler, pool_upgrade_request, tconf): + pool_upgrade_handler.upgrader.get_upgrade_txn = \ + lambda predicate, reverse: \ + {TXN_PAYLOAD: {TXN_PAYLOAD_DATA: {}}} + 
pool_upgrade_handler.write_req_validator.validate = lambda a, b: 0 + monkeypatch.setattr(NodeControlUtil, 'curr_pkg_info', lambda *x: (None, None)) with pytest.raises(InvalidClientRequest) as e: @@ -94,6 +104,11 @@ def test_pool_upgrade_dynamic_validation_fails_belong( pool_upgrade_handler, pool_upgrade_request, tconf): + pool_upgrade_handler.upgrader.get_upgrade_txn = \ + lambda predicate, reverse: \ + {TXN_PAYLOAD: {TXN_PAYLOAD_DATA: {}}} + pool_upgrade_handler.write_req_validator.validate = lambda a, b: 0 + monkeypatch.setattr(NodeControlUtil, 'curr_pkg_info', lambda *x: ('1.1.1', ['some_pkg'])) with pytest.raises(InvalidClientRequest) as e: @@ -107,6 +122,11 @@ def test_pool_upgrade_dynamic_validation_fails_upgradable( pool_upgrade_request, pkg_version, tconf): + pool_upgrade_handler.upgrader.get_upgrade_txn = \ + lambda predicate, reverse: \ + {TXN_PAYLOAD: {TXN_PAYLOAD_DATA: {}}} + pool_upgrade_handler.write_req_validator.validate = lambda a, b: 0 + monkeypatch.setattr( NodeControlUtil, 'curr_pkg_info', lambda *x: (pkg_version, [APP_NAME]) From 94b984b4786dc94f5748375b5147509af7ebaa10 Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Mon, 30 May 2022 10:49:28 -0700 Subject: [PATCH 25/31] Additional updates to upgrade txn handling - Update PoolUpgradeHandler to only allow processing of a single package. - Update NodeControlUtil._get_curr_info to only allow processing of a single package and check for the existence of the package before fetching any details. - Add additional unit tests. 
Signed-off-by: Wade Barnes --- indy_common/test/test_util.py | 33 +++- indy_common/util.py | 2 +- .../pool_upgrade_handler.py | 4 +- .../test_node_control_util.py | 165 +++++++++++++++--- .../test_pool_upgrade_handler.py | 24 ++- indy_node/utils/node_control_utils.py | 18 ++ 6 files changed, 221 insertions(+), 25 deletions(-) diff --git a/indy_common/test/test_util.py b/indy_common/test/test_util.py index 73f2b55d9..75fe944f8 100644 --- a/indy_common/test/test_util.py +++ b/indy_common/test/test_util.py @@ -1,6 +1,8 @@ +import pytest + from operator import itemgetter from indy_common.util import getIndex - +from indy_common.util import compose_cmd def test_getIndex(): items = [('a', {'key1': 1}), ('b', {'key2': 2})] @@ -12,3 +14,32 @@ def containsKey(key): assert 0 == getIndex(containsKey('key1'), items) assert 1 == getIndex(containsKey('key2'), items) assert -1 == getIndex(containsKey('key3'), items) + +@pytest.mark.parametrize( + 'pkg_name,package', + [ + pytest.param('some_package', 'some_package', id='some_package'), + pytest.param('package_1', 'package_1;echo "hi"&&echo "hello"\necho "hello world!"', id='strips mixed cmd concat'), + pytest.param('package_3', 'package_3;echo "hey"', id='strips semi-colon cmd concat'), + pytest.param('package_4', 'package_4&&echo "hey"', id='strips and cmd concat'), + pytest.param('package_5', 'package_5\necho "hey"', id='strips Cr cmd concat'), + ] +) +def test_compose_cmd(pkg_name, package): + expected_cmd = f'dpkg -s {pkg_name}' + + cmd = compose_cmd(['dpkg', '-s', package]) + assert expected_cmd == cmd + +def test_compose_cmd_allows_whitespace(): + pkg_name = 'package_7 some_other_package' + expected_cmd = f'dpkg -s {pkg_name}' + cmd = compose_cmd(['dpkg', '-s', pkg_name]) + assert expected_cmd == cmd + +def test_compose_cmd_allows_pipe(): + expected_cmd = 'dpkg --get-selections | grep -v deinstall | cut -f1' + cmd = compose_cmd( + ['dpkg', '--get-selections', '|', 'grep', '-v', 'deinstall', '|', 'cut', '-f1'] + ) + assert 
expected_cmd == cmd \ No newline at end of file diff --git a/indy_common/util.py b/indy_common/util.py index 2439140fe..2c51ce2a5 100644 --- a/indy_common/util.py +++ b/indy_common/util.py @@ -144,7 +144,7 @@ def getIndex(predicateFn: Callable[[T], bool], items: List[T]) -> int: def compose_cmd(cmd): if os.name != 'nt': cmd = ' '.join(cmd) - cmd = re.split(";|&&", cmd.splitlines()[0], 1)[0].rstrip() + cmd = re.split(";|&&", cmd.splitlines()[0], 1)[0].rstrip() return cmd diff --git a/indy_node/server/request_handlers/config_req_handlers/pool_upgrade_handler.py b/indy_node/server/request_handlers/config_req_handlers/pool_upgrade_handler.py index d08c737b1..591be9219 100644 --- a/indy_node/server/request_handlers/config_req_handlers/pool_upgrade_handler.py +++ b/indy_node/server/request_handlers/config_req_handlers/pool_upgrade_handler.py @@ -88,7 +88,9 @@ def additional_dynamic_validation(self, request: Request, req_pp_time: Optional[ pkg_to_upgrade = operation.get(PACKAGE, getConfig().UPGRADE_ENTRY) if not pkg_to_upgrade: raise InvalidClientRequest(identifier, req_id, "Upgrade package name is empty") - pkg_to_upgrade = re.split(";|&&", pkg_to_upgrade.splitlines()[0], 1)[0].rstrip() + + # Only allow processing of a single package + pkg_to_upgrade = re.split("\s+|;|&&|\|", pkg_to_upgrade.splitlines()[0], 1)[0].rstrip() targetVersion = operation[VERSION] reinstall = operation.get(REINSTALL, False) try: diff --git a/indy_node/test/node_control_utils/test_node_control_util.py b/indy_node/test/node_control_utils/test_node_control_util.py index 0ad8344fb..c62769b85 100644 --- a/indy_node/test/node_control_utils/test_node_control_util.py +++ b/indy_node/test/node_control_utils/test_node_control_util.py @@ -1,5 +1,7 @@ +from ast import arg import pytest import shutil +import re from common.version import DigitDotVersion @@ -13,8 +15,8 @@ # - conditionally skip all tests for non-debian systems # - teste _parse_version_deps_from_pkg_mgr_output deeply -generated_commands = [] 
+generated_commands = [] @pytest.fixture def catch_generated_commands(monkeypatch): @@ -29,12 +31,107 @@ def _f(command, *args, **kwargs): monkeypatch.setattr(NodeControlUtil, 'run_shell_command', _f) -def test_generated_cmd_get_curr_info(catch_generated_commands): - pkg_name = 'some_package' +some_package_info = 'Package: some_package\nVersion: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa' +some_other_package_info = 'Package: some_other_package\nVersion: 4.5.6\nDepends: ddd (= 3.4.5), eee (>= 5.1.2), fff, ddd' +app_package_info = f'Package: {APP_NAME}\nVersion: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa' +any_package_info = 'Package: any_package\nVersion: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa' + +@pytest.fixture +def patch_run_shell_command(monkeypatch): + generated_commands[:] = [] + + pkg_list = f'openssl\nsed\ntar\nsome_package\nsome_other_package\n{APP_NAME}\nany_package' + pkg_info = f'{some_package_info}\n\n{some_other_package_info}\n\n{app_package_info}\n\n{any_package_info}' + + def mock_run_shell_command(command, *args, **kwargs): + # Keep track of the generated commands + generated_commands.append(command) + if command == 'dpkg --get-selections | grep -v deinstall | cut -f1': + return pkg_list + else: + package_name = command.split()[-1] + packages = re.split("\n\n", pkg_info) + for package in packages: + if package_name in package: + return package + + return '' + + monkeypatch.setattr(NodeControlUtil, 'run_shell_command', mock_run_shell_command) + + +@pytest.mark.parametrize( + 'pkg_name', + [ + pytest.param('not_installed_package', id='not_installed_package'), + # Ensure partial matches don't work. 
+ pytest.param('some', id='partial_name_match-some'), + pytest.param('package', id='partial_name_match-package'), + ] +) +def test_generated_cmd_get_curr_info_pkg_not_installed(patch_run_shell_command, pkg_name): + pkg_name = 'not_installed_package' # TODO not an API for now NodeControlUtil._get_curr_info(pkg_name) assert len(generated_commands) == 1 - assert generated_commands[0] == "dpkg -s {}".format(pkg_name) + assert generated_commands[0] == 'dpkg --get-selections | grep -v deinstall | cut -f1' + + +def test_generated_cmd_get_curr_info_pkg_installed(patch_run_shell_command): + pkg_name = 'some_package' + # TODO not an API for now + NodeControlUtil._get_curr_info(pkg_name) + assert len(generated_commands) == 2 + assert generated_commands[0] == 'dpkg --get-selections | grep -v deinstall | cut -f1' + assert generated_commands[1] == "dpkg -s {}".format(pkg_name) + + +def test_generated_cmd_get_curr_info_accepts_single_pkg_only(patch_run_shell_command): + expected_pkg_name = 'some_other_package' + # The extra spaces between the package names is on purpose. 
+ pkg_name = 'some_other_package some_package' + # TODO not an API for now + NodeControlUtil._get_curr_info(pkg_name) + assert len(generated_commands) == 2 + assert generated_commands[0] == 'dpkg --get-selections | grep -v deinstall | cut -f1' + assert generated_commands[1] == "dpkg -s {}".format(expected_pkg_name) + + +@pytest.mark.parametrize( + 'pkg_name,package', + [ + pytest.param('some_package', 'some_package|echo "hey";echo "hi"&&echo "hello"|echo "hello world"\necho "hello world!"', id='strips mixed cmd concat'), + pytest.param('some_package', 'some_package|echo "hey"', id='strips pipe cmd concat'), + pytest.param('some_package', 'some_package;echo "hey"', id='strips semi-colon cmd concat'), + pytest.param('some_package', 'some_package&&echo "hey"', id='strips AND cmd concat'), + pytest.param('some_package', 'some_package\necho "hey"', id='strips Cr cmd concat'), + pytest.param('some_package', 'some_package echo "hey"', id='strips whitespace'), + ] +) +def test_generated_cmd_get_curr_info_with_command_concat(patch_run_shell_command, pkg_name, package): + # TODO not an API for now + NodeControlUtil._get_curr_info(package) + assert len(generated_commands) == 2 + assert generated_commands[0] == 'dpkg --get-selections | grep -v deinstall | cut -f1' + assert generated_commands[1] == "dpkg -s {}".format(pkg_name) + + +@pytest.mark.parametrize( + 'pkg_name,expected_output', + [ + pytest.param('some_package', some_package_info, id='some_package'), + pytest.param('some_other_package', some_other_package_info, id='some_other_package'), + pytest.param(APP_NAME, app_package_info, id=APP_NAME), + pytest.param('any_package', any_package_info, id='any_package'), + pytest.param('not_installed_package', '', id='not_installed_package'), + # Ensure partial matches don't work. 
+ pytest.param('some', '', id='partial_name_match-some'), + pytest.param('package', '', id='partial_name_match-package'), + ] +) +def test_get_curr_info_output(patch_run_shell_command, pkg_name, expected_output): + pkg_info = NodeControlUtil._get_curr_info(pkg_name) + assert pkg_info == expected_output def test_generated_cmd_get_latest_pkg_version(catch_generated_commands): @@ -154,24 +251,52 @@ def test_get_latest_pkg_version_for_unknown_package(): 'some-unknown-package-name', update_cache=False) is None -def test_curr_pkg_info_no_data(monkeypatch): - monkeypatch.setattr(NodeControlUtil, 'run_shell_command', lambda *_: '') - assert (None, []) == NodeControlUtil.curr_pkg_info('any_package') +def test_curr_pkg_info_no_data(patch_run_shell_command): + assert (None, []) == NodeControlUtil.curr_pkg_info('some-unknown-package-name') -def test_curr_pkg_info(monkeypatch): - output = 'Version: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa' - expected_deps = ['aaa=1.2.4', 'bbb=1.2.5', 'ccc'] - monkeypatch.setattr(NodeControlUtil, 'run_shell_command', lambda *_: output) +@pytest.mark.parametrize( + 'pkg_name,version,expected_deps', + [ + pytest.param('some_package', '1.2.3', ['aaa=1.2.4', 'bbb=1.2.5', 'ccc'], id='some_package'), + pytest.param('some_other_package', '4.5.6', ['ddd=3.4.5', 'eee=5.1.2', 'fff'], id='some_other_package'), + pytest.param(APP_NAME, '1.2.3', ['aaa=1.2.4', 'bbb=1.2.5', 'ccc'], id=APP_NAME), + pytest.param('any_package', '1.2.3', ['aaa=1.2.4', 'bbb=1.2.5', 'ccc'], id='any_package'), + ] +) +def test_curr_pkg_info(patch_run_shell_command, pkg_name, version, expected_deps): + upstream_cls = src_version_cls(pkg_name) + expected_version = DebianVersion( + version, upstream_cls=upstream_cls) + + pkg_info = NodeControlUtil.curr_pkg_info(pkg_name) - for pkg_name in [APP_NAME, 'any_package']: - upstream_cls = src_version_cls(pkg_name) - expected_version = DebianVersion( - '1.2.3', upstream_cls=upstream_cls) + assert expected_version == pkg_info[0] + 
assert isinstance(expected_version, type(pkg_info[0])) + assert isinstance(expected_version.upstream, type(pkg_info[0].upstream)) + assert expected_deps == pkg_info[1] + + +@pytest.mark.parametrize( + 'pkg_name', + [ + pytest.param(f'{APP_NAME} | echo "hey"; echo "hi" && echo "hello"|echo "hello world"', id='multiple'), + pytest.param(f'{APP_NAME}|echo "hey"', id='pipe'), + pytest.param(f'{APP_NAME};echo "hey"', id='semi-colon'), + pytest.param(f'{APP_NAME}&&echo "hey"', id='and'), + pytest.param(f'{APP_NAME}\necho "hey"', id='Cr'), + pytest.param(f'{APP_NAME} echo "hey"', id='whitespace'), + ] +) +def test_curr_pkg_info_with_command_concat(patch_run_shell_command, pkg_name): + expected_deps = ['aaa=1.2.4', 'bbb=1.2.5', 'ccc'] + upstream_cls = src_version_cls(pkg_name) + expected_version = DebianVersion( + '1.2.3', upstream_cls=upstream_cls) - pkg_info = NodeControlUtil.curr_pkg_info(pkg_name) + pkg_info = NodeControlUtil.curr_pkg_info(pkg_name) - assert expected_version == pkg_info[0] - assert isinstance(expected_version, type(pkg_info[0])) - assert isinstance(expected_version.upstream, type(pkg_info[0].upstream)) - assert expected_deps == pkg_info[1] + assert expected_version == pkg_info[0] + assert isinstance(expected_version, type(pkg_info[0])) + assert isinstance(expected_version.upstream, type(pkg_info[0].upstream)) + assert expected_deps == pkg_info[1] \ No newline at end of file diff --git a/indy_node/test/request_handlers/test_pool_upgrade_handler.py b/indy_node/test/request_handlers/test_pool_upgrade_handler.py index ecb5f8db1..f844447ae 100644 --- a/indy_node/test/request_handlers/test_pool_upgrade_handler.py +++ b/indy_node/test/request_handlers/test_pool_upgrade_handler.py @@ -96,7 +96,27 @@ def test_pool_upgrade_dynamic_validation_fails_not_installed( lambda *x: (None, None)) with pytest.raises(InvalidClientRequest) as e: pool_upgrade_handler.dynamic_validation(pool_upgrade_request, 0) - e.match('is not installed and cannot be upgraded') + 
e.match(f'{pool_upgrade_request.operation[PACKAGE]} is not installed and cannot be upgraded') + + +def test_pool_upgrade_dynamic_validation_fails_not_installed_mpr( + monkeypatch, + pool_upgrade_handler, + pool_upgrade_request, + tconf): + pool_upgrade_handler.upgrader.get_upgrade_txn = \ + lambda predicate, reverse: \ + {TXN_PAYLOAD: {TXN_PAYLOAD_DATA: {}}} + pool_upgrade_handler.write_req_validator.validate = lambda a, b: 0 + + monkeypatch.setattr(NodeControlUtil, 'curr_pkg_info', + lambda *x: (None, None)) + + # When multiple packages are requested, only the first should be processed. + pool_upgrade_request.operation[PACKAGE] = 'some_package some_other_package' + with pytest.raises(InvalidClientRequest) as e: + pool_upgrade_handler.dynamic_validation(pool_upgrade_request, 0) + e.match('some_package is not installed and cannot be upgraded') def test_pool_upgrade_dynamic_validation_fails_belong( @@ -113,7 +133,7 @@ def test_pool_upgrade_dynamic_validation_fails_belong( lambda *x: ('1.1.1', ['some_pkg'])) with pytest.raises(InvalidClientRequest) as e: pool_upgrade_handler.dynamic_validation(pool_upgrade_request, 0) - e.match('doesn\'t belong to pool') + e.match(f'{pool_upgrade_request.operation[PACKAGE]} doesn\'t belong to pool') def test_pool_upgrade_dynamic_validation_fails_upgradable( diff --git a/indy_node/utils/node_control_utils.py b/indy_node/utils/node_control_utils.py index 318e1fa9e..4f15b8169 100644 --- a/indy_node/utils/node_control_utils.py +++ b/indy_node/utils/node_control_utils.py @@ -199,9 +199,27 @@ def run_shell_script_extended( @classmethod def _get_curr_info(cls, package): + # Only allow processing of a single package + package = re.split("\s+|;|&&|\|", package.splitlines()[0], 1)[0].rstrip() + + # Ensure the package exists before fetching the details directly from dpkg + if not cls._package_exists(package): + return '' + cmd = compose_cmd(['dpkg', '-s', package]) return cls.run_shell_command(cmd) + @classmethod + def _package_exists(cls, 
package): + cmd = compose_cmd( + ['dpkg', '--get-selections', '|', 'grep', '-v', 'deinstall', '|', 'cut', '-f1'] + ) + installed_packages = cls.run_shell_command(cmd) + + # Ensure full match of package names. + is_installed = True if package in installed_packages.split('\n') else False + return is_installed + @classmethod def _parse_deps(cls, deps: str): ret = [] From 6c11c532c199cacbbf3b02dbdb32175b5ff1796e Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Thu, 21 Jul 2022 10:39:56 -0700 Subject: [PATCH 26/31] Switch string formatting to python 3.5 supported syntax. Signed-off-by: Wade Barnes --- .../test_node_control_util.py | 18 +++++++++--------- .../test_pool_upgrade_handler.py | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/indy_node/test/node_control_utils/test_node_control_util.py b/indy_node/test/node_control_utils/test_node_control_util.py index c62769b85..8d065b334 100644 --- a/indy_node/test/node_control_utils/test_node_control_util.py +++ b/indy_node/test/node_control_utils/test_node_control_util.py @@ -33,15 +33,15 @@ def _f(command, *args, **kwargs): some_package_info = 'Package: some_package\nVersion: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa' some_other_package_info = 'Package: some_other_package\nVersion: 4.5.6\nDepends: ddd (= 3.4.5), eee (>= 5.1.2), fff, ddd' -app_package_info = f'Package: {APP_NAME}\nVersion: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa' +app_package_info = 'Package: {}\nVersion: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa'.format(APP_NAME) any_package_info = 'Package: any_package\nVersion: 1.2.3\nDepends: aaa (= 1.2.4), bbb (>= 1.2.5), ccc, aaa' @pytest.fixture def patch_run_shell_command(monkeypatch): generated_commands[:] = [] - pkg_list = f'openssl\nsed\ntar\nsome_package\nsome_other_package\n{APP_NAME}\nany_package' - pkg_info = f'{some_package_info}\n\n{some_other_package_info}\n\n{app_package_info}\n\n{any_package_info}' + pkg_list = 
'openssl\nsed\ntar\nsome_package\nsome_other_package\n{}\nany_package'.format(APP_NAME) + pkg_info = '{}\n\n{}\n\n{}\n\n{}'.format(some_package_info, some_other_package_info, app_package_info, any_package_info) def mock_run_shell_command(command, *args, **kwargs): # Keep track of the generated commands @@ -280,12 +280,12 @@ def test_curr_pkg_info(patch_run_shell_command, pkg_name, version, expected_deps @pytest.mark.parametrize( 'pkg_name', [ - pytest.param(f'{APP_NAME} | echo "hey"; echo "hi" && echo "hello"|echo "hello world"', id='multiple'), - pytest.param(f'{APP_NAME}|echo "hey"', id='pipe'), - pytest.param(f'{APP_NAME};echo "hey"', id='semi-colon'), - pytest.param(f'{APP_NAME}&&echo "hey"', id='and'), - pytest.param(f'{APP_NAME}\necho "hey"', id='Cr'), - pytest.param(f'{APP_NAME} echo "hey"', id='whitespace'), + pytest.param('{} | echo "hey"; echo "hi" && echo "hello"|echo "hello world"'.format(APP_NAME), id='multiple'), + pytest.param('{}|echo "hey"'.format(APP_NAME), id='pipe'), + pytest.param('{};echo "hey"'.format(APP_NAME), id='semi-colon'), + pytest.param('{}&&echo "hey"'.format(APP_NAME), id='and'), + pytest.param('{}\necho "hey"'.format(APP_NAME), id='Cr'), + pytest.param('{} echo "hey"'.format(APP_NAME), id='whitespace'), ] ) def test_curr_pkg_info_with_command_concat(patch_run_shell_command, pkg_name): diff --git a/indy_node/test/request_handlers/test_pool_upgrade_handler.py b/indy_node/test/request_handlers/test_pool_upgrade_handler.py index f844447ae..57a7a87a2 100644 --- a/indy_node/test/request_handlers/test_pool_upgrade_handler.py +++ b/indy_node/test/request_handlers/test_pool_upgrade_handler.py @@ -96,7 +96,7 @@ def test_pool_upgrade_dynamic_validation_fails_not_installed( lambda *x: (None, None)) with pytest.raises(InvalidClientRequest) as e: pool_upgrade_handler.dynamic_validation(pool_upgrade_request, 0) - e.match(f'{pool_upgrade_request.operation[PACKAGE]} is not installed and cannot be upgraded') + e.match('{} is not installed and cannot 
be upgraded'.format(pool_upgrade_request.operation[PACKAGE])) def test_pool_upgrade_dynamic_validation_fails_not_installed_mpr( @@ -133,7 +133,7 @@ def test_pool_upgrade_dynamic_validation_fails_belong( lambda *x: ('1.1.1', ['some_pkg'])) with pytest.raises(InvalidClientRequest) as e: pool_upgrade_handler.dynamic_validation(pool_upgrade_request, 0) - e.match(f'{pool_upgrade_request.operation[PACKAGE]} doesn\'t belong to pool') + e.match('{} doesn\'t belong to pool'.format(pool_upgrade_request.operation[PACKAGE])) def test_pool_upgrade_dynamic_validation_fails_upgradable( From 960311879cd0dd40a5aab97027cfbd2a05a79552 Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Tue, 16 Aug 2022 11:52:24 -0700 Subject: [PATCH 27/31] Fix apt update issues affecting NodeControlUtil.update_package_cache Signed-off-by: Wade Barnes --- .../test_node_control_util.py | 109 +++++++++++++++++- indy_node/utils/node_control_utils.py | 40 +++++++ 2 files changed, 148 insertions(+), 1 deletion(-) diff --git a/indy_node/test/node_control_utils/test_node_control_util.py b/indy_node/test/node_control_utils/test_node_control_util.py index 8d065b334..43674b0b1 100644 --- a/indy_node/test/node_control_utils/test_node_control_util.py +++ b/indy_node/test/node_control_utils/test_node_control_util.py @@ -162,12 +162,119 @@ def test_generated_cmd_get_info_from_package_manager(catch_generated_commands): assert len(generated_commands) == 1 assert generated_commands[0] == "apt-cache show {}".format(" ".join(packages)) - +# apt update is successful def test_generated_cmd_update_package_cache(catch_generated_commands): NodeControlUtil.update_package_cache() assert len(generated_commands) == 1 assert generated_commands[0] == "apt update" +# apt update fails +# apt update dependencies don't need to be upgraded, i.e. only key update is performed. 
+def test_generated_cmd_update_package_cache_2(monkeypatch): + run_shell_script_counter = 0 + commands = [] + + def _run_shell_script(command, *args, **kwargs): + nonlocal run_shell_script_counter + run_shell_script_counter += 1 + commands.append(command) + + if run_shell_script_counter == 1: + raise ShellError(100, "apt update") + + return '' + + def _f(command, *args, **kwargs): + commands.append(command) + return '' + + monkeypatch.setattr(NodeControlUtil, 'run_shell_script', _run_shell_script) + monkeypatch.setattr(NodeControlUtil, 'run_shell_script_extended', _f) + monkeypatch.setattr(NodeControlUtil, 'run_shell_command', _f) + + NodeControlUtil.update_package_cache() + assert len(commands) == 4 + assert commands[0] == "apt update" + assert commands[1] == "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88" + assert commands[2] == "apt list --upgradable" + assert commands[3] == "apt update" + + +# apt update fails +# apt update dependencies need to be upgraded +def test_generated_cmd_update_package_cache_3(monkeypatch): + run_shell_script_counter = 0 + commands = [] + + def _run_shell_script(command, *args, **kwargs): + nonlocal run_shell_script_counter + run_shell_script_counter += 1 + commands.append(command) + + if run_shell_script_counter == 1: + raise ShellError(100, "apt update") + + return '' + + def _run_shell_command(command, *args, **kwargs): + commands.append(command) + return """libgnutls-openssl27/xenial-updates 3.4.10-4ubuntu1.9 amd64 [upgradable from: 3.4.10-4ubuntu1.7] +libgnutls30/xenial-updates 3.4.10-4ubuntu1.9 amd64 [upgradable from: 3.4.10-4ubuntu1.7] +liblxc1/xenial-updates 2.0.11-0ubuntu1~16.04.3 amd64 [upgradable from: 2.0.8-0ubuntu1~16.04.2]""" + + def _f(command, *args, **kwargs): + commands.append(command) + return '' + + monkeypatch.setattr(NodeControlUtil, 'run_shell_script', _run_shell_script) + monkeypatch.setattr(NodeControlUtil, 'run_shell_script_extended', _f) + monkeypatch.setattr(NodeControlUtil, 
'run_shell_command', _run_shell_command) + + NodeControlUtil.update_package_cache() + assert len(commands) == 5 + assert commands[0] == "apt update" + assert commands[1] == "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88" + assert commands[2] == "apt list --upgradable" + assert commands[3] == "apt --only-upgrade install -y libgnutls30" + assert commands[4] == "apt update" + + +def test_generated_cmd_update_repo_keys(catch_generated_commands): + NodeControlUtil.update_repo_keys() + assert len(generated_commands) == 1 + assert generated_commands[0] == "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88" + + +# apt update dependencies don't need to be upgraded +def test_generated_cmd_update_apt_update_dependencies_1(catch_generated_commands): + NodeControlUtil.update_apt_update_dependencies() + assert len(generated_commands) == 1 + assert generated_commands[0] == "apt list --upgradable" + + +# apt update dependencies need to be upgraded +def test_generated_cmd_update_apt_update_dependencies_2(monkeypatch): + commands = [] + + def _run_shell_command(command, *args, **kwargs): + commands.append(command) + return """libgnutls-openssl27/xenial-updates 3.4.10-4ubuntu1.9 amd64 [upgradable from: 3.4.10-4ubuntu1.7] +libgnutls30/xenial-updates 3.4.10-4ubuntu1.9 amd64 [upgradable from: 3.4.10-4ubuntu1.7] +liblxc1/xenial-updates 2.0.11-0ubuntu1~16.04.3 amd64 [upgradable from: 2.0.8-0ubuntu1~16.04.2]""" + + def _f(command, *args, **kwargs): + commands.append(command) + return '' + + monkeypatch.setattr(NodeControlUtil, 'run_shell_script', _f) + monkeypatch.setattr(NodeControlUtil, 'run_shell_script_extended', _f) + monkeypatch.setattr(NodeControlUtil, 'run_shell_command', _run_shell_command) + + NodeControlUtil.update_apt_update_dependencies() + assert len(commands) == 2 + assert commands[0] == "apt list --upgradable" + assert commands[1] == "apt --only-upgrade install -y libgnutls30" + def 
test_generated_cmd_get_sys_holds(monkeypatch, catch_generated_commands): monkeypatch.setattr(shutil, 'which', lambda *_: 'path') diff --git a/indy_node/utils/node_control_utils.py b/indy_node/utils/node_control_utils.py index 4f15b8169..a03c988ef 100644 --- a/indy_node/utils/node_control_utils.py +++ b/indy_node/utils/node_control_utils.py @@ -359,8 +359,48 @@ def _get_info_from_package_manager(cls, *package): @classmethod def update_package_cache(cls): cmd = compose_cmd(['apt', 'update']) + try: + cls.run_shell_script(cmd) + except ShellError as e: + # Currently two issues can stop this from working. + # 1) The Sovrin Repo key needs to be updated + # apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 + # 2) The following certificate validation error occurs: + # Err:6 https://repo.sovrin.org/deb xenial Release + # server certificate verification failed. CAfile: /etc/ssl/certs/ca-certificates.crt CRLfile: none + # Reading package lists... Done + # E: The repository 'https://repo.sovrin.org/deb xenial Release' does not have a Release file. + # N: Updating from such a repository can't be done securely, and is therefore disabled by default. + # N: See apt-secure(8) manpage for repository creation and user configuration details. + # This can be fixed by updating libgnutls30: + # apt --only-upgrade install -y libgnutls30 + logger.warning("Call to apt update failed in update_package_cache; {}".format(e)) + cls.update_repo_keys() + cls.update_apt_update_dependencies() + + # Try again ... 
+ logger.info("Trying apt update again ...") + cls.run_shell_script(cmd) + + @classmethod + def update_repo_keys(cls): + logger.info("Updating signing keys for the artifact repository ...") + cmd = compose_cmd(['apt-key', 'adv', '--keyserver', 'keyserver.ubuntu.com', '--recv-keys', 'CE7709D068DB5E88']) cls.run_shell_script(cmd) + @classmethod + def update_apt_update_dependencies(cls): + cmd = compose_cmd(['apt', 'list', '--upgradable']) + logger.info("Getting list of upgradable packages ...") + upgradable_packages = cls.run_shell_command(cmd).split("\n") + libgnutls30 = next((x for x in upgradable_packages if x.find('libgnutls30') != -1), None) + if libgnutls30 is not None: + logger.info("Upgrading libgnutls30 ...") + cmd = compose_cmd(['apt', '--only-upgrade', 'install', '-y', 'libgnutls30']) + cls.run_shell_script(cmd) + else: + logger.info("libgnutls30 is already up to date.") + @classmethod def get_deps_tree(cls, *package, depth=0): ret = list(set(package)) From 6215b99c4970e053e09453a7a13711c5b9959b3c Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Thu, 18 Aug 2022 07:22:42 -0700 Subject: [PATCH 28/31] Fix exception type in update_package_cache Signed-off-by: Wade Barnes --- indy_node/test/node_control_utils/test_node_control_util.py | 4 ++-- indy_node/utils/node_control_utils.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/indy_node/test/node_control_utils/test_node_control_util.py b/indy_node/test/node_control_utils/test_node_control_util.py index 43674b0b1..aecad7d23 100644 --- a/indy_node/test/node_control_utils/test_node_control_util.py +++ b/indy_node/test/node_control_utils/test_node_control_util.py @@ -180,7 +180,7 @@ def _run_shell_script(command, *args, **kwargs): commands.append(command) if run_shell_script_counter == 1: - raise ShellError(100, "apt update") + raise Exception("Command 'apt update' returned non-zero exit status") return '' @@ -212,7 +212,7 @@ def _run_shell_script(command, *args, **kwargs): 
commands.append(command) if run_shell_script_counter == 1: - raise ShellError(100, "apt update") + raise Exception("Command 'apt update' returned non-zero exit status") return '' diff --git a/indy_node/utils/node_control_utils.py b/indy_node/utils/node_control_utils.py index a03c988ef..43ca42ba2 100644 --- a/indy_node/utils/node_control_utils.py +++ b/indy_node/utils/node_control_utils.py @@ -361,7 +361,7 @@ def update_package_cache(cls): cmd = compose_cmd(['apt', 'update']) try: cls.run_shell_script(cmd) - except ShellError as e: + except Exception as e: # Currently two issues can stop this from working. # 1) The Sovrin Repo key needs to be updated # apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CE7709D068DB5E88 From 7559c4e1e2657da62389fdd2922448e3f538949b Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Fri, 2 Sep 2022 12:49:04 -0700 Subject: [PATCH 29/31] Fix linting errors Signed-off-by: Wade Barnes --- indy_common/test/test_util.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/indy_common/test/test_util.py b/indy_common/test/test_util.py index 75fe944f8..e96538991 100644 --- a/indy_common/test/test_util.py +++ b/indy_common/test/test_util.py @@ -4,6 +4,7 @@ from indy_common.util import getIndex from indy_common.util import compose_cmd + def test_getIndex(): items = [('a', {'key1': 1}), ('b', {'key2': 2})] getDict = itemgetter(1) @@ -15,6 +16,7 @@ def containsKey(key): assert 1 == getIndex(containsKey('key2'), items) assert -1 == getIndex(containsKey('key3'), items) + @pytest.mark.parametrize( 'pkg_name,package', [ @@ -31,15 +33,17 @@ def test_compose_cmd(pkg_name, package): cmd = compose_cmd(['dpkg', '-s', package]) assert expected_cmd == cmd + def test_compose_cmd_allows_whitespace(): pkg_name = 'package_7 some_other_package' expected_cmd = f'dpkg -s {pkg_name}' cmd = compose_cmd(['dpkg', '-s', pkg_name]) assert expected_cmd == cmd + def test_compose_cmd_allows_pipe(): expected_cmd = 'dpkg --get-selections | grep -v 
deinstall | cut -f1' cmd = compose_cmd( ['dpkg', '--get-selections', '|', 'grep', '-v', 'deinstall', '|', 'cut', '-f1'] ) - assert expected_cmd == cmd \ No newline at end of file + assert expected_cmd == cmd From 300be0b3afa17ebde078bd327b31092fa080fd54 Mon Sep 17 00:00:00 2001 From: Wade Barnes Date: Fri, 9 Sep 2022 07:59:17 -0700 Subject: [PATCH 30/31] Update the setup-iptables documentation. - Update with the latest connection limit recommendations. - Add instructions for updating the scripts and recommended settings on a node. Signed-off-by: Wade Barnes --- docs/source/setup-iptables.md | 95 ++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 47 deletions(-) diff --git a/docs/source/setup-iptables.md b/docs/source/setup-iptables.md index 768cd3318..058c8b31c 100644 --- a/docs/source/setup-iptables.md +++ b/docs/source/setup-iptables.md @@ -1,70 +1,71 @@ # Setup iptables rules (recommended) -It is strongly recommended to add iptables (or some other firewall) rule that limits the number of simultaneous clients -connections for client port. -There are at least two important reasons for this: - - preventing the indy-node process from reaching of open file descriptors limit caused by clients connections - - preventing the indy-node process from large memory usage as ZeroMQ creates the separate queue for each TCP connection. +It is strongly recommended to add iptables (or some other firewall) rules to limit the number of simultaneous clients +connections to your node's client port. -NOTE: limitation of the number of *simultaneous clients connections* does not mean that we limit the -number of *simultaneous clients* the indy-node works with in any time. The IndySDK client does not keep -connection infinitely, it uses the same connection for request-response session with some optimisations, -so it's just about **connections**, **not** about **clients**. 
+There are at least two important reasons for this:
+ - preventing the indy-node process from exceeding the limit of open file descriptors due to an excessive number of clients connections.
+ - controlling the indy-node process's memory use, as ZeroMQ creates a separate queue for each TCP connection.
-Also iptables can be used to deal with various DoS attacks (e.g. syn flood) but rules' parameters are not estimated yet.
+NOTE: The limitation of the number of *simultaneous clients connections* does not mean that we limit the
+number of *simultaneous clients* indy-node works with in any time. Connections are not left open infinitely. The same connection is used for a request-response session with some optimisations and then closed, therefore it's just about **connections**, **not** about **clients**.
-NOTE: you should be a root to operate with iptables.
+NOTE: You will need to have sudo privileges to work with iptables.
+## Using indy scripts
-## Setting up clients connections limit
+For ease of use and for people that are not familiar with iptables we've added two scripts:
+ - [`setup_iptables`](https://github.com/hyperledger/indy-node/blob/main/scripts/setup_iptables):
+   - By default this script adds rules to iptables to limit the number of simultaneous clients connections for a specified port.
+   - To get a full list of options run `./setup_iptables -h` from the scripts directory.
-#### Using raw iptables command or iptables front-end
+ - [`setup_indy_node_iptables`](https://github.com/hyperledger/indy-node/blob/main/scripts/setup_indy_node_iptables):
+   - A wrapper around `setup_iptables` which gets client port and connection limit settings from the `/etc/indy/indy.env` that is created by the `init_indy_node` script.
-In case of deb installation the indy-node environment file /etc/indy/indy.env is created by `init_indy_node` script.
-This environment file contains client port (NODE_CLIENT_PORT) and recommended clients connections limit (CLIENT_CONNECTIONS_LIMIT).
-This parameters can be used to add the iptables rule for chain INPUT:
+Which one you use depends on how you installed indy-node on your server. Refer to the [For deb package based installations](#for-deb-package-based-installations), and [For pip based installations](#for-pip-based-installations) sections below.
-```
-# iptables -I INPUT -p tcp --syn --dport 9702 -m connlimit --connlimit-above 500 --connlimit-mask 0 -j REJECT --reject-with tcp-reset
-```
-Some key options:
- - --dport - a port for which limit is set
- - --connlimit-above - connections limit, exceeding new connections will be rejected using TCP reset
- - --connlimit-mask - group hosts using the prefix length, 0 means "all subnets"
+### Updating the scripts and configuration
-Corresponding fields should be set in case of some iptables front-end usage.
+Before you run the scripts you should ensure you are using the latest scripts and recommended settings by following these steps while logged into your node:
+1. Make a backup copy of the existing `setup_iptables` script by executing the command:
+   ```
+   sudo cp /usr/local/bin/setup_iptables /usr/local/bin/setup_iptables_$(date "+%Y%m%d-%H%M%S")
+   ```
-#### Using indy scripts
+1. Update the default client connection limit to 15000 in `/etc/indy/indy.env`.
+   - NOTE:
+     - `/etc/indy/indy.env` only exists for deb package based installations.
+     - `\1` is an escape sequence, so `\115000` is not a typo.
+   ```
+   sudo sed -i -re "s/(^CLIENT_CONNECTIONS_LIMIT=).*$/\115000/" /etc/indy/indy.env
+   ```
-For ease of use and for people that are not familiar with iptables we've
-added two scripts:
- - setup_iptables: adds a rule to iptables to limit the number of simultaneous
- clients connections for specified port;
- - setup_indy_node_iptables: a wrapper for setup_iptables script which gets client
- port and recommended connections limit from indy-node environment file that is created by init_indy_node script.
+1. Download the latest version of the script.
+ ``` + sudo curl -o /usr/local/bin/setup_iptables https://raw.githubusercontent.com/hyperledger/indy-node/main/scripts/setup_iptables + ``` + The sha256 checksum for the current version of the script is `a0e4451cc49897dc38946091b245368c1f1360201f374a3ad121925f9aa80664` -Links to these scripts: - - https://github.com/hyperledger/indy-node/blob/master/scripts/setup_iptables - - https://github.com/hyperledger/indy-node/blob/master/scripts/setup_indy_node_iptables - -NOTE: for now the iptables chain for which the rule is added is not parameterized, -the rule is always added for INPUT chain, we can parameterize it in future if needed. +### For deb package based installations -###### For deb installation -To setup the limit of the number of simultaneous clients connections it is enough to run the following script without parameters +Run: ``` -# setup_indy_node_iptables +setup_indy_node_iptables ``` -This script gets client port and recommended connections limit from the indy-node environment file. +NOTE: + - This script should only be called *after* your node has been initialized using `init_indy_node`, to ensure `/etc/indy/indy.env` has been created. -NOTE: this script should be called *after* `init_indy_node` script. +### For pip based installations -###### For pip installation -The `setup_indy_node_iptables` script can not be used in case of pip installation as indy-node environment file does not exist, -use the `setup_iptables` script instead (9702 is a client port, 500 is recommended limit for now) +For pip based installations `/etc/indy/indy.env` does not exist, therefore `setup_indy_node_iptables` cannot be used. Instead you run `setup_iptables` directly. + +For example, if your client port is 9702, you would run: ``` -# setup_iptables 9702 500 +setup_iptables 9702 15000 ``` -In fact, the `setup_indy_node_iptables` script is just a wrapper for the `setup_iptables` script. 
+ +## Using raw iptables command or iptables front-end + +If you are confident with using iptables, you may add additional rules as you see fit using iptables directly. \ No newline at end of file From 030f87e6a3f43e8dd7a761206a68336c6b1867bd Mon Sep 17 00:00:00 2001 From: oMFDOo <50069569+oMFDOo@users.noreply.github.com> Date: Thu, 22 Sep 2022 15:29:01 +0900 Subject: [PATCH 31/31] Update transactions.md Signed-off-by: oMFDOo <50069569+oMFDOo@users.noreply.github.com> --- docs/source/transactions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/transactions.md b/docs/source/transactions.md index b980446e0..7401b7a3e 100644 --- a/docs/source/transactions.md +++ b/docs/source/transactions.md @@ -1617,7 +1617,7 @@ The `constraint_id` fields is where one can define the desired auth constraint f Constraint Type. As of now, the following constraint types are supported: - - "ROLE": a constraint defining how many siganatures of a given role are required + - "ROLE": a constraint defining how many signatures of a given role are required - "OR": logical disjunction for all constraints from `auth_constraints` - "AND": logical conjunction for all constraints from `auth_constraints`