-
Notifications
You must be signed in to change notification settings - Fork 0
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Test/data availability and integrity #2
Changes from 28 commits
231a391
a5a3f17
2fc0cd1
0d32bcd
b424456
0eac80f
b83019c
1025e36
53bb790
a56dbfc
b028574
578efed
536864e
c5f82e6
01b6025
be897bf
77ce25c
7cdcc0d
d021e52
c6a8b44
696a6b2
ca1686a
56a4f6b
4bc7c74
82abae7
1b0731f
5f68514
0c91fe7
2738c9e
b4d32df
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,31 @@ | ||
# cfgsync configuration for the 2-node test cluster
port: 4400
n_hosts: 2
timeout: 30

# ConsensusConfig related parameters
security_param: 10
active_slot_coeff: 0.9

# DaConfig related parameters
subnetwork_size: 2
dispersal_factor: 2
num_samples: 1
num_subnets: 2
old_blobs_check_interval_secs: 5
blobs_validity_duration_secs: 60
global_params_path: "/kzgrs_test_params"

# Tracing
tracing_settings:
  logger: Stdout
  tracing: !Otlp
    endpoint: http://tempo:4317/
    sample_ratio: 0.5
    service_name: node
  filter: !EnvFilter
    filters:
      nomos: debug
  metrics: !Otlp
    endpoint: http://prometheus:9090/api/v1/otlp/v1/metrics
    host_identifier: node
  level: INFO
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,31 @@ | ||
# cfgsync configuration for the 5-node test cluster
# (identical to the 2-node variant except n_hosts)
port: 4400
n_hosts: 5
timeout: 30

# ConsensusConfig related parameters
security_param: 10
active_slot_coeff: 0.9

# DaConfig related parameters
subnetwork_size: 2
dispersal_factor: 2
num_samples: 1
num_subnets: 2
old_blobs_check_interval_secs: 5
blobs_validity_duration_secs: 60
global_params_path: "/kzgrs_test_params"

# Tracing
tracing_settings:
  logger: Stdout
  tracing: !Otlp
    endpoint: http://tempo:4317/
    sample_ratio: 0.5
    service_name: node
  filter: !EnvFilter
    filters:
      nomos: debug
  metrics: !Otlp
    endpoint: http://prometheus:9090/api/v1/otlp/v1/metrics
    host_identifier: node
  level: INFO
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,5 @@ | ||
port: 4400 | ||
n_hosts: 2 | ||
n_hosts: 5 | ||
timeout: 30 | ||
|
||
# ConsensusConfig related parameters | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,20 +1,71 @@ | ||
import inspect | ||
import os | ||
import shutil | ||
|
||
import pytest | ||
|
||
from src.env_vars import NODE_1, NODE_2 | ||
from src.env_vars import CFGSYNC, NOMOS, NOMOS_EXECUTOR | ||
from src.libs.custom_logger import get_custom_logger | ||
from src.node.nomos_node import NomosNode | ||
|
||
logger = get_custom_logger(__name__) | ||
|
||
|
||
def prepare_cluster_config(node_count):
    """Activate the cfgsync config for the requested cluster size.

    Copies ``cluster_config/cfgsync-<node_count>node.yaml`` (relative to the
    current working directory) over ``cluster_config/cfgsync.yaml``, which is
    the file the containers actually read.
    """
    config_dir = os.path.join(os.getcwd(), "cluster_config")
    source = os.path.join(config_dir, f"cfgsync-{node_count}node.yaml")
    target = os.path.join(config_dir, "cfgsync.yaml")
    shutil.copyfile(source, target)
|
||
|
||
def start_nodes(nodes):
    """Kick off container startup for each node, in the given order."""
    for n in nodes:
        n.start()
|
||
|
||
def ensure_nodes_ready(nodes):
    """Block until every node reports ready; propagates the node's error."""
    for n in nodes:
        n.ensure_ready()
|
||
|
||
class StepsCommon:
    """Shared pytest fixtures that bring up cfgsync + nomos node clusters."""

    @pytest.fixture(scope="function", autouse=True)
    def cluster_setup(self):
        """Autouse fixture: reset the node registry before every test."""
        logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}")
        self.main_nodes = []

    def _start_cluster_and_wait(self):
        # Shared tail of both cluster fixtures (previously duplicated):
        # start every registered node, then wait for the REST services.
        start_nodes(self.main_nodes)
        try:
            # NOTE(review): [2:] skips readiness checks for the first TWO
            # entries (cfgsync AND nomos_node_0) — confirm this is intended
            # and not meant to be [1:] (skip only cfgsync).
            ensure_nodes_ready(self.main_nodes[2:])
        except Exception as ex:
            logger.error(f"REST service did not become ready in time: {ex}")
            raise

    @pytest.fixture(scope="function")
    def setup_2_node_cluster(self):
        # `request` parameter removed: it was unused (flagged in review).
        """Bring up cfgsync, one nomos node, and one executor."""
        logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}")
        prepare_cluster_config(2)
        self.node1 = NomosNode(CFGSYNC, "cfgsync")
        self.node2 = NomosNode(NOMOS, "nomos_node_0")
        self.node3 = NomosNode(NOMOS_EXECUTOR, "nomos_node_1")
        self.main_nodes.extend([self.node1, self.node2, self.node3])
        self._start_cluster_and_wait()

    @pytest.fixture(scope="function")
    def setup_5_node_cluster(self):
        """Bring up cfgsync, four nomos nodes, and one executor."""
        logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}")
        prepare_cluster_config(5)
        self.node1 = NomosNode(CFGSYNC, "cfgsync")
        self.node2 = NomosNode(NOMOS, "nomos_node_0")
        self.node3 = NomosNode(NOMOS, "nomos_node_1")
        self.node4 = NomosNode(NOMOS, "nomos_node_2")
        self.node5 = NomosNode(NOMOS, "nomos_node_3")
        self.node6 = NomosNode(NOMOS_EXECUTOR, "nomos_node_4")
        self.main_nodes.extend([self.node1, self.node2, self.node3, self.node4, self.node5, self.node6])
        self._start_cluster_and_wait()
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,65 @@ | ||
import allure | ||
|
||
from src.env_vars import NOMOS_EXECUTOR | ||
from src.steps.common import StepsCommon | ||
|
||
|
||
def add_padding(orig_bytes, block_size=31):
    """Pad a list of byte values (ints in 0..255) PKCS#7-style.

    Each padding byte equals the number of bytes added. Data whose length is
    already a multiple of ``block_size`` receives a full extra block of
    padding, per the PKCS#7 convention.

    Fixes vs. original: the docstring was placed after the first statement
    (so it was not a real docstring), and the ``if padding_needed == 0``
    branch was dead — ``block_size - (n % block_size)`` is always in
    [1, block_size]. ``block_size`` is now a parameter (default 31, so
    existing callers are unaffected).

    :param orig_bytes: list of ints in [0, 255] to pad (not mutated)
    :param block_size: padding block size; defaults to 31 as before
    :return: new list, ``orig_bytes`` plus the padding bytes
    """
    original_len = len(orig_bytes)
    # Always in [1, block_size]; a multiple of block_size yields a full block.
    padding_needed = block_size - (original_len % block_size)
    return orig_bytes + [padding_needed] * padding_needed
|
||
|
||
def prepare_dispersal_request(data, app_id, index):
    """Build the JSON payload for a dispersal request.

    Encodes *data* as UTF-8, pads it with :func:`add_padding`, and wraps it
    with the blob metadata (``app_id`` and ``index``).
    """
    payload = add_padding(list(data.encode("utf-8")))
    return {
        "data": payload,
        "metadata": {"app_id": app_id, "index": index},
    }
|
||
|
||
def prepare_get_range_request(app_id, start_index, end_index):
    """Build the JSON payload for a get-data-range query."""
    index_range = {"start": start_index, "end": end_index}
    return {"app_id": app_id, "range": index_range}
|
||
|
||
class StepsDataAvailability(StepsCommon):
    """Data-availability test steps: dispersal and range retrieval."""

    def find_executor_node(self):
        """Return the first executor node in the cluster, or ``{}`` if none.

        Fix: the original scanned the whole list without an early exit
        (returning the *last* match); clusters here have exactly one
        executor, so returning the first match is equivalent and cheaper.
        """
        for node in self.main_nodes:
            if node.node_type() == NOMOS_EXECUTOR:
                return node
        return {}

    @allure.step
    def disperse_data(self, data, app_id, index):
        """Send a dispersal request for *data* via the executor node.

        Rejections are expected in negative tests, so HTTP 400/500 errors
        are asserted rather than propagated.
        """
        request = prepare_dispersal_request(data, app_id, index)
        executor = self.find_executor_node()
        try:
            executor.send_dispersal_request(request)
        except Exception as ex:
            assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)

    @allure.step
    def get_data_range(self, node, app_id, start, end):
        """Query *node* for blobs of *app_id* in [start, end].

        Returns the node's response, or ``[]`` when the node rejected the
        query with an expected HTTP 400/500 error.
        """
        response = []
        query = prepare_get_range_request(app_id, start, end)
        try:
            response = node.send_get_data_range_request(query)
        except Exception as ex:
            assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
        return response
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,30 @@ | ||
class TestDataIntegrity: | ||
import pytest | ||
|
||
from src.libs.custom_logger import get_custom_logger | ||
from src.steps.da import StepsDataAvailability | ||
from src.test_data import DATA_TO_DISPERSE | ||
|
||
logger = get_custom_logger(__name__) | ||
|
||
|
||
class TestDataIntegrity(StepsDataAvailability):
    main_nodes = []

    # Skipped per review: reconstruction needs `nomos-cli reconstruct`
    # (logos-co/nomos-node#994); until it is wired in, reconstructed_data
    # stays empty and the final assert can never pass.
    @pytest.mark.skip(reason="blob reconstruction requires nomos-cli (nomos-node#994)")
    @pytest.mark.usefixtures("setup_5_node_cluster")
    def test_da_identify_retrieve_missing_columns(self):
        """Disperse a blob, fetch only half the columns, reconstruct."""
        self.disperse_data(DATA_TO_DISPERSE[0], [0] * 31 + [1], [0] * 8)
        received_data = []
        # Get data only from half of nodes
        for node in self.main_nodes[2:4]:
            received_data.append(self.get_data_range(node, [0] * 31 + [1], [0] * 8, [0] * 7 + [3]))

        # Use received blob data to reconstruct the original data
        # TODO: invoke `nomos-cli reconstruct` here.
        reconstructed_data = []
        assert DATA_TO_DISPERSE[0] == bytes(reconstructed_data).decode("utf-8")
|
||
Reviewer: Shouldn't nomos-cli be called here to reconstruct the data from the received blobs?
Author: Yes, nomos-cli should be called to reconstruct the data from the received blobs — see PR logos-co/nomos-node#994. I would treat nomos-cli as our first-choice verification tool; building independent tooling of our own would come at a certain cost. We can discuss more during the meeting.
Reviewer: OK, but for now the assert will not pass, right?
Author: Tests skipped at b4d32df. |
||
@pytest.mark.usefixtures("setup_2_node_cluster") | ||
def test_da_sampling_determines_data_presence(self): | ||
self.disperse_data(DATA_TO_DISPERSE[0], [0] * 31 + [1], [0] * 8) | ||
received_data = self.get_data_range(self.node2, [0] * 31 + [1], [0] * 8, [0] * 7 + [5]) | ||
assert DATA_TO_DISPERSE[0] == bytes(received_data[0][1]).decode("utf-8") |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,24 +1,14 @@ | ||
import pytest | ||
|
||
from src.env_vars import CFGSYNC, NOMOS, NOMOS_EXECUTOR | ||
from src.libs.custom_logger import get_custom_logger | ||
from src.node.nomos_node import NomosNode | ||
from src.steps.common import StepsCommon | ||
|
||
logger = get_custom_logger(__name__) | ||
|
||
|
||
class Test2NodeClAlive(StepsCommon):
    """Smoke test: the two-node cluster comes up via the shared fixture."""

    @pytest.mark.usefixtures("setup_2_node_cluster")
    def test_cluster_start(self):
        # Node construction, startup and readiness checks now live in the
        # fixture; reaching this line means every node reported ready.
        logger.debug("Two node cluster started successfully!")
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
doesn't seem to be used
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fixed at 2738c9e