From d75b48fc7c583fa706d0984d3e50b10433b16b5c Mon Sep 17 00:00:00 2001
From: NataliaDracheva
Date: Thu, 30 Aug 2018 13:18:41 +0100
Subject: [PATCH 1/7] [INDY-1644] Added new parameters to config file

Signed-off-by: NataliaDracheva
---
 .../performance/config_perf_spike_load.yml | 30 ++++++++++---------
 scripts/performance/perf_spike_load.py     |  5 +++-
 2 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/scripts/performance/config_perf_spike_load.yml b/scripts/performance/config_perf_spike_load.yml
index c7535ccd8..9ee27d4e8 100644
--- a/scripts/performance/config_perf_spike_load.yml
+++ b/scripts/performance/config_perf_spike_load.yml
@@ -1,29 +1,31 @@
 # perf_spike_load.py arguments
 perf_spike:
-    read_mode: permanent #permanent = background reading operation with writing spikes, spike = reading and writing spikes with 0 load in between
-    spike_time_in_seconds: 180
-    rest_time_in_seconds: 180
-    overall_time_in_seconds: 3600 # 1h = 3600 sec, 24h = 86400
+    read_mode: spike # permanent = background reading operation with writing spikes, spike = reading and writing spikes with 0 load in between
+    spike_time_in_seconds: 600
+    rest_time_in_seconds: 600
+    overall_time_in_seconds: 600 # 1h = 3600 sec, 24h = 86400
+
 # perf_processes.py will be called with arguments provided below during perf_spike_load.py execution. You may move arguments between common and _txns sections:
 # e.g. if you want to define different clients number for reading and writing transactions, just copy "clients" to read_ and write_txns sections and provide values.
 # The script takes the most specific arguments (for transactions rather than common) in case of a duplication.
 common:
-    clients: 1
     seed: 000000000000000000000000Trustee1
     num: 1
     refresh: 100
-    buff_req: 100
+    buff_req: 300
     sep: "|"
     wallet_key: key
-    mode: t
+    mode: p
     pool_config: ""
     sync_mode: all
-    out: ""
-    genesis: ~/pool_transactions_genesis
-    directory: .
+    out: "/home/me/Documents/permanent.log"
+    genesis: /home/me/Documents/stab_nodes_genesis
+    directory: ~/Documents/
 read_txns:
-    kind: get_nym
-    load_rate: 5
+    clients: 10
+    kind: "{\"schema\": 2, \"cred_def\": 2, \"nym\": 14, \"attrib\": 2}"
+    load_rate: 10
 write_txns:
-    kind: nym
-    load_rate: 2
+    clients: 1
+    kind: "{\"get_nym\": 9, \"get_schema\": 4, \"get_cred_def\": 4, \"get_attrib\": 3}"
+    load_rate: 10
\ No newline at end of file
diff --git a/scripts/performance/perf_spike_load.py b/scripts/performance/perf_spike_load.py
index 177943d14..3d30d2697 100644
--- a/scripts/performance/perf_spike_load.py
+++ b/scripts/performance/perf_spike_load.py
@@ -58,9 +58,12 @@ def start_profile():
     print("Every spike time in seconds: ", config["perf_spike"]["spike_time_in_seconds"])
     print("Interval between spikes in seconds: ", config["perf_spike"]["rest_time_in_seconds"])
     print("Overall time in minutes: ", config["perf_spike"]["overall_time_in_seconds"] / 60)
-    if config["perf_spike"]["read_mode"] == 'permanent':
+    if config["perf_spike"]["read_mode"] == 'permanent' and config["read_txns"]["step_time_in_seconds"] != 0:
         subprocess_args = create_subprocess_args(config, "read_background", folder_count, root_log_folder_name)
         subprocess.Popen(subprocess_args, close_fds=True)
+    elif config["perf_spike"]["read_mode"] == 'permanent' and config["read_txns"]["step_time_in_seconds"] != 0:
+        steps_number = int(config["perf_spike"]["spike_time_in_seconds"] / config["read_txns"]["step_time_in_seconds"])
+
     end_time = datetime.now() + timedelta(seconds=int(config["perf_spike"]["overall_time_in_seconds"]))
     while datetime.now() < end_time:
         folder_count += 1
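The "most specific arguments win" rule from the config comments above comes down to a plain dict merge before the CLI arguments are built. A minimal sketch of that behavior, with illustrative values rather than the script's actual data:

    # Sketch of the merge create_subprocess_args performs: section values
    # (read_txns here) shadow the shared "common" values on key collisions.
    common = {"clients": 1, "kind": "nym", "load_rate": 2}
    read_txns = {"kind": "get_nym", "load_rate": 5}

    merged = common.copy()
    merged.update(read_txns)  # the more specific section wins
    cli_args = ["--{}={}".format(k, v) for k, v in sorted(merged.items())]
    print(cli_args)  # ['--clients=1', '--kind=get_nym', '--load_rate=5']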
From ab021736988b1e8c14eda0e7ab2ca2014ed50106 Mon Sep 17 00:00:00 2001
From: NataliaDracheva
Date: Thu, 30 Aug 2018 14:13:27 +0100
Subject: [PATCH 2/7] [INDY-1644] Added background stepwise load

Signed-off-by: NataliaDracheva
---
 scripts/performance/config_perf_spike_load.yml | 6 +++++-
 scripts/performance/perf_spike_load.py         | 6 ++++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/scripts/performance/config_perf_spike_load.yml b/scripts/performance/config_perf_spike_load.yml
index 9ee27d4e8..3bdbc39e1 100644
--- a/scripts/performance/config_perf_spike_load.yml
+++ b/scripts/performance/config_perf_spike_load.yml
@@ -28,4 +28,8 @@ read_txns:
 write_txns:
     clients: 1
     kind: "{\"get_nym\": 9, \"get_schema\": 4, \"get_cred_def\": 4, \"get_attrib\": 3}"
-    load_rate: 10
\ No newline at end of file
+    load_rate: 10
+stepwise_load:
+    step_time_in_seconds: 0 # for stepwise testing, 0 means stable load without increasing
+    step_txns_per_second: 0 # additional load rate per step
+    step_initial_load_rate: 0 # initial load rate per spike
\ No newline at end of file
diff --git a/scripts/performance/perf_spike_load.py b/scripts/performance/perf_spike_load.py
index 3d30d2697..36d102814 100644
--- a/scripts/performance/perf_spike_load.py
+++ b/scripts/performance/perf_spike_load.py
@@ -61,9 +61,11 @@ def start_profile():
     if config["perf_spike"]["read_mode"] == 'permanent' and config["read_txns"]["step_time_in_seconds"] != 0:
         subprocess_args = create_subprocess_args(config, "read_background", folder_count, root_log_folder_name)
         subprocess.Popen(subprocess_args, close_fds=True)
-    elif config["perf_spike"]["read_mode"] == 'permanent' and config["read_txns"]["step_time_in_seconds"] != 0:
+    elif config["perf_spike"]["read_mode"] == 'permanent' and config["read_txns"]["step_time_in_seconds"] == 0:
         steps_number = int(config["perf_spike"]["spike_time_in_seconds"] / config["read_txns"]["step_time_in_seconds"])
-
+        for i in range(0, steps_number):
+            subprocess_args = create_subprocess_args(config, "read_background", folder_count, root_log_folder_name)
+            subprocess.Popen(subprocess_args, close_fds=True)
     end_time = datetime.now() + timedelta(seconds=int(config["perf_spike"]["overall_time_in_seconds"]))
     while datetime.now() < end_time:
         folder_count += 1
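The step arithmetic this patch introduces divides a load window into equal launches, and since the config documents 0 as "stable load", the division needs a guard. A small illustrative helper, not part of the scripts, showing the intended relationship:

    # Hypothetical helper: number of stepwise launches for a window.
    # step_time_in_seconds == 0 is the documented "stable load" setting,
    # so it must short-circuit before the division.
    def steps_number(spike_time_in_seconds, step_time_in_seconds):
        if step_time_in_seconds == 0:
            return 0  # stable load: a single flat process, no steps
        return int(spike_time_in_seconds / step_time_in_seconds)

    print(steps_number(600, 60))  # -> 10
    print(steps_number(600, 0))   # -> 0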
From 76e1723c97be4a097ad8fc2e8e1fc21b17b4cca7 Mon Sep 17 00:00:00 2001
From: NataliaDracheva
Date: Fri, 31 Aug 2018 14:26:46 +0100
Subject: [PATCH 3/7] [INDY-1666] Stress scenario works in stepwise and stable modes

Signed-off-by: NataliaDracheva
---
 .../performance/config_perf_spike_load.yml |  68 +++++---
 scripts/performance/perf_spike_load.py     | 164 ++++++++++++------
 2 files changed, 152 insertions(+), 80 deletions(-)

diff --git a/scripts/performance/config_perf_spike_load.yml b/scripts/performance/config_perf_spike_load.yml
index 3bdbc39e1..a2d8cfad7 100644
--- a/scripts/performance/config_perf_spike_load.yml
+++ b/scripts/performance/config_perf_spike_load.yml
@@ -1,35 +1,53 @@
 # perf_spike_load.py arguments
-perf_spike:
-    read_mode: spike # permanent = background reading operation with writing spikes, spike = reading and writing spikes with 0 load in between
-    spike_time_in_seconds: 600
-    rest_time_in_seconds: 600
-    overall_time_in_seconds: 600 # 1h = 3600 sec, 24h = 86400
+profile:
+    mode: stress # permanent = background reading operation with writing spikes,
+                 # spike = reading and writing spikes with 0 load in between,
+                 # stress = one stepwise increasing spike for breaking point checking scenario
+    spike_time_in_seconds: 600
+    rest_time_in_seconds: 600
+    overall_time_in_seconds: 60 # 1h = 3600 sec, 24h = 86400
+
 # perf_processes.py will be called with arguments provided below during perf_spike_load.py execution. You may move arguments between common and _txns sections:
 # e.g. if you want to define different clients number for reading and writing transactions, just copy "clients" to read_ and write_txns sections and provide values.
 # The script takes the most specific arguments (for transactions rather than common) in case of a duplication.
 common:
-    seed: 000000000000000000000000Trustee1
-    num: 1
-    refresh: 100
-    buff_req: 300
-    sep: "|"
-    wallet_key: key
-    mode: p
-    pool_config: ""
-    sync_mode: all
-    out: "/home/me/Documents/permanent.log"
-    genesis: /home/me/Documents/stab_nodes_genesis
-    directory: ~/Documents/
-read_txns:
+    seed: 000000000000000000000000Trustee1
+    num: 1
+    refresh: 60
+    buff_req: 300
+    sep: "|"
+    wallet_key: key
+    mode: p
+    pool_config: ""
+    sync_mode: all
+    out: ""
+    genesis: /home/me/Documents/stab_nodes_genesis
+    directory: ~/Documents/
+
+background: # is used in permanent and stress modes
+    clients: 1
+    kind: get_nym
+    load_rate: 10
+    stepwise:
+        step_time_in_seconds: 10 # for stepwise testing, 0 means stable load without growing
+        step_txns_per_second: 2 # additional load rate per step
+        step_initial_load_rate: 2 # initial load rate per spike
+
+spikes: # is used in permanent and spike modes
+    spike1:
     clients: 10
-    kind: "{\"schema\": 2, \"cred_def\": 2, \"nym\": 14, \"attrib\": 2}"
+    kind: nym
     load_rate: 10
-write_txns:
+    stepwise:
+        step_time_in_seconds: 10 # for stepwise testing, 0 means stable load without growing
+        step_txns_per_second: 2 # additional load rate per step
+        step_initial_load_rate: 2 # initial load rate per spike
+    spike2:
     clients: 1
-    kind: "{\"get_nym\": 9, \"get_schema\": 4, \"get_cred_def\": 4, \"get_attrib\": 3}"
+    kind: nym
     load_rate: 10
-stepwise_load:
-    step_time_in_seconds: 0 # for stepwise testing, 0 means stable load without increasing
-    step_txns_per_second: 0 # additional load rate per step
-    step_initial_load_rate: 0 # initial load rate per spike
\ No newline at end of file
+    stepwise:
+        step_time_in_seconds: 10 # for stepwise testing, 0 means stable load without growing
+        step_txns_per_second: 1 # additional load rate per step
+        step_initial_load_rate: 1 # initial load rate per spike
\ No newline at end of file
diff --git a/scripts/performance/perf_spike_load.py b/scripts/performance/perf_spike_load.py
index 36d102814..af2acd728 100644
--- a/scripts/performance/perf_spike_load.py
+++ b/scripts/performance/perf_spike_load.py
@@ -11,74 +11,128 @@ import os
 
 
-def create_subprocess_args(config, sub_process_type, folder_count, log_folder_name):
+def create_output_directory(folder_path):
+    output_folder = ""
+    for folder in folder_path:
+        output_folder = os.path.join(output_folder, folder)
+    try:
+        output_folder = os.path.expanduser(output_folder)
+    except OSError:
+        raise ValueError("Bad output log folder pathname!")
+    if not os.path.isdir(output_folder):
+        os.makedirs(output_folder)
+    directory = "--directory={}".format(output_folder)
+    return directory
+
+
+def create_subprocess(config, sub_process_type, directory_arg, load_time, load_rate=None):
     args = ["python3", "perf_processes.py"]
     common_args = config["common"].copy()
-    if "read" in sub_process_type:
-        common_args.update(config["read_txns"])
-    elif "write" in sub_process_type:
-        common_args.update(config["write_txns"])
+    common_args.update(config[sub_process_type])
     for dict_key in common_args:
         if dict_key == "directory":
-            output_folder = os.path.join(str(common_args[dict_key]), log_folder_name,
-                                         "{}_{}".format(sub_process_type, folder_count))
-            try:
-                output_folder = os.path.expanduser(output_folder)
-            except OSError:
-                raise ValueError("Bad output log folder pathname!")
-            if not os.path.isdir(output_folder):
-                os.makedirs(output_folder)
-            args.append("--{}={}".format(dict_key, output_folder))
+            args.append(directory_arg)
+        elif "stepwise" in dict_key:
+            continue
+        elif load_rate is not None and dict_key == "load_rate":
+            args.append("--{}={}".format(dict_key, load_rate))
         else:
             args.append("--{}={}".format(dict_key, common_args[dict_key]))
-    if "background" in sub_process_type:
-        args.append("--load_time={}".format(config["perf_spike"]["overall_time_in_seconds"]))
-    elif "spike" in sub_process_type:
-        args.append("--load_time={}".format(config["perf_spike"]["spike_time_in_seconds"]))
-    return args
+    args.append("--load_time={}".format(load_time))
+    subprocess.Popen(args, close_fds=True)
+    return
 
 
 def start_profile():
-    folder_count = 0 # ordering number of the spike which goes to logs folder name
-    root_log_folder_name = "Spike_log {}".format(time.strftime("%m-%d-%y %H-%M-%S"))
     with open("config_perf_spike_load.yml") as file:
         config = yaml.load(file)
-    if config["perf_spike"]["read_mode"] == 'permanent':
-        print("""
-    ==========================================================================================
-    The script creates writing transaction spikes, during intervals there is a background load
-    of reading transactions
-    ==========================================================================================""")
-    elif config["perf_spike"]["read_mode"] == 'spike':
-        print("""
-    ============================================================================================
-    The script creates reading and writing transaction spikes, during intervals there is no load
-    ============================================================================================""")
-    print("Reading transactions mode: ", config["perf_spike"]["read_mode"])
-    print("Every spike time in seconds: ", config["perf_spike"]["spike_time_in_seconds"])
-    print("Interval between spikes in seconds: ", config["perf_spike"]["rest_time_in_seconds"])
-    print("Overall time in minutes: ", config["perf_spike"]["overall_time_in_seconds"] / 60)
-    if config["perf_spike"]["read_mode"] == 'permanent' and config["read_txns"]["step_time_in_seconds"] != 0:
-        subprocess_args = create_subprocess_args(config, "read_background", folder_count, root_log_folder_name)
-        subprocess.Popen(subprocess_args, close_fds=True)
-    elif config["perf_spike"]["read_mode"] == 'permanent' and config["read_txns"]["step_time_in_seconds"] == 0:
-        steps_number = int(config["perf_spike"]["spike_time_in_seconds"] / config["read_txns"]["step_time_in_seconds"])
-        for i in range(0, steps_number):
-            subprocess_args = create_subprocess_args(config, "read_background", folder_count, root_log_folder_name)
-            subprocess.Popen(subprocess_args, close_fds=True)
-    end_time = datetime.now() + timedelta(seconds=int(config["perf_spike"]["overall_time_in_seconds"]))
-    while datetime.now() < end_time:
+    mode = config["profile"]["mode"]
+    if mode == "stress":
+        stress_profile(config)
+    elif mode == "permanent":
+        permanent_profile(config)
+    elif mode == "spike":
+        spike_profile(config)
+
+
+def run_spikes(config, root_log_folder_name):
+    spike_time_in_seconds = config["profile"]["spike_time_in_seconds"]
+    folder_count = 0
+    key = None
+    for key in config["spikes"].keys():
         folder_count += 1
-        if config["perf_spike"]["read_mode"] == 'spike':
-            # start profile with reading transactions for x minutes
-            subprocess_args = create_subprocess_args(config, "read_spike", folder_count, root_log_folder_name)
-            subprocess.Popen(subprocess_args, close_fds=True)
+        if config["spikes"][key]["step_time_in_seconds"] != 0:
+            sub_process_type = "spike"
+            step_time_in_seconds = int(config[key]["stepwise"]["step_time_in_seconds"])
+            step_txns_per_second = int(config[key]["stepwise"]["step_txns_per_second"])
+            step_load_rate = int(config[key]["stepwise"]["step_initial_load_rate"])
+            steps_number = int(spike_time_in_seconds / step_time_in_seconds)
+
+            for i in range(0, steps_number):
+                load_time = spike_time_in_seconds - step_time_in_seconds * i
+                directory = [config["common"]["directory"], root_log_folder_name,
+                             " {}_{}".format(sub_process_type, folder_count)]
+                directory_arg = create_output_directory(directory)
+                create_subprocess(config, sub_process_type, directory_arg, load_time, step_load_rate)
+                step_load_rate = step_txns_per_second
+                time.sleep(step_time_in_seconds)
+                folder_count += 1
+        else:
+            sub_process_type = key
+            directory_parts = [config["common"]["directory"], root_log_folder_name,
+                               "{}_{}".format(sub_process_type, folder_count)]
+            directory_arg = create_output_directory(directory_parts)
+            create_subprocess(config, sub_process_type, directory_arg, spike_time_in_seconds)
+        print("Spike {}".format(key))
+        time.sleep(int(config["profile"]["spike_time_in_seconds"]) +
+                   int(config["profile"]["rest_time_in_seconds"]))
+
+
+def spike_profile(config):
+    root_log_folder_name = "Spike_log {}".format(time.strftime("%m-%d-%y %H-%M-%S"))
+
+    end_time = datetime.now() + timedelta(seconds=int(config["profile"]["overall_time_in_seconds"]))
+    while datetime.now() < end_time:
+        run_spikes(config, root_log_folder_name)
+
+
+def permanent_profile(config):
+    root_log_folder_name = "Spike_with_bg_log {}".format(time.strftime("%m-%d-%y %H-%M-%S"))
+    directory = [config["common"]["directory"], root_log_folder_name,
+                 "{}_{}".format("background", "0")]
+    directory_arg = create_output_directory(directory)
+    create_subprocess(config, "background", directory_arg, config["profile"]["overall_time_in_seconds"])
+
+    end_time = datetime.now() + timedelta(seconds=int(config["profile"]["overall_time_in_seconds"]))
+    while datetime.now() < end_time:
+        run_spikes(config, root_log_folder_name)
+
+
+def stress_profile(config):
+    folder_count = 1 # ordering number of the spike which goes to logs folder name
+    root_log_folder_name = "Stress_log_{}".format(time.strftime("%m-%d-%y_%H-%M-%S"))
+    overall_time_in_seconds = int(config["profile"]["overall_time_in_seconds"])
+    sub_process_type = "background"
+    if config["background"]["stepwise"]["step_time_in_seconds"] != 0:
+        step_time_in_seconds = int(config["background"]["stepwise"]["step_time_in_seconds"])
+        step_txns_per_second = int(config["background"]["stepwise"]["step_txns_per_second"])
+        step_load_rate = int(config["background"]["stepwise"]["step_initial_load_rate"])
+        steps_number = int(overall_time_in_seconds/step_time_in_seconds)
+
+        for i in range(0, steps_number):
+            load_time = overall_time_in_seconds - step_time_in_seconds * i
+            directory = [config["common"]["directory"], root_log_folder_name,
+                         "{}_{}".format(sub_process_type, folder_count)]
+            directory_arg = create_output_directory(directory)
+            create_subprocess(config, sub_process_type, directory_arg, load_time, step_load_rate)
+            step_load_rate = step_txns_per_second
+            time.sleep(step_time_in_seconds)
+            folder_count += 1
+    else:
+        directory_parts = [config["common"]["directory"], root_log_folder_name, "Stable_stress"]
+        directory_arg = create_output_directory(directory_parts)
+        create_subprocess(config, sub_process_type, directory_arg, overall_time_in_seconds)
+        print("Stress stable")
 
 
 if __name__ == '__main__':
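The stepwise branch in stress_profile above is easier to see numerically: each launched process keeps running until the end of the window, so the instantaneous rate is the sum of all processes alive at that moment. A sketch with assumed numbers, not taken from the config:

    # Overlapping launches in miniature: launch i starts at i * step_time and
    # runs for overall_time - i * step_time seconds, so every launch ends at
    # the same moment and the total rate climbs step by step.
    overall_time, step_time = 60, 10
    initial_rate, step_rate = 2, 2

    for i in range(int(overall_time / step_time)):
        start = i * step_time
        load_time = overall_time - step_time * i
        rate = initial_rate if i == 0 else step_rate
        print("t={:2d}s: start a +{} tps process for {}s".format(start, rate, load_time))
    # while k processes overlap, the total rate is initial_rate + (k - 1) * step_rate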
From bd6cb000b76c6feb52faf257e7d03a1274c30e2e Mon Sep 17 00:00:00 2001
From: NataliaDracheva
Date: Tue, 4 Sep 2018 19:04:50 +0300
Subject: [PATCH 4/7] [INDY-1666] Added a possibility to check load profile before running.

Signed-off-by: NataliaDracheva
---
 .../performance/config_perf_spike_load.yml |  57 ++--
 scripts/performance/perf_spike_load.py     | 266 +++++++++++-------
 2 files changed, 190 insertions(+), 133 deletions(-)

diff --git a/scripts/performance/config_perf_spike_load.yml b/scripts/performance/config_perf_spike_load.yml
index a2d8cfad7..52a2dfaed 100644
--- a/scripts/performance/config_perf_spike_load.yml
+++ b/scripts/performance/config_perf_spike_load.yml
@@ -1,17 +1,14 @@
 # perf_spike_load.py arguments
 profile:
-    mode: stress # permanent = background reading operation with writing spikes,
-                 # spike = reading and writing spikes with 0 load in between,
-                 # stress = one stepwise increasing spike for breaking point checking scenario
-    spike_time_in_seconds: 600
-    rest_time_in_seconds: 600
-    overall_time_in_seconds: 60 # 1h = 3600 sec, 24h = 86400
+    spike_time_in_seconds: 60
+    rest_time_in_seconds: 20
+    overall_time_in_seconds: 600 # 1h = 3600 sec, 24h = 86400
 
-# perf_processes.py will be called with arguments provided below during perf_spike_load.py execution. You may move arguments between common and _txns sections:
-# e.g. if you want to define different clients number for reading and writing transactions, just copy "clients" to read_ and write_txns sections and provide values.
-# The script takes the most specific arguments (for transactions rather than common) in case of a duplication.
+# perf_processes.py will be called with arguments provided below during perf_spike_load.py execution. You may move arguments between common and processes sections:
+# e.g. if you want to define different clients number for reading and writing transactions, just copy "clients" to any of processes sections and provide values.
+# The script takes the most specific arguments (for processes rather than common) in case of a duplication.
 common:
+    clients: 1
     seed: 000000000000000000000000Trustee1
     num: 1
     refresh: 60
@@ -22,32 +19,24 @@ common:
     pool_config: ""
     sync_mode: all
     out: ""
-    genesis: /home/me/Documents/stab_nodes_genesis
+    genesis: /home/me/Documents/genesis
     directory: ~/Documents/
 
-background: # is used in permanent and stress modes
-    clients: 1
-    kind: get_nym
-    load_rate: 10
-    stepwise:
-        step_time_in_seconds: 10 # for stepwise testing, 0 means stable load without growing
-        step_txns_per_second: 2 # additional load rate per step
-        step_initial_load_rate: 2 # initial load rate per spike
+processes:
+    background: # set all step_* parameter values to 0 if background load is not needed
+        kind: get_nym
+        step_time_in_seconds: 10 # for stepwise testing, 0 means stable load without growing
+        step_initial_load_rate: 0 # initial load rate per spike (and stable load rate if not stepwise)
+        step_final_load_rate: 100 # additional load rate per step
 
-spikes: # is used in permanent and spike modes
-    spike1:
-    clients: 10
-    kind: nym
-    load_rate: 10
-    stepwise:
-        step_time_in_seconds: 10 # for stepwise testing, 0 means stable load without growing
-        step_txns_per_second: 2 # additional load rate per step
-        step_initial_load_rate: 2 # initial load rate per spike
-    spike2:
-    clients: 1
-    kind: nym
-    load_rate: 10
-    stepwise:
-        step_time_in_seconds: 10 # for stepwise testing, 0 means stable load without growing
-        step_txns_per_second: 1 # additional load rate per step
-        step_initial_load_rate: 1 # initial load rate per spike
\ No newline at end of file
+    spikes: # set all step_* parameter values to 0 if spike load is not needed
+        kind: nym
+        step_time_in_seconds: 5 # for stepwise testing, 0 means stable load without growing
+        step_initial_load_rate: 30 # initial load rate per spike (and stable load rate if not stepwise)
+        step_final_load_rate: 90 # final load rate per spike
+
+    spike2: # remove additional spikes if not needed
+        kind: nym
+        step_time_in_seconds: 20 # for stepwise testing, 0 means stable load without growing
+        step_initial_load_rate: 10 # initial load rate per spike (and stable load rate if not stepwise)
+        step_final_load_rate: 20 # final load rate per spike
diff --git a/scripts/performance/perf_spike_load.py b/scripts/performance/perf_spike_load.py
index af2acd728..6258b8be5 100644
--- a/scripts/performance/perf_spike_load.py
+++ b/scripts/performance/perf_spike_load.py
@@ -1,14 +1,23 @@
 #!/usr/bin/env python3
-"""This script uses another load script (perf_processes.py) running it with different parameters which are
-provided in config_perf_spike_load.yml file"""
-
-from datetime import timedelta, datetime
 import subprocess
 import yaml
 import time
 import os
+import collections
+import matplotlib.pyplot as plt
+import numpy as np
+import argparse
+
+parser = argparse.ArgumentParser(description='This script uses another load script (perf_processes.py) running it '
+                                             'with different parameters which are provided in '
+                                             'config_perf_spike_load.yml file')
+
+parser.add_argument('-g', '--graph', action='store_true', required=False, dest='graph',
+                    help='Build a graph to check if the configured profile is correct')
+
+args = parser.parse_args()
 
 
 def create_output_directory(folder_path):
@@ -19,120 +28,179 @@ def create_output_directory(folder_path):
         output_folder = os.path.expanduser(output_folder)
     except OSError:
         raise ValueError("Bad output log folder pathname!")
-    if not os.path.isdir(output_folder):
+    if not os.path.isdir(output_folder) and not args.graph:
         os.makedirs(output_folder)
     directory = "--directory={}".format(output_folder)
     return directory
 
 
-def create_subprocess(config, sub_process_type, directory_arg, load_time, load_rate=None):
-    args = ["python3", "perf_processes.py"]
+def get_args(config, process_type, directory_arg):
+    args_for_script = ["python3", "perf_processes.py"]
     common_args = config["common"].copy()
-    common_args.update(config[sub_process_type])
+    common_args.update(config["processes"][process_type])
     for dict_key in common_args:
         if dict_key == "directory":
-            args.append(directory_arg)
-        elif "stepwise" in dict_key:
+            args_for_script.append(directory_arg)
+        elif "step" in dict_key:
             continue
-        elif load_rate is not None and dict_key == "load_rate":
-            args.append("--{}={}".format(dict_key, load_rate))
         else:
-            args.append("--{}={}".format(dict_key, common_args[dict_key]))
-    args.append("--load_time={}".format(load_time))
-    subprocess.Popen(args, close_fds=True)
-    return
+            args_for_script.append("--{}={}".format(dict_key, common_args[dict_key]))
+    return args_for_script
 
 
-def start_profile():
-    with open("config_perf_spike_load.yml") as file:
-        config = yaml.load(file)
-    mode = config["profile"]["mode"]
-    if mode == "stress":
-        stress_profile(config)
-    elif mode == "permanent":
-        permanent_profile(config)
-    elif mode == "spike":
-        spike_profile(config)
-
-
-def run_spikes(config, root_log_folder_name):
-    spike_time_in_seconds = config["profile"]["spike_time_in_seconds"]
-    folder_count = 0
-    key = None
-    for key in config["spikes"].keys():
-        folder_count += 1
-        if config["spikes"][key]["step_time_in_seconds"] != 0:
-            sub_process_type = "spike"
-            step_time_in_seconds = int(config[key]["stepwise"]["step_time_in_seconds"])
-            step_txns_per_second = int(config[key]["stepwise"]["step_txns_per_second"])
-            step_load_rate = int(config[key]["stepwise"]["step_initial_load_rate"])
-            steps_number = int(spike_time_in_seconds / step_time_in_seconds)
-
-            for i in range(0, steps_number):
-                load_time = spike_time_in_seconds - step_time_in_seconds * i
-                directory = [config["common"]["directory"], root_log_folder_name,
-                             " {}_{}".format(sub_process_type, folder_count)]
-                directory_arg = create_output_directory(directory)
-                create_subprocess(config, sub_process_type, directory_arg, load_time, step_load_rate)
-                step_load_rate = step_txns_per_second
-                time.sleep(step_time_in_seconds)
-                folder_count += 1
-        else:
-            sub_process_type = key
-            directory_parts = [config["common"]["directory"], root_log_folder_name,
-                               "{}_{}".format(sub_process_type, folder_count)]
-            directory_arg = create_output_directory(directory_parts)
-            create_subprocess(config, sub_process_type, directory_arg, spike_time_in_seconds)
-        print("Spike {}".format(key))
-        time.sleep(int(config["profile"]["spike_time_in_seconds"]) +
-                   int(config["profile"]["rest_time_in_seconds"]))
-
-
-def spike_profile(config):
-    root_log_folder_name = "Spike_log {}".format(time.strftime("%m-%d-%y %H-%M-%S"))
-
-    end_time = datetime.now() + timedelta(seconds=int(config["profile"]["overall_time_in_seconds"]))
-    while datetime.now() < end_time:
-        run_spikes(config, root_log_folder_name)
-
-
-def permanent_profile(config):
-    root_log_folder_name = "Spike_with_bg_log {}".format(time.strftime("%m-%d-%y %H-%M-%S"))
-    directory = [config["common"]["directory"], root_log_folder_name,
-                 "{}_{}".format("background", "0")]
-    directory_arg = create_output_directory(directory)
-    create_subprocess(config, "background", directory_arg, config["profile"]["overall_time_in_seconds"])
-
-    end_time = datetime.now() + timedelta(seconds=int(config["profile"]["overall_time_in_seconds"]))
-    while datetime.now() < end_time:
-        run_spikes(config, root_log_folder_name)
-
-
-def stress_profile(config):
-    folder_count = 1 # ordering number of the spike which goes to logs folder name
-    root_log_folder_name = "Stress_log_{}".format(time.strftime("%m-%d-%y_%H-%M-%S"))
-    overall_time_in_seconds = int(config["profile"]["overall_time_in_seconds"])
-    sub_process_type = "background"
-    if config["background"]["stepwise"]["step_time_in_seconds"] != 0:
-        step_time_in_seconds = int(config["background"]["stepwise"]["step_time_in_seconds"])
-        step_txns_per_second = int(config["background"]["stepwise"]["step_txns_per_second"])
-        step_load_rate = int(config["background"]["stepwise"]["step_initial_load_rate"])
-        steps_number = int(overall_time_in_seconds/step_time_in_seconds)
-
-        for i in range(0, steps_number):
-            load_time = overall_time_in_seconds - step_time_in_seconds * i
-            directory = [config["common"]["directory"], root_log_folder_name,
-                         "{}_{}".format(sub_process_type, folder_count)]
-            directory_arg = create_output_directory(directory)
-            create_subprocess(config, sub_process_type, directory_arg, load_time, step_load_rate)
-            step_load_rate = step_txns_per_second
-            time.sleep(step_time_in_seconds)
-            folder_count += 1
-    else:
-        directory_parts = [config["common"]["directory"], root_log_folder_name, "Stable_stress"]
-        directory_arg = create_output_directory(directory_parts)
-        create_subprocess(config, sub_process_type, directory_arg, overall_time_in_seconds)
-        print("Stress stable")
+def order_processes(delays, args_for_script):
+    processed_delays = []
+    processes_dictionary = {}
+    for delay in delays:
+        if delay in processed_delays:
+            continue
+        else:
+            processed_delays.append(delay)
+        delays_indices = [i for i, e in enumerate(delays) if e == delay]
+        args_list = []
+        for index in delays_indices:
+            args_list.append(args_for_script[index])
+        processes_dictionary.update({delay: args_list})
+    processes_dictionary_sorted = collections.OrderedDict(sorted(processes_dictionary.items()))
+    return processes_dictionary_sorted
+
+
+def collect_delays(function_parameters, time_interval, spike_delay=0):
+    args_for_script = function_parameters[0]
+    step_time = function_parameters[1]
+    step_initial_load = function_parameters[2]
+    step_final_load = function_parameters[3]
+    args_copy = args_for_script.copy()
+    args_to_send = []
+    delay = []
+    if step_time != 0 and step_final_load != step_initial_load:
+        step_number = int(time_interval / step_time)
+        step_value = int((step_final_load - step_initial_load) / step_number)
+        if step_value == 0:
+            raise ValueError("There should be at least one transaction per step.")
+        for i in range(0, step_number):
+            load_time = time_interval - step_time * i
+            args_copy = args_for_script.copy()
+            args_copy.append("--load_time={}".format(load_time))
+            if i != 0:
+                args_copy.append("--load_rate={}".format(step_value))
+            else:
+                args_copy.append("--load_rate={}".format(step_initial_load))
+            delay.append(spike_delay + time_interval - load_time)
+            args_to_send.append(args_copy)
+            step_number += 1
+    else:
+        delay.append(spike_delay)
+        args_copy.append("--load_time={}".format(time_interval))
+        args_copy.append("--load_rate={}".format(step_initial_load))
+        args_to_send.append(args_copy)
+    return [delay, args_to_send]
+
+
+def collect_processes(config):
+    root_log_folder_name = "Stress_log_{}".format(time.strftime("%m-%d-%y_%H-%M-%S"))
+    processes = list(config["processes"].keys())
+    functions = {}
+    for process_name in processes:
+        step_time_in_seconds = config["processes"][process_name]["step_time_in_seconds"]
+        if step_time_in_seconds == 0:
+            continue
+        initial_rate = config["processes"][process_name]["step_initial_load_rate"]
+        final_rate = config["processes"][process_name]["step_final_load_rate"]
+        if initial_rate > final_rate:
+            raise ValueError("In {} block initial rate is bigger than final!".format(process_name))
+        directory = [config["common"]["directory"], root_log_folder_name, process_name]
+        directory_arg = create_output_directory(directory)
+        args_for_script = get_args(config, process_name, directory_arg)
+        step_parameters = [args_for_script, step_time_in_seconds, initial_rate, final_rate]
+        functions.update({process_name: step_parameters})
+    return functions
+
+
+def start_profile():
+    with open("config_perf_spike_load.yml") as file:
+        config = yaml.load(file)
+    spike_time = config["profile"]["spike_time_in_seconds"]
+    rest_time = config["profile"]["rest_time_in_seconds"]
+    overall_time_in_seconds = int(config["profile"]["overall_time_in_seconds"])
+    delays_list = []
+    args_list = []
+    background_plot = []
+    processes_dict = collect_processes(config)
+    if "background" in list(processes_dict.keys()):
+        delays_args_list = collect_delays(processes_dict["background"], overall_time_in_seconds)
+        delays_list.extend(delays_args_list[0])
+        args_list.extend(delays_args_list[1])
+        background_plot = prepare_plot_values(delays_args_list)
+
+    spike_plots_list = []
+    time_count = 0
+    spikes_list = filter(lambda i: "background" not in i, list(processes_dict.keys()))
+    spike_number = 0
+    for spike in spikes_list:
+        while time_count < overall_time_in_seconds:
+            spike_delay = (spike_time + rest_time) * spike_number
+            delays_args_list = (collect_delays(processes_dict[spike], spike_time, spike_delay))
+            delays_list.extend(delays_args_list[0])
+            args_list.extend(delays_args_list[1])
+            spike_plots_list.append(prepare_plot_values(delays_args_list))
+            spike_number += 1
+            time_count += spike_time + rest_time
+
+        spike_number = 0
+        time_count = 0
+
+    if args.graph:
+        build_plot_on_config(background_plot, spike_plots_list)
+    else:
+        prepared = order_processes(delays_list, args_list)
+        time_count = 0
+        for item in prepared.keys():
+            time.sleep(item - time_count)
+            for process_args in prepared[item]:
+                subprocess.Popen(process_args, close_fds=True)
+            time_count = item
+
+
+def prepare_plot_values(delays_args_list):
+    delays = delays_args_list[0]
+    args_for_script = delays_args_list[1]
+    plot_dict = {}
+    for i in range(0, len(delays)):
+        plot_dict.update({delays[i]: int(args_for_script[i][-1].split("=")[-1])})
+    plot_dict_sorted = collections.OrderedDict(sorted(plot_dict.items()))
+    return plot_dict_sorted
+
+
+def add_plot(ax, args_dict, color):
+    step_count = 1
+    time_ax = []
+    load_rate = []
+    for delay in args_dict.keys():
+        step_load_rate = args_dict[delay]
+        time_ax.append(delay)
+        if step_count != 1:
+            load_rate.append(load_rate[0] + step_load_rate * (step_count - 1))
+        else:
+            load_rate.append(step_load_rate)
+        step_count += 1
+    time_ax.append((time_ax[-1] - time_ax[-2]) + time_ax[-1])
+    load_rate.append((load_rate[-1] - load_rate[-2]) + load_rate[-1])
+    ax.fill_between(time_ax, load_rate, facecolor=color, alpha=0.4)
+
+
+def build_plot_on_config(background, spikes):
+    figure, ax = plt.subplots(1, 1)
+    if len(background) != 0:
+        add_plot(ax, background, 'b')
+    if len(spikes) != 0:
+        for spike in spikes:
+            add_plot(ax, spike, 'g')
+    start, stop = ax.get_ylim()
+    ticks = np.arange(start, stop + (stop // 10), stop // 10)
+    ax.set_yticks(ticks)
+    ax.grid()
+    plt.show()
 
 
 if __name__ == '__main__':
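The scheduling core added in this patch groups the prepared argument lists by start delay and then walks the delays in order, sleeping only for the gap since the previous launch. The same logic in a self-contained sketch (echo stands in for perf_processes.py; delays are assumed values):

    import collections
    import subprocess
    import time

    delays = [0, 10, 0, 30]
    args_for_script = [["echo", "background"], ["echo", "spike1-step2"],
                       ["echo", "spike1-step1"], ["echo", "spike2"]]

    # Group the argument lists by start delay, smallest delay first.
    grouped = collections.OrderedDict()
    for delay in sorted(set(delays)):
        grouped[delay] = [args_for_script[i] for i, d in enumerate(delays) if d == delay]

    # Sleep only the gap since the previous batch, then fire the whole batch.
    time_count = 0
    for delay, batch in grouped.items():
        time.sleep(delay - time_count)
        for process_args in batch:
            subprocess.Popen(process_args, close_fds=True)
        time_count = delay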
From a7c82b66310a9fdf18746f04b14a6421592d3752 Mon Sep 17 00:00:00 2001
From: NataliaDracheva
Date: Wed, 5 Sep 2018 10:45:21 +0300
Subject: [PATCH 5/7] [INDY-1666] Fixes as per review: set instead of list for unique values storage.

Signed-off-by: NataliaDracheva
---
 scripts/performance/config_perf_spike_load.yml |  6 +++---
 scripts/performance/perf_spike_load.py         | 10 ++++------
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/scripts/performance/config_perf_spike_load.yml b/scripts/performance/config_perf_spike_load.yml
index 52a2dfaed..bb7056ff0 100644
--- a/scripts/performance/config_perf_spike_load.yml
+++ b/scripts/performance/config_perf_spike_load.yml
@@ -18,7 +18,7 @@ common:
     mode: p
     pool_config: ""
     sync_mode: all
-    out: ""
+    out: "load_script_output.txt"
     genesis: /home/me/Documents/genesis
     directory: ~/Documents/
 
@@ -31,12 +31,12 @@ processes:
     spikes: # set all step_* parameter values to 0 if spike load is not needed
         kind: nym
-        step_time_in_seconds: 5 # for stepwise testing, 0 means stable load without growing
+        step_time_in_seconds: 0 # for stepwise testing, 0 means stable load without growing
         step_initial_load_rate: 30 # initial load rate per spike (and stable load rate if not stepwise)
         step_final_load_rate: 90 # final load rate per spike
 
     spike2: # remove additional spikes if not needed
         kind: nym
-        step_time_in_seconds: 20 # for stepwise testing, 0 means stable load without growing
+        step_time_in_seconds: 0 # for stepwise testing, 0 means stable load without growing
         step_initial_load_rate: 10 # initial load rate per spike (and stable load rate if not stepwise)
         step_final_load_rate: 20 # final load rate per spike
diff --git a/scripts/performance/perf_spike_load.py b/scripts/performance/perf_spike_load.py
index 6258b8be5..a8377f95a 100644
--- a/scripts/performance/perf_spike_load.py
+++ b/scripts/performance/perf_spike_load.py
@@ -49,13 +49,11 @@ def get_args(config, process_type, directory_arg):
 
 
 def order_processes(delays, args_for_script):
-    processed_delays = []
+    assert len(delays) == len(args_for_script), 'Can not order the processes as a list of delays length is not equal ' \
+                                                'to a list of arguments length.'
+    unique_delays = set(delays)
     processes_dictionary = {}
-    for delay in delays:
-        if delay in processed_delays:
-            continue
-        else:
-            processed_delays.append(delay)
+    for delay in unique_delays:
         delays_indices = [i for i, e in enumerate(delays) if e == delay]
         args_list = []
         for index in delays_indices:
             args_list.append(args_for_script[index])

From 92dfddb8ed543c1df1699052a8e2cffffefaf140 Mon Sep 17 00:00:00 2001
From: NataliaDracheva
Date: Wed, 5 Sep 2018 11:01:53 +0300
Subject: [PATCH 6/7] [INDY-1666] Fixes as per review: output folder creation.

Signed-off-by: NataliaDracheva
---
 scripts/performance/perf_spike_load.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/scripts/performance/perf_spike_load.py b/scripts/performance/perf_spike_load.py
index a8377f95a..5e3748d07 100644
--- a/scripts/performance/perf_spike_load.py
+++ b/scripts/performance/perf_spike_load.py
@@ -21,9 +21,7 @@
 
 
 def create_output_directory(folder_path):
-    output_folder = ""
-    for folder in folder_path:
-        output_folder = os.path.join(output_folder, folder)
+    output_folder = os.path.join(folder_path[0], *folder_path[1:])
     try:
         output_folder = os.path.expanduser(output_folder)
     except OSError:
@@ -96,7 +94,7 @@ def collect_processes(config):
-    root_log_folder_name = "Stress_log_{}".format(time.strftime("%m-%d-%y_%H-%M-%S"))
+    root_log_folder_name = "Spike_log_{}".format(time.strftime("%m-%d-%y_%H-%M-%S"))
     processes = list(config["processes"].keys())
     functions = {}
     for process_name in processes:
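The one-call join that replaces the loop above behaves identically for list input. A quick usage sketch (the paths are placeholders, not real output of the script):

    import os

    folder_path = ["~/Documents/", "Spike_log_09-05-18_11-01-53", "background"]
    # Unpack the tail of the list so a single os.path.join call builds the path.
    output_folder = os.path.join(folder_path[0], *folder_path[1:])
    print(os.path.expanduser(output_folder))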
From daa07b40658094a4919680d6484e7a59fcc23953 Mon Sep 17 00:00:00 2001
From: NataliaDracheva
Date: Wed, 5 Sep 2018 13:08:00 +0300
Subject: [PATCH 7/7] [INDY-1666] Fixes as per review: iterations through dicts, removing redundant variables.

Signed-off-by: NataliaDracheva
---
 scripts/performance/perf_spike_load.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/scripts/performance/perf_spike_load.py b/scripts/performance/perf_spike_load.py
index 5e3748d07..f70f30e07 100644
--- a/scripts/performance/perf_spike_load.py
+++ b/scripts/performance/perf_spike_load.py
@@ -84,7 +84,6 @@ def collect_delays(function_parameters, time_interval, spike_delay=0):
                 args_copy.append("--load_rate={}".format(step_initial_load))
             delay.append(spike_delay + time_interval - load_time)
             args_to_send.append(args_copy)
-            step_number += 1
     else:
         delay.append(spike_delay)
         args_copy.append("--load_time={}".format(time_interval))
@@ -123,7 +122,7 @@ def start_profile():
     args_list = []
     background_plot = []
     processes_dict = collect_processes(config)
-    if "background" in list(processes_dict.keys()):
+    if "background" in processes_dict.keys():
         delays_args_list = collect_delays(processes_dict["background"], overall_time_in_seconds)
         delays_list.extend(delays_args_list[0])
         args_list.extend(delays_args_list[1])
@@ -131,12 +130,13 @@ def start_profile():
 
     spike_plots_list = []
     time_count = 0
-    spikes_list = filter(lambda i: "background" not in i, list(processes_dict.keys()))
     spike_number = 0
-    for spike in spikes_list:
+    for spike, spike_args in processes_dict.items():
+        if spike == "background":
+            continue
         while time_count < overall_time_in_seconds:
             spike_delay = (spike_time + rest_time) * spike_number
-            delays_args_list = (collect_delays(processes_dict[spike], spike_time, spike_delay))
+            delays_args_list = (collect_delays(spike_args, spike_time, spike_delay))
             delays_list.extend(delays_args_list[0])
             args_list.extend(delays_args_list[1])
             spike_plots_list.append(prepare_plot_values(delays_args_list))
@@ -172,8 +172,8 @@ def add_plot(ax, args_dict, color):
     step_count = 1
     time_ax = []
     load_rate = []
-    for delay in args_dict.keys():
-        step_load_rate = args_dict[delay]
+    for delay, args_for_plot in args_dict.items():
+        step_load_rate = args_for_plot
         time_ax.append(delay)
         if step_count != 1:
             load_rate.append(load_rate[0] + step_load_rate * (step_count - 1))
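The pattern this final patch settles on, iterating items() and skipping entries by key, reads in isolation like this (illustrative data, not the script's real dict):

    processes_dict = {"background": ["--kind=get_nym"], "spikes": ["--kind=nym"]}

    # items() yields key and value together, so no second lookup is needed;
    # the background entry is skipped here because it is scheduled separately.
    for name, args_for_process in processes_dict.items():
        if name == "background":
            continue
        print(name, args_for_process)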