@@ -19,7 +19,6 @@ def cli():
     cli function to group commands
     """

-
 # pylint: disable=too-many-locals
 @click.command()
 @click.option("--config", default="config.yaml", help="Path to the configuration file")
@@ -60,46 +59,19 @@ def orion(config, debug, output):
             print("No UUID present for given metadata")
             sys.exit()

-        runs = match.match_kube_burner(uuids)
-        ids = match.filter_runs(runs, runs)
+        if metadata["benchmark"] == "k8s-netperf":
+            index = "k8s-netperf"
+            ids = uuids
+        elif metadata["benchmark"] == "ingress-perf":
+            index = "ingress-performance"
+            ids = uuids
+        else:
+            index = "ripsaw-kube-burner"
+            runs = match.match_kube_burner(uuids)
+            ids = match.filter_runs(runs, runs)
+
         metrics = test["metrics"]
-        dataframe_list = []
-
-        for metric in metrics:
-            logger.info("Collecting %s", metric["metric"])
-            if metric["metricType"] == "latency":
-                if metric["metric"] == "podReadyLatency":
-                    try:
-                        podl = match.burner_results("", ids, "ripsaw-kube-burner*")
-                        podl_df = match.convert_to_df(
-                            podl, columns=["uuid", "timestamp", "P99"]
-                        )
-                        dataframe_list.append(podl_df)
-                        logger.debug(podl_df)
-                    except Exception as e:  # pylint: disable=broad-exception-caught
-                        logger.error(
-                            "The namespace %s does not exist, exception %s",
-                            metric["namespace"],
-                            e,
-                        )
-
-            elif metric["metricType"] == "cpu":
-                try:
-                    cpu = match.burner_cpu_results(
-                        ids, metric["namespace"], "ripsaw-kube-burner*"
-                    )
-                    cpu_df = match.convert_to_df(cpu, columns=["uuid", "cpu_avg"])
-                    cpu_df = cpu_df.rename(
-                        columns={"cpu_avg": metric["metric"] + "_cpu_avg"}
-                    )
-                    dataframe_list.append(cpu_df)
-                    logger.debug(cpu_df)
-                except Exception as e:  # pylint: disable=broad-exception-caught
-                    logger.error(
-                        "The namespace %s does not exist, exception %s",
-                        metric["namespace"],
-                        e,
-                    )
+        dataframe_list = get_metric_data(ids, index, metrics, match, logger)

         merged_df = reduce(
             lambda left, right: pd.merge(left, right, on="uuid", how="inner"),
@@ -108,6 +80,62 @@ def orion(config, debug, output):
         match.save_results(merged_df, csv_file_path=output)


+def get_metric_data(ids, index, metrics, match, logger):
+    """Gets detailed metrics based on the metrics yaml list
+
+    Args:
+        ids (list): list of all uuids
+        index (str): index in ES of where to find data
+        metrics (list): metrics to gather data on
+        match (Matcher): current matcher instance
+        logger (logger): log data to one output
+
+    Returns:
+        dataframe_list: list of dataframes, one per metric
+    """
+    dataframe_list = []
+    for metric in metrics:
+        metric_name = metric['name']
+        logger.info("Collecting %s", metric_name)
+        metric_of_interest = metric['metric_of_interest']
+
+        if "agg" in metric.keys():
+            try:
+                cpu = match.get_agg_metric_query(
+                    ids, index, metric
+                )
+                agg_value = metric['agg']['value']
+                agg_type = metric['agg']['agg_type']
+                agg_name = agg_value + "_" + agg_type
+                cpu_df = match.convert_to_df(cpu, columns=["uuid", agg_name])
+                cpu_df = cpu_df.rename(
+                    columns={agg_name: metric_name + "_" + agg_name}
+                )
+                dataframe_list.append(cpu_df)
+                logger.debug(cpu_df)
+
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                logger.error(
+                    "Couldn't get agg metrics %s, exception %s",
+                    metric_name,
+                    e,
+                )
+        else:
+            try:
+                podl = match.getResults("", ids, index, metric)
+                podl_df = match.convert_to_df(
+                    podl, columns=["uuid", "timestamp", metric_of_interest]
+                )
+                dataframe_list.append(podl_df)
+                logger.debug(podl_df)
+            except Exception as e:  # pylint: disable=broad-exception-caught
+                logger.error(
+                    "Couldn't get metrics %s, exception %s",
+                    metric_name,
+                    e,
+                )
+    return dataframe_list
+
 def get_metadata(test, logger):
     """Gets metadata of the run from each test
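
A minimal sketch (not part of this patch) of the shape a `metrics` entry is assumed to take, based only on the keys that get_metric_data() reads above. The metric names, the "cpu" value, and the "avg" aggregation type are illustrative placeholders, not the project's actual configuration.

# Hypothetical metrics list matching the keys read in get_metric_data()
metrics = [
    {
        # timeseries metric: fetched with match.getResults(), one row per timestamp
        "name": "podReadyLatency",
        "metric_of_interest": "P99",
    },
    {
        # aggregated metric: fetched with match.get_agg_metric_query()
        "name": "etcdCPU",                           # illustrative name
        "metric_of_interest": "value",
        "agg": {"value": "cpu", "agg_type": "avg"},  # illustrative aggregation
    },
]

# Column naming mirrors the diff: agg_name = value + "_" + agg_type,
# and the aggregated dataframe column is renamed to name + "_" + agg_name.
agg = metrics[1]["agg"]
agg_name = agg["value"] + "_" + agg["agg_type"]  # -> "cpu_avg"
print(metrics[1]["name"] + "_" + agg_name)       # -> "etcdCPU_cpu_avg"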