8
8
import os
9
9
10
10
import click
11
- import yaml
12
11
import pandas as pd
12
+
13
13
from fmatch .matcher import Matcher
14
+ from utils .orion_funcs import run_hunter_analyze , get_metadata , \
15
+ set_logging , load_config , get_metric_data
14
16
15
17
16
18
@click .group ()
@@ -24,7 +26,8 @@ def cli():
24
26
@click .option ("--config" , default = "config.yaml" , help = "Path to the configuration file" )
25
27
@click .option ("--output" , default = "output.csv" , help = "Path to save the output csv file" )
26
28
@click .option ("--debug" , is_flag = True , help = "log level " )
27
- def orion (config , debug , output ):
29
+ @click .option ("--hunter-analyze" ,is_flag = True , help = "run hunter analyze" )
30
+ def orion (config , debug , output ,hunter_analyze ):
28
31
"""Orion is the cli tool to detect regressions over the runs
29
32
30
33
Args:
@@ -35,25 +38,22 @@ def orion(config, debug, output):
35
38
level = logging .DEBUG if debug else logging .INFO
36
39
logger = logging .getLogger ("Orion" )
37
40
logger = set_logging (level , logger )
41
+ data = load_config (config ,logger )
42
+ ES_URL = None
43
+
44
+ if "ES_SERVER" in data .keys ():
45
+ ES_URL = data ['ES_SERVER' ]
46
+ else :
47
+ if 'ES_SERVER' in os .environ :
48
+ ES_URL = os .environ .get ("ES_SERVER" )
49
+ else :
50
+ logger .error ("ES_SERVER environment variable/config variable not set" )
51
+ sys .exit (1 )
38
52
39
- if "ES_SERVER" not in os .environ :
40
- logger .error ("ES_SERVER environment variable not set" )
41
- sys .exit (1 )
42
-
43
- try :
44
- with open (config , "r" , encoding = "utf-8" ) as file :
45
- data = yaml .safe_load (file )
46
- logger .debug ("The %s file has successfully loaded" , config )
47
- except FileNotFoundError as e :
48
- logger .error ("Config file not found: %s" , e )
49
- sys .exit (1 )
50
- except Exception as e : # pylint: disable=broad-exception-caught
51
- logger .error ("An error occurred: %s" , e )
52
- sys .exit (1 )
53
53
for test in data ["tests" ]:
54
54
metadata = get_metadata (test , logger )
55
55
logger .info ("The test %s has started" , test ["name" ])
56
- match = Matcher (index = "perf_scale_ci" , level = level )
56
+ match = Matcher (index = "perf_scale_ci" , level = level , ES_URL = ES_URL )
57
57
uuids = match .get_uuid_by_metadata (metadata )
58
58
if len (uuids ) == 0 :
59
59
print ("No UUID present for given metadata" )
@@ -77,103 +77,12 @@ def orion(config, debug, output):
77
77
lambda left , right : pd .merge (left , right , on = "uuid" , how = "inner" ),
78
78
dataframe_list ,
79
79
)
80
- match .save_results (merged_df , csv_file_path = output )
81
-
82
-
83
def get_metric_data(ids, index, metrics, match, logger):
    """Collect one dataframe per metric listed in the test config.

    Args:
        ids (list): list of all uuids
        index (dict): index in es of where to find data
        metrics (dict): metrics to gather data on
        match (Matcher): current matcher instance
        logger (logger): log data to one output

    Returns:
        list: dataframe of the all metrics that were collected successfully
    """

    def _collect_agg(metric, name):
        # Aggregated metric: run the aggregate query, then rename the
        # aggregate column so every metric gets a unique header.
        raw = match.get_agg_metric_query(ids, index, metric)
        agg_name = metric['agg']['value'] + "_" + metric['agg']['agg_type']
        frame = match.convert_to_df(raw, columns=["uuid", agg_name])
        return frame.rename(columns={agg_name: name + "_" + agg_name})

    def _collect_raw(metric, value_column):
        # Raw time-series metric: keep uuid/timestamp plus the value column.
        raw = match.getResults("", ids, index, metric)
        return match.convert_to_df(
            raw, columns=["uuid", "timestamp", value_column]
        )

    dataframe_list = []
    for metric in metrics:
        metric_name = metric['name']
        logger.info("Collecting %s", metric_name)
        # Looked up eagerly (outside the try) to mirror strict config checks:
        # a metric without 'metric_of_interest' is a config error, not a
        # best-effort collection failure.
        metric_of_interest = metric['metric_of_interest']
        is_agg = "agg" in metric
        try:
            if is_agg:
                frame = _collect_agg(metric, metric_name)
            else:
                frame = _collect_raw(metric, metric_of_interest)
        except Exception as e:  # pylint: disable=broad-exception-caught
            # Best effort: a failing metric is logged and skipped, not fatal.
            if is_agg:
                logger.error(
                    "Couldn't get agg metrics %s, exception %s",
                    metric_name,
                    e,
                )
            else:
                logger.error(
                    "Couldn't get metrics %s, exception %s",
                    metric_name,
                    e,
                )
            continue
        dataframe_list.append(frame)
        logger.debug(frame)
    return dataframe_list
138
-
139
def get_metadata(test, logger):
    """Gets metadata of the run from each test.

    Copies every key of the test entry except the non-metadata ones
    ("metrics", "name"). ocpVersion is coerced to a string because the
    YAML loader can parse a version such as 4.14 as a float.

    Args:
        test (dict): test dictionary from the config file
        logger (logging.Logger): logger for debug output

    Returns:
        dict: dictionary of the metadata
    """
    metadata = {k: v for k, v in test.items() if k not in ("metrics", "name")}
    # Guard the coercion: a test entry is not required to pin a version,
    # and an unconditional lookup would raise KeyError.
    if "ocpVersion" in metadata:
        metadata["ocpVersion"] = str(metadata["ocpVersion"])
    # Lazy %-formatting: avoids building the string when DEBUG is off.
    logger.debug("metadata: %s", metadata)
    return metadata
156
-
157
-
158
def set_logging(level, logger):
    """Attach a stdout stream handler at *level* and return the logger.

    Args:
        level (_type_): level of the log
        logger (_type_): logger object

    Returns:
        logging.Logger: a formatted and level set logger
    """
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(level)
    stream_handler.setFormatter(
        logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )
    )
    logger.setLevel(level)
    logger.addHandler(stream_handler)
    return logger
177
86
178
87
179
88
if __name__ == "__main__" :
0 commit comments