Resilience template rework #36

Draft: wants to merge 29 commits into base: develop

Commits (29)
98a1803  initial upload (aivanova5, Oct 12, 2023)
2f2bb8d  Update main_bulk.glm (aivanova5, Oct 12, 2023)
5a58033  Update .index (aivanova5, Oct 13, 2023)
e38f5bd  Create .catalog (aivanova5, Oct 13, 2023)
f034977  Adding missing logging file (aivanova5, Oct 13, 2023)
56667e5  Update main_bulk.glm (aivanova5, Oct 13, 2023)
b13fcce  Update main_bulk.glm (aivanova5, Oct 13, 2023)
8fd4b8d  Update main_bulk.glm (aivanova5, Oct 13, 2023)
33b86bd  Update main_bulk.glm (aivanova5, Oct 13, 2023)
246e128  Update main_bulk.glm (aivanova5, Oct 13, 2023)
b2a57a0  Update main_bulk.glm (aivanova5, Oct 13, 2023)
2f06c64  Update status_log.py (aivanova5, Oct 13, 2023)
b32f4b6  Update status_log.py (aivanova5, Oct 13, 2023)
e27c6e3  Update status_log.py (aivanova5, Oct 13, 2023)
4bd91cb  Update main_bulk.glm (aivanova5, Oct 13, 2023)
caf698f  Update main_bulk.glm (aivanova5, Oct 13, 2023)
4e4b6e2  Update main_bulk.glm (aivanova5, Oct 13, 2023)
6584c9a  Update main_bulk.glm (aivanova5, Oct 14, 2023)
268c9fe  Update main_bulk.glm (aivanova5, Oct 14, 2023)
a62e6f8  Update main_bulk.glm (aivanova5, Oct 14, 2023)
2ebfe59  Update main_bulk.glm (aivanova5, Oct 14, 2023)
834d479  Update main_bulk.glm (aivanova5, Oct 14, 2023)
8a89302  Update main_bulk.glm (aivanova5, Oct 14, 2023)
5727e20  Update main_bulk.glm (aivanova5, Oct 14, 2023)
7ba5e04  Adding resilience template features (aivanova5, Jan 31, 2024)
174fc0d  Updating header for template (aivanova5, Feb 23, 2024)
1c28a47  Update .catalog (aivanova5, Feb 23, 2024)
7803034  Update header.glm (aivanova5, Feb 27, 2024)
49ebe1a  Adding veg pre processing file (aivanova5, Mar 20, 2024)
1 change: 1 addition & 0 deletions US/CA/SLAC/.index
@@ -1,3 +1,4 @@
ica_analysis
electrification
loadfactor
anticipation
7 changes: 7 additions & 0 deletions US/CA/SLAC/anticipation/.catalog
@@ -0,0 +1,7 @@
header.glm:a=r
folium.glm:a=r
add_info.py:a=rx
status_log.py:a=rx
folium_data.py:a=rx
convert_to_csv.glm:a=rx
veg_data_preprocess.py:a=rx
7 changes: 7 additions & 0 deletions US/CA/SLAC/anticipation/add_info.py
@@ -0,0 +1,7 @@
import pandas as pd

# Append constant wind speed and width columns to the vegetation path data,
# overwriting path_vege.csv in place
csv_input = pd.read_csv('path_vege.csv')
wind_speed = 10
width = 5
csv_input['wind_speed'] = wind_speed
csv_input['width'] = width
csv_input.to_csv('path_vege.csv', index=False)
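The wind speed and width above are hard-coded constants. A minimal sketch of taking them from the command line instead (the argument order here is hypothetical, not part of the template):

import sys
import pandas as pd

# Hypothetical CLI form: python3 add_info.py path_vege.csv 10 5
path, wind_speed, width = sys.argv[1], float(sys.argv[2]), float(sys.argv[3])
df = pd.read_csv(path)
df['wind_speed'] = wind_speed
df['width'] = width
df.to_csv(path, index=False)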
1 change: 1 addition & 0 deletions US/CA/SLAC/anticipation/convert_to_csv.glm
@@ -0,0 +1 @@
#write ${modelname/.glm/}_feeder.csv overhead_line,node:phases
47 changes: 47 additions & 0 deletions US/CA/SLAC/anticipation/folium.glm
@@ -0,0 +1,47 @@
module powerflow;
#set relax_naming_rules=TRUE
class pole
{
    double pole_height;
    double position;
    double linesag;
    double linesway;
    double contact;
    double strike;
}
class point
{
    double latitude;
    double longitude;
    double position;
    double linesag;
    double linesway;
    double contact;
    double strike;
    char1024 status;
}
object pole_configuration
{
    name "tower";
}
object pole_configuration
{
    name "3pole";
}
object pole_configuration
{
    name "2pole";
}
object pole_configuration
{
    name "vert3";
}
object pole_configuration
{
    name "sideT";
}
object pole_configuration
{
    name "flat3";
}
#input "path_result_plot.csv" -f "table" -t "object" -C "point" -M "powerflow"
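The #input directive above loads each row of path_result_plot.csv as a powerflow point object, so the CSV is expected to carry the columns declared in the point class (plus the status column added by folium_data.py below). A minimal sanity check, assuming that layout:

import pandas as pd

EXPECTED = {'latitude', 'longitude', 'position', 'linesag',
            'linesway', 'contact', 'strike', 'status'}

df = pd.read_csv('path_result_plot.csv')
missing = EXPECTED - set(df.columns)
if missing:
    raise SystemExit(f"path_result_plot.csv is missing columns: {sorted(missing)}")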
10 changes: 10 additions & 0 deletions US/CA/SLAC/anticipation/folium_data.py
@@ -0,0 +1,10 @@
import pandas as pd
# Keep only the geometry and loading columns needed for plotting
csv_input = pd.read_csv('path_result.csv',
    usecols=['position', 'latitude', 'longitude', 'linesag', 'linesway', 'contact', 'strike'])
csv_input['status'] = 'OK'  # default status for every point
# Flag any point whose strike value exceeds the threshold
strike_threshold = 0.1
for index, row in csv_input.iterrows():
    if row['strike'] > strike_threshold:
        csv_input.loc[index, 'status'] = 'FAILED'
csv_input.to_csv('path_result_plot.csv', index=False)
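In the context of the script above, the row-by-row loop could also be written as a single vectorized assignment; a behavior-equivalent sketch:

# Equivalent to the iterrows() loop: one boolean-mask assignment
csv_input.loc[csv_input['strike'] > strike_threshold, 'status'] = 'FAILED'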
76 changes: 76 additions & 0 deletions US/CA/SLAC/anticipation/header.glm
@@ -0,0 +1,76 @@
// Test poles
//
//
// Convert command:
// gridlabd convert CARDINAL_Polar\ -\ Design\ CalcDesign\ DSO.xlsx cardinal_poles.csv -f xlsx-spida -t csv-geodata
//
// Run command:
// gridlabd main_cardinal_poles.glm // cardinal_poles.glm
//
// Python command:
// convert('CARDINAL_Polar - Design CalcDesign DSO.xlsx', 'CARDINAL_PolarDesign Attachment and Equipment_Asset Details from SPIDA and SAP.xlsx', 'cardinal_poles.csv', options={'extract_equipment':'yes','include_network':'yes', 'include_mount':'yes', 'include_network':'CARDINAL.csv'})
//
#define suppress_repeat_messages=TRUE
#define pythonpath="/usr/local/opt/gridlabd/current/share/gridlabd/template/US/CA/SLAC/anticipation"
//#exec printenv
#set minimum_timestep=3600


module status_log;
module climate;
module tape;

module powerflow {
    solver_method NR;
    line_capacitance true;
    message_flags VERBOSE;
}
#ifdef RELIABILITY_ON
module reliability {
    report_event_log true;
}
object fault_check {
    name test_fault;
    check_mode ONCHANGE;
    reliability_mode true;
    // output_filename ${FAULT_OUT_PATH};
    grid_association true;
};

object power_metrics {
    name pwrmetrics;
    base_time_value 1 h;
}

object metrics {
    name testmetrics;
    report_file metrics.txt;
    module_metrics_object pwrmetrics;
    metrics_of_interest "SAIFI,SAIDI,CAIDI,ASAI,MAIFI";
    customer_group "class=meter";
    metric_interval 5 h;
    report_interval 5 h;
};
#endif
clock {
    starttime "${STARTTIME}";
    stoptime "${STOPTIME}";
    timezone "${TIMEZONE}";
}

#ifdef WIND_SPEED
object climate {
    name weather;
    object player {
        property wind_speed;
        file ${WIND_SPEED};
    };
}
#else
#weather get CA-Chino_Airport.tmy3
object climate {
    name weather;
    tmyfile "CA-Chino_Airport.tmy3";
}
#endif
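When WIND_SPEED is defined, the player object above reads a time-series file of timestamp,value pairs. A sketch that generates a synthetic wind-speed file; the file name and values are purely illustrative, with samples spaced to match the minimum_timestep of 3600 s set in the header:

import csv
import datetime

# Illustrative only: a few hourly wind-speed samples
start = datetime.datetime(2023, 10, 12, 0, 0)
with open('wind_speed.player', 'w', newline='') as f:
    writer = csv.writer(f)
    for hour, speed in enumerate([8.0, 9.5, 11.0, 12.5]):
        t = start + datetime.timedelta(hours=hour)
        writer.writerow([t.strftime('%Y-%m-%d %H:%M:%S'), speed])

The model could then be run with the file supplied as a define, e.g. gridlabd -D WIND_SPEED=wind_speed.player header.glm (treating the exact flag spelling as an assumption about the GridLAB-D command line).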
53 changes: 53 additions & 0 deletions US/CA/SLAC/anticipation/status_log.py
@@ -0,0 +1,53 @@
import datetime

import pandas as pd
import gridlabd  # provided by the GridLAB-D runtime when this module is loaded

metered_energy = {}
pole_status = {}
objs = {}
wrn_count = 0

def get_info(t, object_class, target_property):
    # Collect target_property from every object of object_class at time t
    data = {}
    for obj in objs:
        if gridlabd.get_object(obj)['class'] == object_class:
            if t not in data:
                data[t] = {}
            data[t][obj] = gridlabd.get_object(obj)[target_property]
    return data

def dump_csv(data, file_name):
    # Nothing to write if no samples were collected
    if not data:
        return None

    # Flatten the nested {timestamp: {object: value}} dictionary
    flat_data = []
    for timestamp, item in data.items():
        row = {'timestamp': datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')}
        row.update(item)
        flat_data.append(row)

    # Create a DataFrame indexed by timestamp and write it out
    df = pd.DataFrame(flat_data).set_index('timestamp')
    df.to_csv(file_name, index=True)
    return df

def on_init(t):
    global objs
    objs = gridlabd.get('objects')
    return True

def on_commit(t):
    global wrn_count
    pole_status.update(get_info(t, 'pole', 'status'))
    # pole_status.update(get_info(t, 'pole', 'total_moment'))
    meter_data = get_info(t, 'meter', 'measured_real_energy')
    if meter_data:
        metered_energy.update(meter_data)
    elif wrn_count == 0:
        # Warn once if the model has no meters to report on
        print("WARNING [status_log]: meters are not available in the model.")
        wrn_count = 1
    return True

def on_term(t):
    dump_csv(pole_status, '/tmp/output/pole_status.csv')
    dump_csv(metered_energy, '/tmp/output/metered_energy.csv')
    return None
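status_log.py normally runs inside GridLAB-D, which supplies the gridlabd module. A hypothetical stand-alone smoke test, assuming a stub that mimics only the two API calls the script uses (get and get_object):

import os, sys, types

# Stub out the GridLAB-D runtime API before importing the script
stub = types.ModuleType('gridlabd')
stub.get = lambda kind: ['pole1', 'meter1'] if kind == 'objects' else []
stub.get_object = lambda name: (
    {'class': 'pole', 'status': 'OK'} if name == 'pole1'
    else {'class': 'meter', 'measured_real_energy': '42.0'})
sys.modules['gridlabd'] = stub

os.makedirs('/tmp/output', exist_ok=True)  # on_term writes its CSVs here

import status_log
status_log.on_init(0)
status_log.on_commit(1700000000)  # any epoch timestamp works for the test
status_log.on_term(1700000000)    # writes pole_status.csv and metered_energy.csv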
35 changes: 35 additions & 0 deletions US/CA/SLAC/anticipation/veg_data_preprocess.py
@@ -0,0 +1,35 @@
import pandas as pd
import sys

def extract_poles():
    input_file_name = sys.argv[1]
    output_file_name = sys.argv[2]

    df = pd.read_csv(input_file_name)

    # Keep only rows where 'class' is 'pole', with a fresh index
    filtered_df = df[df['class'] == 'pole']
    filtered_df = filtered_df.reset_index(drop=True)

    # Build a name -> pole_length map from the 'pole_configuration' rows
    config_rows = df[df['class'] == 'pole_configuration']
    pole_length_map = dict(zip(config_rows['name'], config_rows['pole_length']))

    # Map each pole's 'configuration' to its pole_length; copy first to
    # avoid a SettingWithCopyWarning
    filtered_df = filtered_df.copy()
    filtered_df['pole_length'] = filtered_df['configuration'].map(pole_length_map)

    # Drop rows missing latitude or longitude, then reset the index
    filtered_df.dropna(subset=['latitude', 'longitude'], inplace=True)
    filtered_df.reset_index(drop=True, inplace=True)

    # Write the final DataFrame
    filtered_df.to_csv(output_file_name, index=False)


if __name__ == "__main__":
    extract_poles()
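The script takes its input and output paths as positional arguments. A usage sketch with illustrative file names:

# Hypothetical invocation:
#   python3 veg_data_preprocess.py model_geodata.csv poles_only.csv
import subprocess
subprocess.run(
    ['python3', 'veg_data_preprocess.py', 'model_geodata.csv', 'poles_only.csv'],
    check=True)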