Commit
update README
celiaescribe committed Sep 5, 2024
1 parent ab1869e commit 823546d
Showing 10 changed files with 230 additions and 51 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -6,6 +6,8 @@ The EOLES-ResIRF Coupling model is a tool for studying integrated decarbonizatio

## Installation

The current version of the code should be run on Unix.

### Step 1: Git clone the repository onto your computer.

24 changes: 24 additions & 0 deletions eoles/inputs/config/scenarios/scenarios_dr.json
@@ -0,0 +1,24 @@
{ "trash": {
"profile": ["reference", "ProfileFlat"],
"capacity_nuc": ["reference", "Nuc-"],
"capacity_pv": ["reference", "PV-"],
"capacity_offshore": ["reference", "Offshore-"],
"woodprices": ["reference", "PriceWood+"],
"weather": ["reference", "cold"],
"policy_mix": ["reference", "NoPolicy"],
"cop": ["reference", "COP+"],
"biogas": ["reference", "Biogas-", "Biogas+"],
"capacity_ren": ["reference", "Ren-", "Ren+"],
"demand": ["reference", "Sufficiency", "Reindustrialisation"],
"carbon_budget": ["reference", "CarbonBudget-"]
},
"supply" : {
"discount_rate": ["reference", "DiscountRate-", "DiscountRate--", "DiscountRate+", "DiscountRate++"]
},
"demand" : {

},
"prices": {

}
}
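For orientation: these scenario files map each uncertainty dimension to a list of variants, grouped into `supply`, `demand` and `prices` blocks (the `"trash"` block parks dimensions that are no longer swept; `scenarios_weather.json` below follows the same layout for weather years). Below is a minimal sketch of how such a file could be expanded into one-at-a-time sensitivity runs; the `load_scenarios` helper and the expansion logic are assumptions, not the project's actual parsing code.

```python
import json
from pathlib import Path


def load_scenarios(path):
    """Read a scenario file and return its variant lists grouped by block (hypothetical helper)."""
    with open(path) as f:
        return json.load(f)


def one_at_a_time_runs(scenarios):
    """Build one run per non-reference variant, keeping every other dimension at 'reference'."""
    runs = []
    for block, dimensions in scenarios.items():
        if block == "trash":  # parked dimensions are not swept
            continue
        for dimension, variants in dimensions.items():
            runs.extend({dimension: variant} for variant in variants if variant != "reference")
    return runs


if __name__ == "__main__":
    scenarios = load_scenarios(Path("eoles/inputs/config/scenarios/scenarios_dr.json"))
    print(one_at_a_time_runs(scenarios))
    # e.g. [{'discount_rate': 'DiscountRate-'}, {'discount_rate': 'DiscountRate--'}, ...]
```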
7 changes: 7 additions & 0 deletions eoles/inputs/config/scenarios/scenarios_weather.json
@@ -0,0 +1,7 @@
{
"supply" : {
"weather": ["reference", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016"]
},
"demand" : {},
"prices": {}
}
1 change: 1 addition & 0 deletions eoles/inputs/resources.py
@@ -200,6 +200,7 @@
'Solar PV': color_palette[8],
'peaking plants': color_palette[5],
'Peaking Plants': color_palette[5],
'Peaking \nPlants': color_palette[5],
'nuclear': color_palette[7],
'Nuclear': color_palette[7],
'natural gas': color_palette[10],
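The extra `'Peaking \nPlants'` key mirrors the relabelling in `eoles/write_output.py` below, where `'peaking plants'` is now displayed as `'Peaking \nPlants'` so the legend text wraps over two lines; every spelling that can appear as a plot label needs an entry in the colour dictionary. A small illustration of the lookup pattern, where the dictionary name, the palette and the demo data are assumptions:

```python
import matplotlib.pyplot as plt
import pandas as pd

# Stand-in for the label-to-colour mapping defined in eoles/inputs/resources.py (names assumed).
color_palette = plt.cm.tab20.colors
TECH_COLORS = {
    "Peaking Plants": color_palette[5],
    "Peaking \nPlants": color_palette[5],  # wrapped variant used as a display label
    "Nuclear": color_palette[7],
}

generation = pd.DataFrame({"Peaking \nPlants": [1.2, 0.8], "Nuclear": [40.0, 41.5]}, index=[2030, 2050])
# Every column label must resolve to a colour, including the '\n'-wrapped one.
generation.plot.bar(stacked=True, color=[TECH_COLORS[c] for c in generation.columns])
plt.show()
```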
3 changes: 3 additions & 0 deletions eoles/utils.py
@@ -1556,6 +1556,9 @@ def create_configs_coupling(list_design, config_coupling: dict, config_additiona

config_coupling_update['resirf'] = {} # we create a dictionary for the ResIRF configuration specs

if 'discount_rate' in config_additional.keys():
config_coupling_update['discount_rate'] = config_additional['discount_rate']

if 'prices' in config_additional.keys():
config_coupling_update['resirf']['prices'] = config_additional['prices']['resirf']
config_coupling_update['resirf']['energy'] = config_additional['energy'] # information necessary to update prices afterwards in modif_config_resirf
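The new block passes an optional `discount_rate` entry from `config_additional` through to the coupling configuration before the ResIRF-specific keys are filled in. A simplified sketch of that merge, assuming plain dictionaries (the real `create_configs_coupling` handles many more keys):

```python
def merge_additional_config(config_coupling: dict, config_additional: dict) -> dict:
    """Toy version of the passthrough added to create_configs_coupling (simplified assumption)."""
    config_coupling_update = dict(config_coupling)
    config_coupling_update['resirf'] = {}  # dictionary for the ResIRF configuration specs

    # Optional supply-side discount rate overrides the default when provided.
    if 'discount_rate' in config_additional:
        config_coupling_update['discount_rate'] = config_additional['discount_rate']

    if 'prices' in config_additional:
        config_coupling_update['resirf']['prices'] = config_additional['prices']['resirf']
        config_coupling_update['resirf']['energy'] = config_additional['energy']

    return config_coupling_update


# Example: a DiscountRate- sensitivity run (value illustrative)
print(merge_additional_config({'name': 'S0'}, {'discount_rate': 0.032}))
```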
18 changes: 9 additions & 9 deletions eoles/write_output.py
@@ -76,7 +76,7 @@
'pv': 'Solar PV',
'battery': 'Battery',
'hydro': 'Hydroelectricity',
'peaking plants': 'Peaking Plants',
'peaking plants': 'Peaking \nPlants',
'Generation offshore (TWh)': 'Offshore',
'Generation onshore (TWh)': 'Onshore',
'Generation pv (TWh)': 'Solar PV',
@@ -2671,7 +2671,7 @@ def format_ax(ax: plt.Axes, title=None, y_label=None, x_label=None, x_ticks=None

def format_ax_new(ax, y_label=None, title=None, format_x=None,
format_y=lambda y, _: y, ymin=None, ymax=None, xinteger=True, xmin=None, x_max=None, loc_title=None,
c_title=None):
c_title=None, y_fontsize=20, x_fontsize=20):
"""
Parameters
@@ -2700,7 +2700,7 @@ def format_ax_new(ax, y_label=None, title=None, format_x=None,
ax.xaxis.set_major_formatter(plt.FuncFormatter(format_x))

if y_label is not None:
ax.set_ylabel(y_label)
ax.set_ylabel(y_label, fontsize=y_fontsize)

if title is not None:
if loc_title is not None:
@@ -3216,7 +3216,7 @@ def plot_blackbox_optimization(dict_optimizer, save_path, two_stage_optim=False)

def waterfall_chart(df, colors=None, rotation=0, save=None, format_y=lambda y, _: '{:.0f}'.format(y), title=None,
y_label=None, hline=False, dict_legend=None, total=True, unit='B€', float_precision=0, neg_offset=None,
pos_offset=None, df_max=None, df_min=None):
pos_offset=None, df_max=None, df_min=None, x_fontsize=24, y_fontsize=24, annotate_fontsize=20):
if isinstance(df, pd.DataFrame):
df = df.squeeze()
if dict_legend is not None:
@@ -3289,9 +3289,9 @@ def waterfall_chart(df, colors=None, rotation=0, save=None, format_y=lambda y, _
y -= neg_offset
# if loop > 0:
if float_precision == 0:
ax.annotate("{:+,.0f} {}".format(val, unit), (loop, y), ha="center")
ax.annotate("{:+,.0f} {}".format(val, unit), (loop, y), ha="center", fontsize=annotate_fontsize)
else:
ax.annotate("{:+,.1f} {}".format(val, unit), (loop, y), ha="center")
ax.annotate("{:+,.1f} {}".format(val, unit), (loop, y), ha="center", fontsize=annotate_fontsize)
loop += 1

if blank.max() > 0: # total is True when plotting costs, and False when plotting capacities
@@ -3315,7 +3315,7 @@ def waterfall_chart(df, colors=None, rotation=0, save=None, format_y=lambda y, _
ax.set_ylim(ymax=y_max)
ax.set_ylim(ymin=y_min)
ax.set_xlabel('')
ax = format_ax_new(ax, format_y=format_y, xinteger=True)
ax = format_ax_new(ax, format_y=format_y, xinteger=True, x_fontsize=x_fontsize)

if title is not None:
if total:
@@ -3336,8 +3336,8 @@ def waterfall_chart(df, colors=None, rotation=0, save=None, format_y=lambda y, _
# fig.legend(handles, labels, loc='center left', frameon=False, ncol=1,
# bbox_to_anchor=(1, 0.5))

plt.setp(ax.xaxis.get_majorticklabels(), rotation=rotation)
ax.tick_params(axis='both', which='major', labelsize=18)
plt.setp(ax.xaxis.get_majorticklabels(), rotation=rotation, fontsize=x_fontsize)
ax.tick_params(axis='both', which='major', labelsize=x_fontsize)

save_fig(fig, save=save)

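Taken together, the plotting helpers now expose their font sizes (`y_fontsize`/`x_fontsize` on `format_ax_new`, plus `annotate_fontsize` on `waterfall_chart`) instead of hard-coding 18-point ticks. A usage sketch with made-up cost figures; the values, the output file name and the offset arguments are assumptions, and `eoles` is assumed to be importable as a package:

```python
import pandas as pd
from eoles.write_output import waterfall_chart

# Illustrative cost differences between two scenarios (B€, made-up numbers).
cost_differences = pd.Series({
    'Investment heater': 2.1,
    'Investment insulation': 0.9,
    'Electricity system': -1.4,
    'Gas imports': -0.6,
})

waterfall_chart(cost_differences, unit='B€', rotation=45, total=True, float_precision=1,
                neg_offset=0.2, pos_offset=0.2,  # vertical offsets for the +/- annotations
                x_fontsize=24, y_fontsize=24, annotate_fontsize=20,
                save='waterfall_example.pdf')
```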
61 changes: 53 additions & 8 deletions postprocessing/process_outputs.py
@@ -33,8 +33,8 @@
MAPPING = {'Learning+': 'High', 'Learning-': 'Low',
'Elasticity+': 'High', 'Elasticity-': 'Low',
'Biogas+': 'High', 'Biogas-': 'Low',
'Capacity_ren+': 'High', 'Ren-': 'Low',
'Demand+': 'High', 'Sufficiency': 'Low',
'Capacity_ren+': 'High', 'Ren-': 'Low', 'Ren+': 'High',
'Demand+': 'High', 'Sufficiency': 'Low', 'Reindustrialisation': 'High',
'PriceGas+': 'High', 'PriceGas-': 'Low',
'PriceWood+': 'High', 'PriceWood-': 'Low',
'Policy_mix+': 'High', 'Policy_mix-': 'Low',
@@ -49,7 +49,7 @@
'learning': 'Technical progress heat-pumps',
'elasticity': 'Heat-pump price elasticity',
'biogas': 'Biogas potential',
'capacity_ren': 'Renewable capacity',
'capacity_ren': 'Renewable potential',
'demand': 'Other electricity demand',
'gasprices': 'Gas prices',
'woodprices': 'Wood prices',
@@ -66,6 +66,7 @@
'demand', 'carbon_budget', 'gasprices']
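The new `Ren+` and `Reindustrialisation` variants slot into the same High/Low classification used when summarising sensitivity runs. A short sketch of how a variant name could be translated with the mapping; the trimmed dictionary and the `classify` helper are assumptions, not code from this module:

```python
MAPPING = {'Ren-': 'Low', 'Ren+': 'High',
           'Sufficiency': 'Low', 'Reindustrialisation': 'High'}  # trimmed copy of the mapping above


def classify(variant: str) -> str:
    """Map a scenario variant name to its High/Low level, keeping 'reference' apart (hypothetical helper)."""
    if variant == 'reference':
        return 'Reference'
    return MAPPING.get(variant, 'Unknown')


for variant in ['reference', 'Ren+', 'Sufficiency', 'Reindustrialisation']:
    print(variant, '->', classify(variant))
# reference -> Reference, Ren+ -> High, Sufficiency -> Low, Reindustrialisation -> High
```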



def parse_outputs(folderpath, features, emissions=False):
"""Parses the outputs of the simulations and creates a csv file with the results.
Returns scenarios_complete, and output, which has been processed to only include the differences between the Ban and the reference scenario."""
@@ -141,8 +141,8 @@ def make_table_summary(data, folderpath):
'Consumption Wood fuel (TWh)': 'Consumption Wood',
'offshore': 'Offshore capacity',
'onshore': 'Onshore capacity',
'pv': 'Solar PV',
'battery': 'Battery',
'pv': 'Solar PV capacity',
'battery': 'Battery capacity',
'peaking plants': 'Peaking plants capacity',
'methanization': 'Methanization capacity',
'pyrogazification': 'Pyrogazification capacity',
@@ -168,6 +168,49 @@ def make_table_summary(data, folderpath):
return df


def make_table_summary_resirf(data1, data2, folderpath):
"""Builds a 2050 summary table of ResIRF outputs for the counterfactual and Ban runs and writes it to csv."""
scenarios = {('S0', 'reference'): 'Counterfactual', ('S0', 'Ban'): 'Ban'}
selected_keys = [
'Stock (Million)',
'Surface (Million m2)',
'Consumption (TWh)',
'Consumption (kWh/m2)',
'Consumption PE (TWh)',
'Consumption Electricity (TWh)',
'Consumption Natural gas (TWh)',
'Consumption Oil fuel (TWh)',
'Consumption Wood fuel (TWh)',
'Consumption Heating (TWh)',
'Energy poverty (Million)',
'Emission (MtCO2)',
'Stock G (Million)',
'Stock F (Million)',
'Stock E (Million)',
'Stock D (Million)',
'Stock C (Million)',
'Stock B (Million)',
'Stock A (Million)',
'Stock Heat pump (Million)',
'Stock Direct electric (Million)',
'Stock Natural gas (Million)',
'Stock Oil fuel (Million)',
'Stock Wood fuel (Million)',
'Stock District heating (Million)',
]
df1 = data1.loc[selected_keys, '2049'].rename(2050)  # relabel the extracted column from '2049' to 2050
df1 = df1.astype('int')

df2 = data2.loc[selected_keys, '2049'].rename(2050)
df2 = df2.astype('int')

df = pd.concat([df1, df2], axis=1)
df.to_csv(folderpath / Path('summary_table_resirf.csv'))

return df
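A usage sketch for the new helper; the CSV file names and the loading step are assumptions about how the detailed ResIRF outputs would be read, since the function only expects two DataFrames indexed by output name with a '2049' column:

```python
from pathlib import Path
import pandas as pd

folderpath = Path('assessing_ban/simulations/exhaustive_20240826_120338')
# Hypothetical detailed ResIRF outputs for the counterfactual and Ban runs.
data_reference = pd.read_csv(folderpath / 'S0_reference_detailed.csv', index_col=0)
data_ban = pd.read_csv(folderpath / 'S0_ban_detailed.csv', index_col=0)

summary = make_table_summary_resirf(data_reference, data_ban, folderpath)
print(summary)
```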



def get_distributional_data(df):
"""Extracts detailed distributional data, using the knowledge that those distributional data are tuples."""

@@ -512,7 +556,7 @@ def frequency_chart_subplot(results1, results2, category_names=None, save_path=N

plt.tight_layout()

def horizontal_stack_bar_plot(df, columns=None, title=None, order=None, save_path=None):
def horizontal_stack_bar_plot(df, columns=None, title=None, order=None, save_path=None, fontsize=20):
"""
Create a horizontal stacked bar plot from a DataFrame.
@@ -561,7 +605,7 @@ def horizontal_stack_bar_plot(df, columns=None, title=None, order=None, save_pat
ax.spines['left'].set_visible(False)

# size of x-axis and y-axis ticks
ax.tick_params(axis='both', which='major', labelsize=18)
ax.tick_params(axis='both', which='major', labelsize=fontsize)
# size of title

# Remove the x-axis and y-axis titles
@@ -684,6 +728,7 @@ def distributional_plot(df, folder_name=None):
# scenarios_complete, output, hourly_generation = parse_outputs(folderpath, features=features)

# LOCAL
folderpath = Path('assessing_ban/simulations/exhaustive_20240506_195738') # for cluster use
folderpath = Path('assessing_ban/simulations/exhaustive_20240826_120338') # for cluster use
features = ['policy_heater', 'policy_insulation', 'learning', 'elasticity', 'biogas', 'capacity_ren', 'demand', 'carbon_budget', 'gasprices']
features = ['discount_rate']
scenarios_complete, output, hourly_generation = parse_outputs(folderpath, features=features)
61 changes: 31 additions & 30 deletions preprocessing_data.py
@@ -7,33 +7,33 @@

####### Demand profile #########

# Process csv file to get the demand profile
# First column is the type of vehicule, second is the hour, last is the demand value in MW
demand_ev = pd.read_csv('eoles/inputs/demand_data_other/demand_transport2050.csv', index_col=0, header=None).reset_index().rename(columns={0: 'vehicule', 1: 'hour', 2: 'demand'})
demand_rte = pd.read_csv("eoles/inputs/demand/demand2050_RTE.csv", index_col=0, header=None).squeeze("columns")

adjust_demand = (530 * 1e3 - 580 * 1e3) / 8760 # 580TWh is the total of the profile we use as basis for electricity hourly demand (from RTE), this has been verified
demand_rte_rescaled_1 = demand_rte + adjust_demand
demand_rte_rescaled_2 = demand_rte * (530 / 580)

plt.plot(demand_rte_rescaled_1[0:150], c='red')
plt.plot(demand_rte_rescaled_2[0:150], c='blue')
# plt.plot(demand_rte[0:1000], c='green')
plt.show()

# plot the demand profile for vehicule = 'light'

demand_ev_light = demand_ev.loc[demand_ev.vehicule == 'light']
demand_ev_light = demand_ev_light.drop(columns=['vehicule'])
demand_ev_light = demand_ev_light.set_index('hour')

demand_ev_heavy = demand_ev.loc[demand_ev.vehicule == 'heavy']
demand_ev_heavy = demand_ev_heavy.drop(columns=['vehicule'])
demand_ev_heavy = demand_ev_heavy.set_index('hour')

demand_ev_bus = demand_ev.loc[demand_ev.vehicule == 'bus']
demand_ev_bus = demand_ev_bus.drop(columns=['vehicule'])
demand_ev_bus = demand_ev_bus.set_index('hour')
# # Process csv file to get the demand profile
# # First column is the type of vehicule, second is the hour, last is the demand value in MW
# demand_ev = pd.read_csv('eoles/inputs/demand_data_other/demand_transport2050.csv', index_col=0, header=None).reset_index().rename(columns={0: 'vehicule', 1: 'hour', 2: 'demand'})
# demand_rte = pd.read_csv("eoles/inputs/demand/demand2050_RTE.csv", index_col=0, header=None).squeeze("columns")
#
# adjust_demand = (530 * 1e3 - 580 * 1e3) / 8760 # 580TWh is the total of the profile we use as basis for electricity hourly demand (from RTE), this has been verified
# demand_rte_rescaled_1 = demand_rte + adjust_demand
# demand_rte_rescaled_2 = demand_rte * (530 / 580)
#
# plt.plot(demand_rte_rescaled_1[0:150], c='red')
# plt.plot(demand_rte_rescaled_2[0:150], c='blue')
# # plt.plot(demand_rte[0:1000], c='green')
# plt.show()
#
# # plot the demand profile for vehicule = 'light'
#
# demand_ev_light = demand_ev.loc[demand_ev.vehicule == 'light']
# demand_ev_light = demand_ev_light.drop(columns=['vehicule'])
# demand_ev_light = demand_ev_light.set_index('hour')
#
# demand_ev_heavy = demand_ev.loc[demand_ev.vehicule == 'heavy']
# demand_ev_heavy = demand_ev_heavy.drop(columns=['vehicule'])
# demand_ev_heavy = demand_ev_heavy.set_index('hour')
#
# demand_ev_bus = demand_ev.loc[demand_ev.vehicule == 'bus']
# demand_ev_bus = demand_ev_bus.drop(columns=['vehicule'])
# demand_ev_bus = demand_ev_bus.set_index('hour')
# plot only for a subset of hours
# demand_ev_light.loc[0:100].plot()
# plt.show()
@@ -179,7 +179,8 @@
lake_inflows = pd.read_csv('eoles/inputs/hourly_profiles/lake_2000-2019.csv', index_col=0, header=None).reset_index()
lake_inflows.columns = ["month", "capacity_factor"]
#
list_year = [2012]
year = 2016
list_year = [year]
vre_profiles_subset = pd.DataFrame()
lake_inflows_subset = pd.DataFrame()

@@ -203,8 +204,8 @@

lake_inflows_subset = lake_inflows_subset.set_index('month')
#
vre_profiles_subset.to_csv('eoles/inputs/hourly_profiles/vre_profiles_2012.csv', header=False)
lake_inflows_subset.to_csv('eoles/inputs/hourly_profiles/lake_2012.csv', header=False)
vre_profiles_subset.to_csv(f'eoles/inputs/hourly_profiles/vre_profiles_{year}.csv', header=False)
lake_inflows_subset.to_csv(f'eoles/inputs/hourly_profiles/lake_{year}.csv', header=False)
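With `scenarios_weather.json` above listing individual weather years, the year-specific input files are now written from a parameterised `year` instead of a hard-coded 2012. The sketch below only shows how the scenario list lines up with the per-year file paths; reading the weather list from the scenario file is an inference, not code from this script:

```python
import json

# Weather years declared in the new scenario file (the 'reference' entry is skipped).
with open('eoles/inputs/config/scenarios/scenarios_weather.json') as f:
    weather_years = json.load(f)['supply']['weather']

for year in weather_years:
    if year == 'reference':
        continue
    print(f'eoles/inputs/hourly_profiles/vre_profiles_{year}.csv')
    print(f'eoles/inputs/hourly_profiles/lake_{year}.csv')
```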
#
# ############## Estimate run of river values for different years ######################
#