Merge pull request #958 from rl-institut/feature/results-as-pandas
Feature/results as pandas
Bachibouzouk authored Apr 25, 2024
2 parents 1e56e39 + 9130f7d commit b6b3a16
Showing 9 changed files with 302 additions and 54 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -38,6 +38,7 @@ Here is a template for new release sections
- `PARAMETER_DOC` in `utils` can be used to get information about a parameter directly from the csv file `docs/MVS_parameters_list.csv` (#956)
- Now oemof-solph ExtractionTurbine CHP component can be simulated (only tested from the json input) (#952)
- The heat pump and chp components can now be simulated with MVS although no explicit support/documentation is present for running from the command line (#954)
- Saving the raw oemof results in a pandas DataFrame with a MultiIndex (#958)

### Changed
- `F0_output.parse_simulation_log`, so that `SIMULATION_RESULTS` are not overwritten anymore (#901)
@@ -58,6 +59,7 @@ Here is a template for new release sections
- If an asset has 2 output busses, the output flow of only one of the busses is provided using its name `asset[FLOW][bus_name]` (#952)
- The user can choose on which bus the investment will take place (useful for transformers with 2 inputs and 1 output or 1 input and 2 outputs) (#954)
- energy_price and feedin of DSO (providers) can be provided as timeseries (#954)
- The peak-demand pricing cost is now applied to the consumption of the DSO only (previously it was split between consumption and feedin) (#958)

### Removed
- Input timeseries is now not returned to epa in `utils.data_parser.py` (#936)
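Illustration only (not part of this commit): in numbers, the peak-demand pricing change of #958 moves the full annual charge onto the consumption-side transformer instead of splitting it 50/50 with feed-in; the variable names below are invented for this sketch.

```python
# Sketch of the changed peak-demand pricing attribution (invented variable names).

peak_demand_price = 60.0  # currency per kW of peak demand per year (example value)

# Before #958: the charge was split between the two DSO-side transformers
om_consumption_before = peak_demand_price / 2   # 30.0
om_feedin_before = peak_demand_price / 2        # 30.0

# After #958: the full charge sits on the consumption transformer only
om_consumption_after = peak_demand_price        # 60.0
om_feedin_after = 0.0

# The total annual charge per kW of peak demand is unchanged; only its
# attribution between consumption and feed-in differs.
assert om_consumption_before + om_feedin_before == om_consumption_after + om_feedin_after
```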
24 changes: 19 additions & 5 deletions src/multi_vector_simulator/C0_data_processing.py
@@ -296,6 +296,7 @@ def define_excess_sinks(dict_values):
price={VALUE: 0, UNIT: CURR + "/" + UNIT},
inflow_direction=bus,
energy_vector=energy_vector,
asset_type="excess",
)
dict_values[ENERGY_BUSSES][bus].update({EXCESS_SINK: excess_sink_name})
auto_sinks.append(excess_sink_name)
@@ -733,6 +734,7 @@ def define_auxiliary_assets_of_energy_providers(dict_values, dso_name):
price=dso_dict[ENERGY_PRICE],
energy_vector=dso_dict[ENERGY_VECTOR],
emission_factor=dso_dict[EMISSION_FACTOR],
asset_type=dso_dict.get(TYPE_ASSET),
)
dict_feedin = change_sign_of_feedin_tariff(dso_dict[FEEDIN_TARIFF], dso_name)

@@ -746,6 +748,7 @@ def define_auxiliary_assets_of_energy_providers(dict_values, dso_name):
inflow_direction=inflow_bus_name,
specific_costs={VALUE: 0, UNIT: CURR + "/" + UNIT},
energy_vector=dso_dict[ENERGY_VECTOR],
asset_type=dso_dict.get(TYPE_ASSET),
)
dso_dict.update(
{
@@ -1021,15 +1024,16 @@ def define_transformer_for_peak_demand_pricing(
EFFICIENCY: {VALUE: 1, UNIT: "factor"},
DEVELOPMENT_COSTS: {VALUE: 0, UNIT: CURR},
SPECIFIC_COSTS: {VALUE: 0, UNIT: CURR + "/" + dict_dso[UNIT],},
# the demand pricing is split between consumption and feedin
# the demand pricing is only applied to consumption
SPECIFIC_COSTS_OM: {
VALUE: dict_dso[PEAK_DEMAND_PRICING][VALUE] / 2,
VALUE: dict_dso[PEAK_DEMAND_PRICING][VALUE],
UNIT: CURR + "/" + dict_dso[UNIT] + "/" + UNIT_YEAR,
},
DISPATCH_PRICE: {VALUE: 0, UNIT: CURR + "/" + dict_dso[UNIT] + "/" + UNIT_HOUR},
OEMOF_ASSET_TYPE: OEMOF_TRANSFORMER,
ENERGY_VECTOR: dict_dso[ENERGY_VECTOR],
AGE_INSTALLED: {VALUE: 0, UNIT: UNIT_YEAR},
TYPE_ASSET: dict_dso.get(TYPE_ASSET),
}

dict_values[ENERGY_CONVERSION].update(
@@ -1053,15 +1057,16 @@ def define_transformer_for_peak_demand_pricing(
EFFICIENCY: {VALUE: 1, UNIT: "factor"},
DEVELOPMENT_COSTS: {VALUE: 0, UNIT: CURR},
SPECIFIC_COSTS: {VALUE: 0, UNIT: CURR + "/" + dict_dso[UNIT],},
# the demand pricing is split between consumption and feedin
# the demand pricing is only applied to consumption
SPECIFIC_COSTS_OM: {
VALUE: dict_dso[PEAK_DEMAND_PRICING][VALUE] / 2,
VALUE: 0,
UNIT: CURR + "/" + dict_dso[UNIT] + "/" + UNIT_YEAR,
},
DISPATCH_PRICE: {VALUE: 0, UNIT: CURR + "/" + dict_dso[UNIT] + "/" + UNIT_HOUR},
OEMOF_ASSET_TYPE: OEMOF_TRANSFORMER,
ENERGY_VECTOR: dict_dso[ENERGY_VECTOR],
AGE_INSTALLED: {VALUE: 0, UNIT: UNIT_YEAR},
TYPE_ASSET: dict_dso.get(TYPE_ASSET)
# LIFETIME: {VALUE: 100, UNIT: UNIT_YEAR},
}
if dict_dso.get(DSO_FEEDIN_CAP, None) is not None:
@@ -1089,6 +1094,7 @@ def define_source(
emission_factor,
price=None,
timeseries=None,
asset_type=None,
):
r"""
Defines a source with default input values. If kwargs are given, the default values are overwritten.
@@ -1145,6 +1151,7 @@
AGE_INSTALLED: {VALUE: 0, UNIT: UNIT_YEAR,},
ENERGY_VECTOR: energy_vector,
EMISSION_FACTOR: emission_factor,
TYPE_ASSET: asset_type,
}

if outflow_direction not in dict_values[ENERGY_BUSSES]:
@@ -1271,7 +1278,13 @@ def determine_dispatch_price(dict_values, price, source):


def define_sink(
dict_values, asset_key, price, inflow_direction, energy_vector, **kwargs
dict_values,
asset_key,
price,
inflow_direction,
energy_vector,
asset_type=None,
**kwargs,
):
r"""
This automatically defines a sink for an oemof-sink object. The sinks are added to the energyConsumption assets.
@@ -1323,6 +1336,7 @@ def define_sink(
ENERGY_VECTOR: energy_vector,
OPTIMIZE_CAP: {VALUE: True, UNIT: TYPE_BOOL},
DISPATCHABILITY: {VALUE: True, UNIT: TYPE_BOOL},
TYPE_ASSET: asset_type,
}

if inflow_direction not in dict_values[ENERGY_BUSSES]:
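Illustration only (not part of this commit): a reduced sketch, under assumed dictionary shapes and constant values, of how the new `asset_type` keyword travels from a parent DSO dictionary into the auto-created sinks, so that results can later be grouped by asset type.

```python
# Reduced stand-in for define_sink(); only the TYPE_ASSET handling is shown.
TYPE_ASSET = "type_asset"  # assumed value of the TYPE_ASSET constant (illustration only)


def define_sink_sketch(dict_values, asset_key, asset_type=None, **kwargs):
    """Record the asset type on the auto-created sink so that downstream
    result processing can label its flows."""
    sink = {"label": asset_key + "_sink", TYPE_ASSET: asset_type}
    dict_values.setdefault("energyConsumption", {})[sink["label"]] = sink
    return sink


dict_values = {}

# Excess sinks are tagged explicitly ...
define_sink_sketch(dict_values, "Electricity_excess", asset_type="excess")

# ... while DSO feed-in sinks inherit the provider's type; dict.get() keeps the
# call safe when an input file has no TYPE_ASSET entry (asset_type stays None).
dso_dict = {"energyVector": "Electricity"}  # deliberately without a TYPE_ASSET key
define_sink_sketch(dict_values, "DSO_feedin", asset_type=dso_dict.get(TYPE_ASSET))
```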
2 changes: 1 addition & 1 deletion src/multi_vector_simulator/D1_model_components.py
@@ -776,7 +776,7 @@ def transformer_constant_efficiency_optimize(model, dict_asset, **kwargs):

bus = dict_asset[INFLOW_DIRECTION]
inputs = {
kwargs[OEMOF_BUSSES][dict_asset[INFLOW_DIRECTION]]: solph.Flow(
kwargs[OEMOF_BUSSES][bus]: solph.Flow(
investment=investment if bus == investment_bus else None
)
}
15 changes: 14 additions & 1 deletion src/multi_vector_simulator/E1_process_results.py
@@ -44,6 +44,7 @@
ENERGY_PRODUCTION,
ENERGY_STORAGE,
OEMOF_ASSET_TYPE,
INVESTMENT_BUS,
ENERGY_VECTOR,
KPI,
KPI_COST_MATRIX,
@@ -480,7 +481,7 @@ def get_results(settings, bus_data, dict_asset, asset_group):
# Check if the parameter/bus is defined for dict_asset
if parameter_to_be_evaluated not in dict_asset:
logging.warning(
f"The asset {dict_asset[LCOE_ASSET]} of group {asset_group} should contain parameter {parameter_to_be_evaluated}, but it does not."
f"The asset {dict_asset[LABEL]} of group {asset_group} should contain parameter {parameter_to_be_evaluated}, but it does not."
)

# Determine bus that needs to be evaluated
@@ -492,6 +493,17 @@
dict_asset[LABEL], asset_group, bus_name
)

investment_bus = dict_asset.get(INVESTMENT_BUS)
if investment_bus is not None:
bus_name = investment_bus
logging.info(
f"The asset {dict_asset[LABEL]} of group {asset_group} had 'investment_bus' set to '{investment_bus}'"
)
if investment_bus in dict_asset.get(INFLOW_DIRECTION, []):
flow_tuple = (bus_name, dict_asset[LABEL])
elif investment_bus in dict_asset.get(OUTFLOW_DIRECTION, []):
flow_tuple = (dict_asset[LABEL], bus_name)

# Get flow information
get_flow(
settings=settings,
@@ -640,6 +652,7 @@ def get_optimal_cap(bus, dict_asset, flow_tuple):
"""
if OPTIMIZE_CAP in dict_asset:

if (
dict_asset[OPTIMIZE_CAP][VALUE] is True
and (flow_tuple, OEMOF_INVEST) in bus[OEMOF_SCALARS]
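Illustration only (not part of this commit): a simplified version of the investment-bus override added to `get_results` above, with plain strings in place of the MVS constants and an invented asset.

```python
# Sketch of the investment-bus handling: when the user pins the investment on a
# specific bus, the flow read from the oemof results must point at that bus,
# and the tuple orientation depends on whether the bus feeds the asset or not.

def investment_flow_tuple(dict_asset):
    """Return the (from, to) flow key on which the investment was placed,
    or None if the asset does not pin the investment to a specific bus."""
    investment_bus = dict_asset.get("investment_bus")
    if investment_bus is None:
        return None
    label = dict_asset["label"]
    if investment_bus in dict_asset.get("inflow_direction", []):
        return (investment_bus, label)   # bus feeds the asset: flow bus -> asset
    if investment_bus in dict_asset.get("outflow_direction", []):
        return (label, investment_bus)   # asset feeds the bus: flow asset -> bus
    return None


# A transformer with two inputs whose capacity is optimized on one of them:
asset = {
    "label": "Electrolyser",
    "inflow_direction": ["Electricity", "H2O"],
    "outflow_direction": "H2",
    "investment_bus": "H2O",
}
assert investment_flow_tuple(asset) == ("H2O", "Electrolyser")
```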
153 changes: 152 additions & 1 deletion src/multi_vector_simulator/server.py
@@ -39,6 +39,7 @@
import tempfile

from oemof.tools import logger
import oemof.solph as solph

# Loading all child functions
import multi_vector_simulator.B0_data_input_json as B0
@@ -59,8 +60,133 @@
OUTPUT_LP_FILE,
VALUE,
UNIT,
ENERGY_BUSSES,
AUTO_CREATED_HIGHLIGHT,
ENERGY_VECTOR,
TYPE_ASSET,
OEMOF_ASSET_TYPE,
ENERGY_PRODUCTION,
ENERGY_CONSUMPTION,
ENERGY_CONVERSION,
ENERGY_PROVIDERS,
ENERGY_STORAGE,
TIMESERIES_PEAK,
OPTIMIZE_CAP,
OPTIMIZED_ADD_CAP,
VALUE,
)
from multi_vector_simulator.utils.constants import TYPE_STR
from multi_vector_simulator.utils.helpers import get_asset_types


import pandas as pd


def bus_flow(
flow_tuple, busses_info, asset_types=None
): # can work as well with nodes (assets)
if not isinstance(busses_info, dict):
raise ValueError("Expected a dict")
busses_names = [bn for bn in busses_info]
bus_name = set(busses_names).intersection(set(flow_tuple))
answer = None
if len(bus_name) == 1:
bus_name = bus_name.pop()
idx_bus = flow_tuple.index(bus_name)
if idx_bus == 0:
asset_name = flow_tuple[1]
answer = (bus_name, busses_info[bus_name][ENERGY_VECTOR], "out", asset_name)
elif idx_bus == 1:
asset_name = flow_tuple[0]
answer = (bus_name, busses_info[bus_name][ENERGY_VECTOR], "in", asset_name)
if asset_types is not None:
df_at = pd.DataFrame.from_records(asset_types).set_index("label")
answer = answer + (
df_at.loc[asset_name, TYPE_ASSET],
df_at.loc[asset_name, OEMOF_ASSET_TYPE],
)
return answer


class OemofBusResults(pd.DataFrame): # real results
def __init__(self, results, busses_info=None, asset_types=None):
# TODO add a division by timeseries peak
if isinstance(results, dict):
ts = []
investments = []
flows = []
for x, res in solph.views.convert_keys_to_strings(results).items():
if x[1] != "None":
col_name = res["sequences"].columns[0]
ts.append(
res["sequences"].rename(
columns={col_name: x, "variable_name": "timesteps"}
)
)
flows.append(bus_flow(x, busses_info, asset_types))
invest = (
None if res["scalars"].empty is True else res["scalars"].invest
)
investments.append(invest)
ts_df = pd.concat(ts, axis=1, join="inner")
mindex = pd.MultiIndex.from_tuples(
flows,
names=[
"bus",
"energy_vector",
"direction",
"asset",
"asset_type",
"oemof_type",
],
)

elif isinstance(results, str):
js = json.loads(results)
mindex = pd.MultiIndex.from_tuples(
js["columns"],
names=[
"bus",
"energy_vector",
"direction",
"asset",
"asset_type",
"oemof_type",
],
)
df = pd.DataFrame(data=js["data"], columns=mindex)

ts_df = df.iloc[:-1]
ts_index = pd.to_datetime(js["index"][:-1], unit="ms")
investments = df.iloc[-1]
ts_df.index = ts_index

super().__init__(
data=ts_df.T.to_dict(orient="split")["data"],
index=mindex,
columns=ts_df.index,
)

self["investments"] = investments
self.sort_index(inplace=True)

def to_json(self, **kwargs):
kwargs["orient"] = "split"
return self.T.to_json(**kwargs)

def bus_flows(self, bus_name):
return self.loc[bus_name, self.columns != "investments"].T

def asset_optimized_capacities(self):
return self.loc[:, "investments"]

def asset_optimized_capacity(self, asset_name):
optimized_capacity = self.loc[
self.index.get_level_values("asset") == asset_name, "investments"
].dropna()
if len(optimized_capacity) == 1:
optimized_capacity = optimized_capacity[0]
return optimized_capacity


def run_simulation(json_dict, epa_format=True, **kwargs):
@@ -137,6 +263,12 @@ def run_simulation(json_dict, epa_format=True, **kwargs):
dict_values, return_les=True
)

br = OemofBusResults(
results_main,
busses_info=dict_values[ENERGY_BUSSES],
asset_types=get_asset_types(dict_values),
) # if AUTO_CREATED_HIGHLIGHT not in bl])

if lp_file_output is True:
logging.debug("Saving the content of the model's lp file")
with tempfile.TemporaryDirectory() as tmpdirname:
@@ -154,14 +286,33 @@
logging.debug("Accessing script: E0_evaluation")
E0.evaluate_dict(dict_values, results_main, results_meta)

# Correct the optimized values
for asset_group in [
ENERGY_PRODUCTION,
ENERGY_CONSUMPTION,
ENERGY_CONVERSION,
ENERGY_PROVIDERS,
ENERGY_STORAGE,
]:
for asset_name, asset in dict_values[asset_group].items():
if (
asset.get(OPTIMIZE_CAP, {VALUE: False}).get(VALUE, False) is True
and TIMESERIES_PEAK in asset
):
corrected_optimized_capacity = asset[OPTIMIZED_ADD_CAP][VALUE]
br.loc[
br.index.get_level_values("asset") == asset_name, "investments"
] = corrected_optimized_capacity

dict_values["raw_results"] = br.to_json() # to_dict(orient="split") #

logging.debug("Convert results to json")

if epa_format is True:
epa_dict_values = data_parser.convert_mvs_params_to_epa(dict_values)

json_values = F0.store_as_json(epa_dict_values)
answer = json.loads(json_values)

else:
answer = dict_values

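Illustration only (not part of this commit): a hedged usage sketch of the new `OemofBusResults` wrapper. `results_main` and `dict_values` are assumed to come from a prior simulation (as inside `run_simulation` above), the bus and asset names are placeholders, and the import path for `ENERGY_BUSSES` is an assumption.

```python
# Usage sketch for OemofBusResults, a pandas DataFrame subclass indexed by
# (bus, energy_vector, direction, asset, asset_type, oemof_type); its columns
# are the simulation timesteps plus one "investments" column.

from multi_vector_simulator.server import OemofBusResults
from multi_vector_simulator.utils.helpers import get_asset_types
from multi_vector_simulator.utils.constants_json_strings import ENERGY_BUSSES  # assumed path


def summarize_raw_results(results_main, dict_values, bus_name, asset_name):
    br = OemofBusResults(
        results_main,                              # raw oemof.solph results dict
        busses_info=dict_values[ENERGY_BUSSES],    # bus label -> bus info (energy vector, ...)
        asset_types=get_asset_types(dict_values),  # records with label / type_asset / oemof type
    )

    flows_on_bus = br.bus_flows(bus_name)          # timesteps x assets frame for one bus
    all_caps = br.asset_optimized_capacities()     # the "investments" column
    one_cap = br.asset_optimized_capacity(asset_name)

    # Round trip through JSON; this is what run_simulation() stores under
    # dict_values["raw_results"]:
    restored = OemofBusResults(br.to_json())

    return flows_on_bus, all_caps, one_cap, restored
```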
30 changes: 21 additions & 9 deletions src/multi_vector_simulator/utils/__init__.py
@@ -354,15 +354,27 @@ def compare_input_parameters_with_reference(
)
elif set_default is True:

main_parameters[mp][k][sp] = {
VALUE: PARAMETERS_DOC.get_doc_default(sp),
UNIT: PARAMETERS_DOC.get_doc_unit(sp),
}
logging.warning(
f"You are not providing a value for the parameter '{sp}' of asset '{k}' in asset group '{mp}'"
+ f"This parameter is then set to it's default value ({PARAMETERS_DOC.get_doc_default(sp)}).\n"
+ PARAMETERS_DOC.where_to_find_param_documentation
)
if k == "non-asset":
main_parameters[mp][sp] = {
VALUE: PARAMETERS_DOC.get_doc_default(sp),
UNIT: PARAMETERS_DOC.get_doc_unit(sp),
}
logging.warning(
f"You are not providing a value for the parameter '{sp}' in parameter group '{mp}'"
+ f"This parameter is then set to it's default value ({PARAMETERS_DOC.get_doc_default(sp)}).\n"
+ PARAMETERS_DOC.where_to_find_param_documentation
)
else:
main_parameters[mp][k][sp] = {
VALUE: PARAMETERS_DOC.get_doc_default(sp),
UNIT: PARAMETERS_DOC.get_doc_unit(sp),
}

logging.warning(
f"You are not providing a value for the parameter '{sp}' of asset '{k}' in asset group '{mp}'"
+ f"This parameter is then set to it's default value ({PARAMETERS_DOC.get_doc_default(sp)}).\n"
+ PARAMETERS_DOC.where_to_find_param_documentation
)
else:
# the sub parameter is not provided but is required --> missing
param_list = missing_parameters.get(mp, [])