add energy storage interface #567

Draft · wants to merge 35 commits into base: main
Changes from all commits (35 commits)
f59e9cb
add pypsa-eur fixes for add_extra_components
pz-max Jan 15, 2023
2cdbdc2
restructure code to read config from main
pz-max Jan 16, 2023
f87c389
add general energy storage interface in add_electricity
pz-max Jan 16, 2023
9823521
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 16, 2023
4d3ba14
add storage unit in add_extra_component
pz-max Jan 17, 2023
6b77699
merge X1 branch
pz-max Jan 17, 2023
9f1f30e
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 17, 2023
9f479f0
rename and clean
pz-max Jan 17, 2023
141ec36
add generic attach store function
pz-max Jan 17, 2023
9e8d98c
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 17, 2023
3519720
add generic storage function in solve_network
pz-max Jan 17, 2023
e1235dd
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 18, 2023
e7aa91d
add fixes
pz-max Jan 19, 2023
3bb0c26
Merge branch 'X1' of https://github.com/pz-max/pypsa-earth into X1
pz-max Jan 19, 2023
19fc88a
Merge branch 'main' into X1
pz-max Jan 24, 2023
bedee1c
add workflow structure
pz-max Jan 24, 2023
b4d1032
implement feature assessment
pz-max Jan 25, 2023
0436656
add changes and merge monte-carlo scripts
pz-max Jan 26, 2023
2362960
Merge branch 'main' into X1
pz-max Jan 26, 2023
b009908
Merge pull request #1 from pz-max/X2
pz-max Jan 26, 2023
897ed02
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 26, 2023
bdf606d
add reviewed changes
pz-max Jan 26, 2023
72eda79
add docstring
pz-max Jan 26, 2023
c317272
add TODO
pz-max Jan 26, 2023
ff28fd3
Merge branch 'main' into X1
pz-max Jan 26, 2023
14fe873
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 26, 2023
b8d4180
Merge branch 'pypsa-meets-earth:main' into X1
pz-max Jan 31, 2023
6838d71
Merge branch 'pypsa-meets-earth:main' into X1
pz-max Feb 3, 2023
17c30e5
Merge branch 'pypsa-meets-earth:main' into X1
pz-max Feb 8, 2023
8e6d6d1
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 8, 2023
99cf8df
enable monte in scenario management
pz-max Feb 9, 2023
25ce05f
Merge branch 'X1' of https://github.com/pz-max/pypsa-earth into X1
pz-max Feb 9, 2023
322652f
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 9, 2023
11656a9
Merge branch 'main' into X1
pz-max May 24, 2023
8840365
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] May 24, 2023
115 changes: 55 additions & 60 deletions Snakefile
@@ -14,6 +14,7 @@ from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider
from scripts._helpers import create_country_list
from scripts.build_demand_profiles import get_load_paths_gegis
from scripts.retrieve_databundle_light import datafiles_retrivedatabundle
from scripts.monte_carlo import wildcard_creator
from pathlib import Path

HTTP = HTTPRemoteProvider()
@@ -33,9 +34,8 @@ config["countries"] = create_country_list(config["countries"])

# create a list of iteration steps, required to solve the experimental design
# each value is used as wildcard input e.g. solution_{unc}
config["scenario"]["unc"] = [
f"m{i}" for i in range(config["monte_carlo"]["options"]["samples"])
]
if config["monte_carlo"].get("add_to_snakefile", False) == True:
config["scenario"]["unc"] = wildcard_creator(config)

run = config.get("run", {})
RDIR = run["name"] + "/" if run.get("name") else ""
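
For orientation, a minimal sketch of what wildcard_creator could do for the "global_sensitivity" case, based on the comprehension it replaces above (the real implementation lives in scripts/monte_carlo.py and is not shown in this diff, so treat the body as an assumption):

def wildcard_creator(config):
    # Build one wildcard per Monte-Carlo sample, e.g. ["m0", "m1", ..., "m6"],
    # so each sampled network can be addressed via the {unc} wildcard.
    samples = config["monte_carlo"]["options"]["samples"]
    return [f"m{i}" for i in range(samples)]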
@@ -666,41 +666,7 @@ def memory(w):
return int(factor * (10000 + 195 * int(w.clusters)))


if config["monte_carlo"]["options"].get("add_to_snakefile", False) == False:

rule solve_network:
input:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
"results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
solver=normpath(
"logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
),
python="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
memory="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
benchmark:
(
"benchmarks/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 20
resources:
mem=memory,
shadow:
"shallow"
script:
"scripts/solve_network.py"


if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True:
if config["monte_carlo"].get("add_to_snakefile", False) == True:

rule monte_carlo:
input:
@@ -723,18 +689,10 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True:
script:
"scripts/monte_carlo.py"

rule solve_monte:
input:
expand(
"networks/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc",
**config["scenario"]
),

rule solve_network:
input:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc",
tech_costs=COSTS,
output:
"results/"
+ RDIR
@@ -774,6 +732,40 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True:
**config["scenario"]
),

else:

rule solve_network:
input:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
tech_costs=COSTS,
output:
"results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
solver=normpath(
"logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
),
python="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
memory="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
benchmark:
(
"benchmarks/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 20
resources:
mem=memory,
shadow:
"shallow"
script:
"scripts/solve_network.py"


def input_make_summary(w):
# It's mildly hacky to include the separate costs input as first entry
@@ -888,26 +880,29 @@ rule run_scenario:
resources:
mem_mb=5000,
run:
from scripts.build_test_configs import create_test_config
import yaml

# get base configuration file from diff config
with open(input.diff_config) as f:
base_config_path = (
yaml.full_load(f)
.get("run", {})
.get("base_config", "config.tutorial.yaml")
)
pz-max (Member, Author) commented on lines -891 to -900:

Hi @davide-f, we already had a review iteration. I am not sure about this bit, @davide-f. I will check this PR over the next few weeks and ping you if a review is needed. It would be good to get this monster merged during the next month.

from scripts.build_test_configs import (
create_test_config,
_parse_inputconfig,
)
from ruamel.yaml import YAML

# Ensure the scenario name matches the name of the configuration
# Ensure the scenario name matches the name of the configuration
create_test_config(
input.diff_config,
{"run": {"name": wildcards.scenario_name}},
input.diff_config,
)
# merge the default config file with the difference
create_test_config(base_config_path, input.diff_config, "config.yaml")
os.system("snakemake -j all solve_all_networks --rerun-incomplete")
create_test_config(input.default_config, input.diff_config, "config.yaml")
config = _parse_inputconfig("config.yaml", YAML())
if config["monte_carlo"].get("add_to_snakefile", False) == True:
os.system(
"snakemake -j all solve_all_networks_monte --forceall --rerun-incomplete"
)
else:
os.system(
"snakemake -j all solve_all_networks --forceall --rerun-incomplete"
)
os.system("snakemake -j1 make_statistics --force")
copyfile("config.yaml", output.copyconfig)

14 changes: 8 additions & 6 deletions config.default.yaml
@@ -329,18 +329,20 @@ costs:


monte_carlo:
add_to_snakefile: true
options:
add_to_snakefile: false
samples: 7 # number of optimizations
sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
method: "global_sensitivity" # or single_best_in_worst for technology assessment
samples: 7 # number of optimizations
sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
pypsa_standard:
# User can add here flexibly more features for the Monte-Carlo sampling.
# Given as "key: value" format
# Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering!
# Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object
loads_t.p_set: [0.9, 1.1]
generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1]
# Examples:
# loads_t.p_set: [0.9, 1.1]
# generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
stores.capital_cost: [0.8, 1.2] # for single_best_in_worst


solving:
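To illustrate how a pypsa_standard entry of the form <pypsa object>: [l_bound, u_bound] can be read as a multiplication factor on a network attribute, a minimal sketch (the network path, the apply_multiplier helper and the use of eval are assumptions for illustration only; keys that include .loc[...] filtering would need a more general evaluation):

import pypsa

def apply_multiplier(n, attr_path, factor):
    # attr_path is a key from pypsa_standard, e.g. "loads_t.p_set";
    # factor is one value drawn from the [l_bound, u_bound] interval.
    parent_path, attr = attr_path.rsplit(".", 1)
    parent = eval(f"n.{parent_path}")  # e.g. n.loads_t
    setattr(parent, attr, getattr(parent, attr) * factor)

n = pypsa.Network("networks/elec_s_10_ec_lcopt_Co2L.nc")  # hypothetical path
apply_multiplier(n, "loads_t.p_set", 1.1)  # scale demand by +10 % for one sample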
14 changes: 8 additions & 6 deletions config.tutorial.yaml
@@ -325,18 +325,20 @@ costs:


monte_carlo:
add_to_snakefile: true
options:
add_to_snakefile: false
samples: 7 # number of optimizations
sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
method: "global_sensitivity" # or single_best_in_worst for technology assessment
samples: 7 # number of optimizations
sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
pypsa_standard:
# User can add here flexibly more features for the Monte-Carlo sampling.
# Given as "key: value" format
# Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering!
# Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object
loads_t.p_set: [0.9, 1.1]
generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1]
# Examples:
# loads_t.p_set: [0.9, 1.1]
# generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
stores.capital_cost: [0.8, 1.2] # for single_best_in_worst


solving:
16 changes: 9 additions & 7 deletions doc/configtables/monte-carlo.csv
@@ -1,7 +1,9 @@
,Unit,Values,Description
options,,,
-- add_to_snakemake,,true or false,Add rule to Snakefile or not
-- samples,,"int** read description!", "Defines the number of total sample networks that will be later optimized. if orthogonal latin hypercube sampling is applied (default option in scripts), then a prime number, that is not a quadrat of a prime, needs to be chosen. E.g. 7 not 9 (3^2)"
-- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design
pypsa_standard,,,
-- <any pypsa.object syntax>,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object."
,Unit,Values,Description,,,
add_to_snakemake,,true or false,Add rule to Snakefile or not,,,
options,,,,,,
-- method,,"""global_sensitivity"" or ""single_best_in_worst""","""global_sensitivity"" creates scenarios in a Latin hypercube space; ""single_best_in_worst"" creates scenarios where all values are pessimistic and only one is optimistic",,,
-- samples,,integer,"Defines the total number of sample networks that will later be optimized. If orthogonal Latin hypercube sampling is applied (the default option in the scripts), then a prime number that is not the square of a prime needs to be chosen, e.g. 7, not 9 (3^2)",,,
-- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design,,,
pypsa_standard,,,,,,
-- <any pypsa.object syntax>,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows access to any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication factors for each object. Method ""any_chance_store_test"" only supports e.g. n.stores = [0.7,1.3]",,,
,,,,,,
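
As a rough illustration of the experimental design that the samples and sampling_strategy options configure, a Latin hypercube draw with scipy, scaled to [l_bound, u_bound] ranges like those in the config above (a sketch only; the bounds and feature order are examples, and the workflow may use pydoe2 or chaospy instead):

from scipy.stats import qmc

# Two uncertain features, e.g. loads_t.p_set and stores.capital_cost
l_bounds = [0.9, 0.8]
u_bounds = [1.1, 1.2]
samples = 7  # one optimisation per sample, as in the config

lhs = qmc.LatinHypercube(d=len(l_bounds), seed=42)
unit_sample = lhs.random(n=samples)                   # points in [0, 1)^2
factors = qmc.scale(unit_sample, l_bounds, u_bounds)  # multipliers per sample and feature
print(factors.shape)  # (7, 2)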
98 changes: 98 additions & 0 deletions scripts/_helpers.py
@@ -613,6 +613,104 @@ def read_geojson(fn):
return gpd.GeoDataFrame(geometry=[])


def nested_storage_dict(tech_costs):
"""
Create a nested dictionary with a storage index and meta data relation.

The costs.csv file from the technology_data interface contains metadata
for storage technologies when the PNNL data extraction is activated (PR #67).
The metadata is stored in the csv as a nested dictionary value, which is
read out by this function and converted to a nested dictionary for further
use. One example use the metadata enables is the automatic creation of
energy storage in the model from the config.yaml.

Input:
------
tech_costs: str, path to technology costs.csv file

Output:
-------
nested_dict: dict, nested dictionary with storage index and meta data relation
storage_techs: list, list of unique storage technologies

Example:
--------
Data format in Input:
costs['further description'][0] -> {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']} with index 'Ni-Zn-bicharger'
costs['further description'][1] -> {'carrier': ['nizn'], 'technology_type': ['store'], 'type': ['electrochemical']} with index 'Ni-Zn-store'

Output:
.. code-block:: python
{
'Ni-Zn-bicharger': {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']}
'Ni-Zn-store': {'carrier': ['nizn'], 'technology_type': ['store'], 'type': ['electrochemical']}
...
}
"""
import ast

df = pd.read_csv(
tech_costs,
index_col=["technology"],
usecols=["technology", "further description"],
).sort_index()
df = df[df["further description"].str.contains("{'carrier':", na=False)]
storage_techs = df.index.unique()
nested_storage_dict = {}
if df.empty:
print("No storage technology found in costs.csv")
else:
for i in range(len(df)):
storage_dict = ast.literal_eval(
df.iloc[i, 0]
) # https://stackoverflow.com/a/988251/13573820
storage_dict.pop("note", None)
nested_storage_dict[df.index[i]] = storage_dict
return [nested_storage_dict, storage_techs]


def add_storage_col_to_costs(costs, storage_meta_dict, storage_techs):
"""
Add storage specific columns e.g. "carrier", "type", "technology_type" to costs.csv

Input:
------
costs: pd.DataFrame, costs.csv
storage_meta_dict: dict, nested dictionary with storage index and meta data relation
storage_techs: list, list of unique storage technologies

Output:
-------
costs: pd.DataFrame, costs.csv with added storage specific columns

Example:
--------
From the nested dictionary:
{
'Ni-Zn-bicharger': {'carrier': ['elec', 'nizn', 'elec'], 'technology_type': ['bicharger'], 'type': ['electrochemical']}
...
}
The columns "carrier", "type", "technology_type" will be added to costs.csv
"""
# add storage specific columns to costs.csv
for c in ["carrier", "technology_type", "type"]:
costs.loc[storage_techs, c] = [
storage_meta_dict[X][c] for X in costs.loc[storage_techs].index
]
# remove all 'elec's from carrier columns and read carrier as string
for i in range(len(costs.loc[storage_techs])):
costs.loc[storage_techs[i], "carrier"] = "".join(
[e for e in costs.loc[storage_techs].carrier.iloc[i] if e != "elec"]
)
costs.loc[storage_techs[i], "technology_type"] = "".join(
costs.loc[storage_techs].technology_type.iloc[i]
)
costs.loc[storage_techs[i], "type"] = "".join(
costs.loc[storage_techs].type.iloc[i]
)
return costs


def create_country_list(input, iso_coding=True):
"""
Create a country list for defined regions in config_osm_data.py
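A short usage sketch of the two new helpers together (the costs.csv path is hypothetical; the column values follow the docstring examples above):

import pandas as pd
from scripts._helpers import nested_storage_dict, add_storage_col_to_costs

tech_costs = "resources/costs.csv"  # hypothetical path
costs = pd.read_csv(tech_costs, index_col="technology")
storage_meta_dict, storage_techs = nested_storage_dict(tech_costs)
costs = add_storage_col_to_costs(costs, storage_meta_dict, storage_techs)
print(costs.loc["Ni-Zn-store", ["carrier", "technology_type", "type"]])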