Skip to content

Commit

Permalink
Merge pull request #1178 from yerbol-akhmetov/subworkflow_sector
Browse files Browse the repository at this point in the history
Enable subworkflow import for sector rules
  • Loading branch information
davide-f authored Nov 11, 2024
2 parents ca31dfe + a59c02d commit d9a364c
Show file tree
Hide file tree
Showing 8 changed files with 68 additions and 37 deletions.
18 changes: 10 additions & 8 deletions doc/release_notes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -20,24 +20,26 @@ E.g. if a new rule becomes available describe how to use it `make test` and in o

* Integrate RDIR into sector rules to store intermediate data in scenario folders `PR #1154 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1154>`__

**Minor Changes and bug-fixing**

* The default configuration for `electricity:estimate_renewable_capacities:year` was updated from 2020 to 2023. `PR #1106 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1106>`__

* Include a dedicated cutout for North America in bundle_config.yaml `PR #1121 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1121>`__

* Include a dedicated cutout for Europe in bundle_config.yaml `PR #1125 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1125>`__

* Include a dedicated cutout for Oceania in bundle_config.yaml `PR #1157 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1157>`__

* Use BASE_DIR in rules and `_helpers.py` script to facilitate module import in subworkflow `PR #1137 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1137>`__

* Enable sector rules import in subworkflow `PR #1178 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1178>`__

**Minor Changes and bug-fixing**

* The default configuration for `electricity:estimate_renewable_capacities:year` was updated from 2020 to 2023. `PR #1106 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1106>`__

* Fix the mismatch between buses and x, y locations while creating H2 Stores `PR #1134 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1134>`__

* Enable configfile specification for mock_snakemake `PR #1135 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1135>`__

* Fix pre-commit docformatter python issue. `PR #1153 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1153>`__

* Use BASE_DIR in rules and `_helpers.py` script to facilitate module import in subworkflow `PR #1137 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1137>`__

* Include a dedicated cutout for Oceania in bundle_config.yaml `PR #1157 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1157>`__

* Drop duplicate entries in `AL_production.csv` data used in `build_industry_demand` rule `PR #1143 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1143>`__

* The computation of `hydro_profile.nc` in `build_renewable_profiles.py` is not differentiated whether alternative clustering is applied or not; the indexing of the different power plants in `add_electricity.py` is performed according to the bus either in case alternative clustering is applied or not and a `hydro_inflow_factor` is computed prior to the computation of `inflow_t` to split the inflow according to the capacity of each different unit of each power plant (if more units are present). `PR #1119 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1119>`__
Expand Down
16 changes: 10 additions & 6 deletions scripts/build_base_energy_totals.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
import pandas as pd
import py7zr
import requests
from _helpers import aggregate_fuels, get_conv_factors
from _helpers import BASE_DIR, aggregate_fuels, get_conv_factors

_logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -375,7 +375,7 @@ def calc_sector(sector):
if snakemake.params.update_data:
        # Delete any existing files to avoid duplication and double counting

files = glob.glob("data/demand/unsd/data/*.txt")
files = glob.glob(os.path.join(BASE_DIR, "data/demand/unsd/data/*.txt"))
for f in files:
os.remove(f)

Expand All @@ -385,12 +385,14 @@ def calc_sector(sector):

with urlopen(zipurl) as zipresp:
with ZipFile(BytesIO(zipresp.read())) as zfile:
zfile.extractall("data/demand/unsd/data")
zfile.extractall(os.path.join(BASE_DIR, "data/demand/unsd/data"))

path = "data/demand/unsd/data"
path = os.path.join(BASE_DIR, "data/demand/unsd/data")

# Get the files from the path provided in the OP
all_files = list(Path("data/demand/unsd/data").glob("*.txt"))
all_files = list(
Path(os.path.join(BASE_DIR, "data/demand/unsd/data")).glob("*.txt")
)

# Create a dataframe from all downloaded files
df = pd.concat(
Expand Down Expand Up @@ -433,7 +435,9 @@ def calc_sector(sector):
df_yr = df_yr[df_yr.country.isin(countries)]

# Create an empty dataframe for energy_totals_base
energy_totals_cols = pd.read_csv("data/energy_totals_DF_2030.csv").columns
energy_totals_cols = pd.read_csv(
os.path.join(BASE_DIR, "data/energy_totals_DF_2030.csv")
).columns
energy_totals_base = pd.DataFrame(columns=energy_totals_cols, index=countries)

# Lists that combine the different fuels in the dataset to the model's carriers
Expand Down
14 changes: 10 additions & 4 deletions scripts/build_industry_demand.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from itertools import product

import pandas as pd
from _helpers import mock_snakemake, read_csv_nafix
from _helpers import BASE_DIR, mock_snakemake, read_csv_nafix

_logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -69,8 +69,12 @@ def country_to_nodal(industrial_production, keys):
)

industry_demand = pd.read_csv(
"data/custom/industry_demand_{0}_{1}.csv".format(
snakemake.wildcards["demand"], snakemake.wildcards["planning_horizons"]
os.path.join(
BASE_DIR,
"data/custom/industry_demand_{0}_{1}.csv".format(
snakemake.wildcards["demand"],
snakemake.wildcards["planning_horizons"],
),
),
index_col=[0, 1],
)
Expand Down Expand Up @@ -204,7 +208,9 @@ def match_technology(df):
geo_locs = match_technology(geo_locs).loc[countries_geo]

aluminium_year = snakemake.params.aluminium_year
AL = read_csv_nafix("data/AL_production.csv", index_col=0)
AL = read_csv_nafix(
os.path.join(BASE_DIR, "data/AL_production.csv"), index_col=0
)
# Filter data for the given year and countries
AL_prod_tom = AL.query("Year == @aluminium_year and index in @countries_geo")[
"production[ktons/a]"
Expand Down
12 changes: 8 additions & 4 deletions scripts/copy_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,15 @@
import os
from shutil import copy

from _helpers import BASE_DIR

files_to_copy = {
"./config.yaml": "config.yaml",
"./Snakefile": "Snakefile",
"./scripts/solve_network.py": "solve_network.py",
"./scripts/prepare_sector_network.py": "prepare_sector_network.py",
os.path.join(BASE_DIR, "./config.yaml"): "config.yaml",
os.path.join(BASE_DIR, "./Snakefile"): "Snakefile",
os.path.join(BASE_DIR, "./scripts/solve_network.py"): "solve_network.py",
os.path.join(
BASE_DIR, "./scripts/prepare_sector_network.py"
): "prepare_sector_network.py",
}

if __name__ == "__main__":
Expand Down
6 changes: 4 additions & 2 deletions scripts/prepare_energy_totals.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
import pandas as pd
import py7zr
import requests
from _helpers import read_csv_nafix, three_2_two_digits_country
from _helpers import BASE_DIR, read_csv_nafix, three_2_two_digits_country

_logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -53,7 +53,9 @@ def calculate_end_values(df):
investment_year = int(snakemake.wildcards.planning_horizons)
    demand_sc = snakemake.wildcards.demand  # loading the demand scenario wildcard

base_energy_totals = read_csv_nafix("data/energy_totals_base.csv", index_col=0)
base_energy_totals = read_csv_nafix(
os.path.join(BASE_DIR, "data/energy_totals_base.csv"), index_col=0
)
growth_factors_cagr = read_csv_nafix(
snakemake.input.growth_factors_cagr, index_col=0
)
Expand Down
16 changes: 12 additions & 4 deletions scripts/prepare_gas_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,12 @@
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import pandas as pd
from _helpers import content_retrieve, progress_retrieve, two_2_three_digits_country
from _helpers import (
BASE_DIR,
content_retrieve,
progress_retrieve,
two_2_three_digits_country,
)
from build_shapes import gadm
from matplotlib.lines import Line2D
from pyproj import CRS
Expand Down Expand Up @@ -58,8 +63,8 @@ def download_IGGIELGN_gas_network():
url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip"

# Save locations
zip_fn = Path("IGGIELGN.zip")
to_fn = Path("data/gas_network/scigrid-gas")
zip_fn = Path(os.path.join(BASE_DIR, "IGGIELGN.zip"))
to_fn = Path(os.path.join(BASE_DIR, "data/gas_network/scigrid-gas"))

logger.info(f"Downloading databundle from '{url}'.")
progress_retrieve(url, zip_fn)
Expand Down Expand Up @@ -344,6 +349,7 @@ def download_GADM(country_code, update=False, out_logging=False):
GADM_filename = get_GADM_filename(country_code)

GADM_inputfile_gpkg = os.path.join(
BASE_DIR,
"data",
"gadm",
GADM_filename,
Expand Down Expand Up @@ -887,7 +893,9 @@ def check_existence(row):
elif snakemake.params.gas_config["network_data"] == "IGGIELGN":
download_IGGIELGN_gas_network()

gas_network = "data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson"
gas_network = os.path.join(
BASE_DIR, "data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson"
)

pipelines = load_IGGIELGN_data(gas_network)
pipelines = prepare_IGGIELGN_data(pipelines)
Expand Down
17 changes: 12 additions & 5 deletions scripts/prepare_sector_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import ruamel.yaml
import xarray as xr
from _helpers import (
BASE_DIR,
create_dummy_data,
create_network_topology,
cycling_shift,
Expand Down Expand Up @@ -322,8 +323,11 @@ def add_hydrogen(n, costs):
if snakemake.config["sector"]["hydrogen"]["underground_storage"]:
if snakemake.config["custom_data"]["h2_underground"]:
custom_cavern = pd.read_csv(
"data/custom/h2_underground_{0}_{1}.csv".format(
demand_sc, investment_year
os.path.join(
BASE_DIR,
"data/custom/h2_underground_{0}_{1}.csv".format(
demand_sc, investment_year
),
)
)
# countries = n.buses.country.unique().to_list()
Expand Down Expand Up @@ -2661,9 +2665,12 @@ def add_residential(n, costs):
def add_custom_water_cost(n):
for country in countries:
water_costs = pd.read_csv(
"resources/custom_data/{}_water_costs.csv".format(country),
sep=",",
index_col=0,
os.path.join(
BASE_DIR,
"resources/custom_data/{}_water_costs.csv".format(country),
sep=",",
index_col=0,
)
)
water_costs = water_costs.filter(like=country, axis=0).loc[spatial.nodes]
electrolysis_links = n.links.filter(like=country, axis=0).filter(
Expand Down
6 changes: 2 additions & 4 deletions scripts/prepare_transport_data_input.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,7 @@
import country_converter as coco
import numpy as np
import pandas as pd

# from _helpers import configure_logging

from _helpers import BASE_DIR

# logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -127,7 +125,7 @@ def download_CO2_emissions():

if vehicles_csv.empty or CO2_emissions_csv.empty:
# In case one of the urls is not working, we can use the hard-coded data
src = os.getcwd() + "/data/temp_hard_coded/transport_data.csv"
src = BASE_DIR + "/data/temp_hard_coded/transport_data.csv"
dest = snakemake.output.transport_data_input
shutil.copy(src, dest)
else:
Expand Down

0 comments on commit d9a364c

Please sign in to comment.