Skip to content

Commit

Permalink
Merge branch '351-introduce-linting' into 'develop'
Browse files Browse the repository at this point in the history
Introduce mandatory linting for FINE

See merge request iek-3/shared-code/fine!356
  • Loading branch information
JohannesBehrens committed Aug 6, 2024
2 parents 2f00422 + 37ff838 commit 307540b
Show file tree
Hide file tree
Showing 15 changed files with 237 additions and 226 deletions.
38 changes: 37 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,9 @@ If you want to use ETHOS.FINE in a published work, please [**kindly cite followi
There are several options for the installation of ETHOS.FINE. You can install it via PyPI or from conda-forge.
For detailed information, have a look at the [installation documentation](https://vsa-fine.readthedocs.io/en/latest/installationDoc.html).

If you would like to use ETHOS.FINE for your analysis, we recommend to install it directly from conda-forge into a new Python environment with
NOTE: If you want to work on the source code of FINE, see [Editable install from conda-forge](#editable-install-from-conda-forge).

If you would like to run ETHOS.FINE for your analysis, we recommend installing it directly from conda-forge into a new Python environment with

```bash
mamba create --name fine --channel conda-forge fine
Expand Down Expand Up @@ -63,6 +65,40 @@ A number of [examples](https://github.com/FZJ-IEK3-VSA/FINE/tree/develop/example
- [11_Partload](https://github.com/FZJ-IEK3-VSA/FINE/tree/develop/examples/11_Partload)
- In this application, a hydrogen system is modeled and optimized considering partload behavior of the electrolyzer.

## Notes for developers

### Editable install from conda-forge

It is recommended to create a clean environment with conda to use ETHOS.FINE because it requires many dependencies.

```bash
mamba env create --name fine --file requirements_dev.yml
mamba activate fine
```

Install ETHOS.FINE as an editable install, without checking the dependencies from PyPI, with

```bash
python -m pip install --no-deps --editable .
```

### Editable install from pypi

If you do not want to use conda-forge, consider the steps in section [Installation from pipy](#Installation-from-pipy) and install ETHOS.FINE as an editable install with developer dependencies with

```bash
python -m pip install --editable .[develop]
```

### Good coding style

We use [ruff](https://docs.astral.sh/ruff) to ensure good coding style. Make
sure to use it before contributing to the code base with

```bash
ruff check fine
```

## License

MIT License
Expand Down
15 changes: 7 additions & 8 deletions fine/IOManagement/standardIO.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import fine as fn
import fine.utils as utils
from fine import utils
import pandas as pd
import ast
import inspect
Expand Down Expand Up @@ -1241,13 +1241,12 @@ def plotLocationalColorMap(
unit = " [" + unit + "/" + area_unit + "]"
zlabel = "Installed capacity \n" + unit + "\n"

else:
if zlabel is None:
if isinstance(esM.getComponent(compName), fn.Conversion):
unit = esM.getComponent(compName).physicalUnit
else:
unit = esM.commodityUnitsDict[esM.getComponent(compName).commodity]
zlabel = f"Installed capacity \n [ {unit} ] \n"
elif zlabel is None:
if isinstance(esM.getComponent(compName), fn.Conversion):
unit = esM.getComponent(compName).physicalUnit
else:
unit = esM.commodityUnitsDict[esM.getComponent(compName).commodity]
zlabel = f"Installed capacity \n [ {unit} ] \n"

vmax = gdf["data"].max() if vmax == -1 else vmax

Expand Down
40 changes: 18 additions & 22 deletions fine/IOManagement/utilsIO.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,9 +170,9 @@ def generateIterationDicts(component_dict, investmentPeriods):
# Loop through every class-component-variable combination
for classname in component_dict:
for component in component_dict[classname]:
for variable_description, data in component_dict[classname][
for variable_description in component_dict[classname][
component
].items():
].keys():
# 1. iterate through nested dict levels until constant, series or df, add
# 1. find list of keys in nested dict level
key_lists = getListsOfKeyPathsInNestedDict(
Expand Down Expand Up @@ -210,15 +210,14 @@ def generateIterationDicts(component_dict, investmentPeriods):
description_tuple
)
# 3 add constant
elif _variable_description not in constants_iteration_dict.keys():
constants_iteration_dict[_variable_description] = [
description_tuple
]
else:
if _variable_description not in constants_iteration_dict.keys():
constants_iteration_dict[_variable_description] = [
description_tuple
]
else:
constants_iteration_dict[_variable_description].append(
description_tuple
)
constants_iteration_dict[_variable_description].append(
description_tuple
)

return df_iteration_dict, series_iteration_dict, constants_iteration_dict

Expand Down Expand Up @@ -455,19 +454,16 @@ def addSeriesVariablesToXarray(xr_ds, component_dict, series_iteration_dict, loc

space_space_dict[df_description] = multi_index_dataframe

elif set(data.index.values).issubset(set(locations)):
space_dict[df_description] = data.rename_axis("space")
else:
# If the data indices correspond to esM locations, then the
# data is appended to space_dict, else time_dict
if set(data.index.values).issubset(set(locations)):
space_dict[df_description] = data.rename_axis("space")
else:
time_dict[df_description] = data.rename_axis("time")
time_dict[df_description] = pd.concat(
{locations[0]: time_dict[df_description]}, names=["space"]
)
time_dict[df_description] = time_dict[
df_description
].reorder_levels(["time", "space"])
time_dict[df_description] = data.rename_axis("time")
time_dict[df_description] = pd.concat(
{locations[0]: time_dict[df_description]}, names=["space"]
)
time_dict[df_description] = time_dict[
df_description
].reorder_levels(["time", "space"])

# If the dicts are populated with at least one item,
# process them further and merge with xr_ds
Expand Down
2 changes: 1 addition & 1 deletion fine/IOManagement/xarrayIO.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import xarray as xr
from netCDF4 import Dataset

import fine.utils as utils
from fine import utils
from fine.IOManagement import dictIO, utilsIO


Expand Down
7 changes: 4 additions & 3 deletions fine/aggregations/spatialAggregation/aggregation.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""
Functions to aggregate region data for a reduced set
of regions obtained as a result of spatial grouping of regions.
Functions to aggregate region data for a reduced set
of regions obtained as a result of spatial grouping of regions.
"""

import logging
Expand Down Expand Up @@ -484,7 +484,8 @@ def _get_aggregation_mode(varname, comp=None, comp_ds=None):
for comp, comp_ds in comp_dict.items():
aggregated_comp_ds = xr.Dataset()

for varname, da in comp_ds.data_vars.items():
for varname, da_iter in comp_ds.data_vars.items():
da = da_iter
# Check and set aggregation mode and weights
aggregation_mode, aggregation_weight = _get_aggregation_mode(
varname, comp, comp_ds
Expand Down
98 changes: 48 additions & 50 deletions fine/component.py
Original file line number Diff line number Diff line change
Expand Up @@ -1654,38 +1654,37 @@ def opBounds(pyM, loc, compName, ip, p, t):
return (rate[loc][p, t], rate[loc][p, t])
else:
return (0, None)
elif getattr(compDict[compName], opRateMaxName) is not None:
rate = getattr(compDict[compName], opRateMaxName)[ip]
if rate is not None:
if relevanceThreshold is not None:
validThreshold = 0 < relevanceThreshold
if validThreshold and (
rate[loc][p, t] < relevanceThreshold
):
return (0, 0)
return (
0,
rate[loc][p, t]
* esM.timeStepsPerSegment[ip].to_dict()[p, t],
)
elif getattr(compDict[compName], opRateFixName) is not None:
rate = getattr(compDict[compName], opRateFixName)[ip]
if rate is not None:
if relevanceThreshold is not None:
validThreshold = 0 < relevanceThreshold
if validThreshold and (
rate[loc][p, t] < relevanceThreshold
):
return (0, 0)
return (
rate[loc][p, t]
* esM.timeStepsPerSegment[ip].to_dict()[p, t],
rate[loc][p, t]
* esM.timeStepsPerSegment[ip].to_dict()[p, t],
)
else:
if getattr(compDict[compName], opRateMaxName) is not None:
rate = getattr(compDict[compName], opRateMaxName)[ip]
if rate is not None:
if relevanceThreshold is not None:
validThreshold = 0 < relevanceThreshold
if validThreshold and (
rate[loc][p, t] < relevanceThreshold
):
return (0, 0)
return (
0,
rate[loc][p, t]
* esM.timeStepsPerSegment[ip].to_dict()[p, t],
)
elif getattr(compDict[compName], opRateFixName) is not None:
rate = getattr(compDict[compName], opRateFixName)[ip]
if rate is not None:
if relevanceThreshold is not None:
validThreshold = 0 < relevanceThreshold
if validThreshold and (
rate[loc][p, t] < relevanceThreshold
):
return (0, 0)
return (
rate[loc][p, t]
* esM.timeStepsPerSegment[ip].to_dict()[p, t],
rate[loc][p, t]
* esM.timeStepsPerSegment[ip].to_dict()[p, t],
)
else:
return (0, None)
return (0, None)
else:
return (0, None)

Expand Down Expand Up @@ -3745,27 +3744,26 @@ def getLocEconomicsOperation(
)
/ esM.numberOfYears
)
else:
if not getOptValue:
return (
sum(
factor[p, t]
* var[loc, compName, ip, p, t]
* esM.periodOccurrences[ip][p]
for p, t in timeSet_pt
)
/ esM.numberOfYears
elif not getOptValue:
return (
sum(
factor[p, t]
* var[loc, compName, ip, p, t]
* esM.periodOccurrences[ip][p]
for p, t in timeSet_pt
)
else:
return (
sum(
factor[p, t]
* var[loc, compName, ip, p, t].value
* esM.periodOccurrences[ip][p]
for p, t in timeSet_pt
)
/ esM.numberOfYears
/ esM.numberOfYears
)
else:
return (
sum(
factor[p, t]
* var[loc, compName, ip, p, t].value
* esM.periodOccurrences[ip][p]
for p, t in timeSet_pt
)
/ esM.numberOfYears
)

def setOptimalValues(self, esM, pyM, indexColumns, plantUnit, unitApp=""):
"""
Expand Down
Loading

0 comments on commit 307540b

Please sign in to comment.