diff --git a/io.openems.edge.application/EdgeApp.bndrun b/io.openems.edge.application/EdgeApp.bndrun
index 4dfeab59531..8dddf82bfa1 100644
--- a/io.openems.edge.application/EdgeApp.bndrun
+++ b/io.openems.edge.application/EdgeApp.bndrun
@@ -167,6 +167,7 @@
bnd.identity;id='io.openems.edge.meter.weidmueller',\
bnd.identity;id='io.openems.edge.meter.ziehl',\
bnd.identity;id='io.openems.edge.onewire.thermometer',\
+ bnd.identity;id='io.openems.edge.predictor.lstmmodel',\
bnd.identity;id='io.openems.edge.predictor.persistencemodel',\
bnd.identity;id='io.openems.edge.predictor.similardaymodel',\
bnd.identity;id='io.openems.edge.pvinverter.cluster',\
@@ -345,6 +346,7 @@
io.openems.edge.meter.ziehl;version=snapshot,\
io.openems.edge.onewire.thermometer;version=snapshot,\
io.openems.edge.predictor.api;version=snapshot,\
+ io.openems.edge.predictor.lstmmodel;version=snapshot,\
io.openems.edge.predictor.persistencemodel;version=snapshot,\
io.openems.edge.predictor.similardaymodel;version=snapshot,\
io.openems.edge.pvinverter.api;version=snapshot,\
diff --git a/io.openems.edge.predictor.lstmmodel/.classpath b/io.openems.edge.predictor.lstmmodel/.classpath
new file mode 100644
index 00000000000..bbfbdbe40e7
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/.classpath
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/io.openems.edge.predictor.lstmmodel/.gitignore b/io.openems.edge.predictor.lstmmodel/.gitignore
new file mode 100644
index 00000000000..c2b941a96de
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/.gitignore
@@ -0,0 +1,2 @@
+/bin_test/
+/generated/
diff --git a/io.openems.edge.predictor.lstmmodel/.project b/io.openems.edge.predictor.lstmmodel/.project
new file mode 100644
index 00000000000..8fe907a680b
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/.project
@@ -0,0 +1,23 @@
+
+
+ io.openems.edge.predictor.lstmmodel
+
+
+
+
+
+ org.eclipse.jdt.core.javabuilder
+
+
+
+
+ bndtools.core.bndbuilder
+
+
+
+
+
+ org.eclipse.jdt.core.javanature
+ bndtools.core.bndnature
+
+
diff --git a/io.openems.edge.predictor.lstmmodel/.settings/org.eclipse.core.resources.prefs b/io.openems.edge.predictor.lstmmodel/.settings/org.eclipse.core.resources.prefs
new file mode 100644
index 00000000000..99f26c0203a
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/.settings/org.eclipse.core.resources.prefs
@@ -0,0 +1,2 @@
+eclipse.preferences.version=1
+encoding/=UTF-8
diff --git a/io.openems.edge.predictor.lstmmodel/bnd.bnd b/io.openems.edge.predictor.lstmmodel/bnd.bnd
new file mode 100644
index 00000000000..055ee9ad704
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/bnd.bnd
@@ -0,0 +1,16 @@
+Bundle-Name: OpenEMS Edge Predictor Lstm-Model
+Bundle-Vendor: OpenEMS Association e.V.
+Bundle-License: https://opensource.org/licenses/EPL-2.0
+Bundle-Version: 1.0.0.${tstamp}
+
+-buildpath: \
+ ${buildpath},\
+ io.openems.common,\
+ io.openems.edge.common,\
+ io.openems.edge.controller.api,\
+ io.openems.edge.predictor.api,\
+ io.openems.edge.timedata.api,\
+ org.apache.commons.math3,\
+
+-testpath: \
+ ${testpath}
\ No newline at end of file
diff --git a/io.openems.edge.predictor.lstmmodel/readme.adoc b/io.openems.edge.predictor.lstmmodel/readme.adoc
new file mode 100644
index 00000000000..0791f8580b3
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/readme.adoc
@@ -0,0 +1,28 @@
+= Long Short-Term Memory (LSTM) model predictor
+
+The Long Short-Term Memory (LSTM) model is a type of recurrent neural network (RNN) that is particularly well-suited for time series prediction tasks, including consumption and production power predictions, due to its ability to capture dependencies and patterns over time. https://en.wikipedia.org/wiki/Long_short-term_memory[More details of LSTM]
+
+This application is used for predicting power (consumption and production) values.
+Here, for power prediction, LSTM models can analyze historical power data to learn patterns and trends that occur over time, such as:
+
+* Daily and Seasonal Variations: for example, consumption power often follows cyclic patterns (e.g., higher usage during the day, lower at night). Production power is often higher during the day and absent at night.
+* External Factors: LSTM can incorporate external factors like weather, day of the week, or holidays to improve prediction accuracy.
+
+== Training LSTM for Power Predictions:
+
+* Input Data (Channel address "_sum/ConsumptionActivePower"): Time series data of past consumption levels.
+* Pre-processing: Data needs to be scaled and sometimes transformed to remove seasonality or noise.
+* Training: The LSTM is trained on historical data using techniques like backpropagation through time (BPTT), where it learns to minimize the error between predicted and actual consumption.
+* Prediction: Once trained, the model can predict future power consumption for various time steps ahead (e.g., hours, days, or even weeks).
+
+In practice, LSTMs are favored for their ability to learn complex time-related patterns, making them effective in forecasting energy demand patterns that can inform Energy management system (EMS), energy distribution, and cost optimization strategies.
+
+== Note for activating the predictor
+
+To run this predictor, please create a folder named "models" in the OpenEMS data directory (openems/data/).
+
+Initially, a generic model will be used for predictions, which may not yield optimal results. However, a training process is scheduled to occur every 45 days, during which the models in this directory will be updated. The 45-day interval consists of 30 days for training and 15 days for validation.
+
+As a result of this process, a new model will be trained and will automatically replace the previous one.
+
+https://github.com/OpenEMS/openems/tree/develop/io.openems.edge.predictor.lstmmodel[Source Code icon:github[]]
\ No newline at end of file
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/Config.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/Config.java
new file mode 100644
index 00000000000..400d302a3fc
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/Config.java
@@ -0,0 +1,31 @@
+package io.openems.edge.predictor.lstmmodel;
+
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
+
+import io.openems.edge.predictor.api.prediction.LogVerbosity;
+
+@ObjectClassDefinition(//
+ name = "Predictor Lstm-Model", //
+ description = "Implements Long Short-Term Memory (LSTM) model, which is a type of recurrent neural network (RNN) designed to capture long-range dependencies in sequential data, such as time series. "
+ + "This makes LSTMs particularly effective for time series prediction, "
+ + "as they can learn patterns and trends over time, handling long-term dependencies while filtering out irrelevant information.")
+@interface Config {
+
+ @AttributeDefinition(name = "Component-ID", description = "Unique ID of this Component")
+ String id() default "predictor0";
+
+ @AttributeDefinition(name = "Alias", description = "Human-readable name of this Component; defaults to Component-ID")
+ String alias() default "";
+
+ @AttributeDefinition(name = "Is enabled?", description = "Is this Component enabled?")
+ boolean enabled() default true;
+
+ @AttributeDefinition(name = "Channel-Address", description = "Channel-Address this Predictor is used for, e.g. '_sum/UnmanagedConsumptionActivePower'")
+ String channelAddress();
+
+ @AttributeDefinition(name = "Log-Verbosity", description = "The log verbosity.")
+ LogVerbosity logVerbosity() default LogVerbosity.NONE;
+
+ String webconsole_configurationFactory_nameHint() default "Predictor Lstm-Model [{id}]";
+}
\ No newline at end of file
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmModel.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmModel.java
new file mode 100644
index 00000000000..534cd1661bc
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmModel.java
@@ -0,0 +1,129 @@
+package io.openems.edge.predictor.lstmmodel;
+
+import io.openems.common.types.OpenemsType;
+import io.openems.edge.common.channel.BooleanReadChannel;
+import io.openems.edge.common.channel.Doc;
+import io.openems.edge.common.channel.DoubleReadChannel;
+import io.openems.edge.common.channel.StringReadChannel;
+import io.openems.edge.common.channel.value.Value;
+import io.openems.edge.common.component.OpenemsComponent;
+
+public interface LstmModel extends OpenemsComponent {
+
+ public enum ChannelId implements io.openems.edge.common.channel.ChannelId {
+ LAST_TRAINED_TIME(Doc.of(OpenemsType.STRING) //
+ .text("Last trained time in Unixstimestamp")), //
+ MODEL_ERROR(Doc.of(OpenemsType.DOUBLE) //
+ .text("Error in the Model")), //
+ CANNOT_TRAIN_CONDITON(Doc.of(OpenemsType.BOOLEAN) //
+ .text("When the data set is empty, entirely null, or contains 50% null values."));
+
+ private final Doc doc;
+
+ private ChannelId(Doc doc) {
+ this.doc = doc;
+ }
+
+ @Override
+ public Doc doc() {
+ return this.doc;
+ }
+ }
+
+ /**
+ * Gets the Channel for {@link ChannelId#CANNOT_TRAIN_CONDITON}.
+ *
+ * @return the Channel
+ */
+ public default BooleanReadChannel getCannotTrainConditionChannel() {
+ return this.channel(ChannelId.CANNOT_TRAIN_CONDITON);
+ }
+
+ /**
+	 * Gets the cannot-train condition as a boolean. See
+ * {@link ChannelId#CANNOT_TRAIN_CONDITON}.
+ *
+ * @return the Channel {@link Value}
+ */
+ public default Value getCannotTrainCondition() {
+ return this.getCannotTrainConditionChannel().value();
+ }
+
+ /**
+ * Internal method to set the 'nextValue' on
+ * {@link ChannelId#CANNOT_TRAIN_CONDITON} Channel.
+ *
+ * @param value the next value
+ */
+ public default void _setCannotTrainCondition(boolean value) {
+ this.getCannotTrainConditionChannel().setNextValue(value);
+ }
+
+ /**
+ * Internal method to set the 'nextValue' on
+ * {@link ChannelId#CANNOT_TRAIN_CONDITON} Channel.
+ *
+ * @param value the next value
+ */
+ public default void _setCannotTrainCondition(Boolean value) {
+ this.getCannotTrainConditionChannel().setNextValue(value);
+ }
+
+ /**
+ * Gets the Channel for {@link ChannelId#LAST_TRAINED_TIME}.
+ *
+ * @return the Channel
+ */
+ public default StringReadChannel getLastTrainedTimeChannel() {
+ return this.channel(ChannelId.LAST_TRAINED_TIME);
+ }
+
+ /**
+	 * Gets the last trained time as a Unix timestamp. See
+ * {@link ChannelId#LAST_TRAINED_TIME}.
+ *
+ * @return the Channel {@link Value}
+ */
+ public default Value getLastTrainedTime() {
+ return this.getLastTrainedTimeChannel().value();
+ }
+
+ /**
+ * Internal method to set the 'nextValue' on {@link ChannelId#LAST_TRAINED_TIME}
+ * Channel.
+ *
+ * @param value the next value
+ */
+ public default void _setLastTrainedTime(String value) {
+ this.getLastTrainedTimeChannel().setNextValue(value);
+ }
+
+ /**
+ * Gets the Channel for {@link ChannelId#MODEL_ERROR}.
+ *
+ * @return the Channel
+ */
+ public default DoubleReadChannel getModelErrorChannel() {
+ return this.channel(ChannelId.MODEL_ERROR);
+ }
+
+ /**
+ * Gets the Model error. See {@link ChannelId#MODEL_ERROR}.
+ *
+ * @return the Channel {@link Value}
+ */
+ public default Value getModelError() {
+ return this.getModelErrorChannel().value();
+ }
+
+ /**
+	 * Internal method to set the 'nextValue' on {@link ChannelId#MODEL_ERROR}
+ * Channel.
+ *
+ * @param value the next value
+ */
+ public default void _setModelError(Double value) {
+ this.getModelErrorChannel().setNextValue(value);
+ }
+
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmModelImpl.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmModelImpl.java
new file mode 100644
index 00000000000..d32764bf3a9
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmModelImpl.java
@@ -0,0 +1,301 @@
+package io.openems.edge.predictor.lstmmodel;
+
+import static io.openems.common.utils.ThreadPoolUtils.shutdownAndAwaitTermination;
+import static io.openems.edge.predictor.lstmmodel.utilities.DataUtility.combine;
+import static io.openems.edge.predictor.lstmmodel.utilities.DataUtility.concatenateList;
+import static io.openems.edge.predictor.lstmmodel.utilities.DataUtility.getData;
+import static io.openems.edge.predictor.lstmmodel.utilities.DataUtility.getDate;
+import static io.openems.edge.predictor.lstmmodel.utilities.DataUtility.getMinute;
+
+import java.time.ZonedDateTime;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.SortedMap;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
+
+import org.osgi.service.component.ComponentContext;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.ConfigurationPolicy;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+//import org.slf4j.Logger;
+//import org.slf4j.LoggerFactory;
+import org.osgi.service.metatype.annotations.Designate;
+
+import com.google.common.collect.Sets;
+import com.google.gson.JsonElement;
+
+import io.openems.common.exceptions.OpenemsError.OpenemsNamedException;
+import io.openems.common.session.Role;
+import io.openems.common.timedata.Resolution;
+import io.openems.common.types.ChannelAddress;
+import io.openems.edge.common.component.ClockProvider;
+import io.openems.edge.common.component.ComponentManager;
+import io.openems.edge.common.component.OpenemsComponent;
+import io.openems.edge.common.jsonapi.ComponentJsonApi;
+import io.openems.edge.common.jsonapi.EdgeGuards;
+import io.openems.edge.common.jsonapi.JsonApiBuilder;
+import io.openems.edge.common.sum.Sum;
+import io.openems.edge.controller.api.Controller;
+import io.openems.edge.predictor.api.manager.PredictorManager;
+import io.openems.edge.predictor.api.prediction.AbstractPredictor;
+import io.openems.edge.predictor.api.prediction.Prediction;
+import io.openems.edge.predictor.api.prediction.Predictor;
+import io.openems.edge.predictor.lstmmodel.common.HyperParameters;
+import io.openems.edge.predictor.lstmmodel.common.ReadAndSaveModels;
+import io.openems.edge.predictor.lstmmodel.jsonrpc.GetPredictionRequest;
+import io.openems.edge.predictor.lstmmodel.jsonrpc.PredictionRequestHandler;
+import io.openems.edge.predictor.lstmmodel.preprocessing.DataModification;
+import io.openems.edge.predictor.lstmmodel.train.LstmTrain;
+import io.openems.edge.timedata.api.Timedata;
+
+@Designate(ocd = Config.class, factory = true)
+@Component(//
+ name = "Predictor.LstmModel", //
+ immediate = true, //
+ configurationPolicy = ConfigurationPolicy.REQUIRE //
+)
+public class LstmModelImpl extends AbstractPredictor
+ implements Predictor, OpenemsComponent, ComponentJsonApi, LstmModel {
+
+ // private final Logger log = LoggerFactory.getLogger(LstmModelImpl.class);
+
+ /** 45 days. */
+ private static final long DAYS_45 = 45;
+
+ /** 45 days in minutes. */
+ private static final long PERIOD = DAYS_45 * 24 * 60;
+
+ @Reference
+ private Sum sum;
+
+ @Reference
+ private Timedata timedata;
+
+ @Reference
+ private ComponentManager componentManager;
+
+ @Reference
+ private PredictorManager predictorManager;
+
+ @Override
+ protected ClockProvider getClockProvider() {
+ return this.componentManager;
+ }
+
+ public LstmModelImpl() throws OpenemsNamedException {
+ super(//
+ OpenemsComponent.ChannelId.values(), //
+ Controller.ChannelId.values(), //
+ LstmModel.ChannelId.values()//
+ );
+ }
+
+ private ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
+ private ChannelAddress channelForPrediction;
+
+ @Activate
+ private void activate(ComponentContext context, Config config) throws OpenemsNamedException {
+ super.activate(context, config.id(), config.alias(), config.enabled(), //
+ new String[] { config.channelAddress() }, config.logVerbosity());
+
+ var channelAddress = ChannelAddress.fromString(config.channelAddress());
+ this.channelForPrediction = channelAddress;
+
+ /*
+ * Avoid training for the new FEMs due to lack of data. Set a fixed 45-day
+ * period: 30 days for training and 15 days for validation.
+ */
+ this.scheduler.scheduleAtFixedRate(//
+ new LstmTrain(this.timedata, channelAddress, this, DAYS_45), //
+ 0, //
+ PERIOD, //
+ TimeUnit.MINUTES//
+ );
+ }
+
+ @Override
+ @Deactivate
+ protected void deactivate() {
+ shutdownAndAwaitTermination(this.scheduler, 0);
+ super.deactivate();
+ }
+
+ @Override
+ protected Prediction createNewPrediction(ChannelAddress channelAddress) {
+
+ var hyperParameters = ReadAndSaveModels.read(channelAddress.getChannelId());
+ var nowDate = ZonedDateTime.now();
+
+ var seasonalityFuture = CompletableFuture
+ .supplyAsync(() -> this.predictSeasonality(channelAddress, nowDate, hyperParameters));
+
+ var trendFuture = CompletableFuture
+ .supplyAsync(() -> this.predictTrend(channelAddress, nowDate, hyperParameters));
+
+ var dayPlus1SeasonalityFuture = CompletableFuture
+ .supplyAsync(() -> this.predictSeasonality(channelAddress, nowDate.plusDays(1), hyperParameters));
+
+ var combinePrerequisites = CompletableFuture.allOf(seasonalityFuture, trendFuture);
+
+ try {
+ combinePrerequisites.get();
+
+ // Current day prediction
+ var currentDayPredicted = combine(trendFuture.get(), seasonalityFuture.get());
+
+ // Next Day prediction
+ var plus1DaySeasonalityPrediction = dayPlus1SeasonalityFuture.get();
+
+ // Concat current and Nextday
+ var actualPredicted = concatenateList(currentDayPredicted, plus1DaySeasonalityPrediction);
+
+ var baseTimeOfPrediction = nowDate.withMinute(getMinute(nowDate, hyperParameters)).withSecond(0)
+ .withNano(0);
+
+ return Prediction.from(//
+ Prediction.getValueRange(this.sum, channelAddress), //
+ baseTimeOfPrediction, //
+ averageInChunks(actualPredicted));
+ } catch (Exception e) {
+ throw new RuntimeException("Error in getting prediction execution", e);
+ }
+ }
+
+ /**
+ * Averages the elements of an integer array in chunks of a specified size.
+ *
+ *
+ * This method takes an input array of integers and divides it into chunks of a
+ * fixed size. For each chunk, it calculates the average of the integers and
+ * stores the result in a new array. The size of the result array is determined
+ * by the total number of elements in the input array divided by the chunk size.
+ *
+ *
+ * @param inputList an arrayList of Doubles to be processed. The array length
+ * must be a multiple of the chunk size for correct processing.
+ * @return an array of integers containing the averages of each chunk.
+ *
+ */
+ private static Integer[] averageInChunks(ArrayList inputList) {
+ final int chunkSize = 3;
+ int resultSize = inputList.size() / chunkSize;
+ Integer[] result = new Integer[resultSize];
+
+ for (int i = 0; i < inputList.size(); i += chunkSize) {
+ double sum = IntStream.range(i, Math.min(i + chunkSize, inputList.size()))
+ .mapToDouble(j -> inputList.get(j))//
+ .sum();
+ result[i / chunkSize] = (int) (sum / chunkSize);
+ }
+ return result;
+ }
+
+ /**
+ * Queries historic data for a specified time range and channel address with
+ * given {@link ChannelAddress}.
+ *
+ * @param from the start of the time range
+ * @param until the end of the time range
+ * @param channelAddress the {@link ChannelAddress} for the query
+ * @param hyperParameters the {@link HyperParameters} that include the interval
+ * for data resolution
+ * @return a SortedMap where the key is a ZonedDateTime representing the
+ * timestamp of the data point, and the value is another SortedMap where
+ * the key is the ChannelAddress and the value is the data point as a
+ * JsonElement. and null if error
+ */
+ private SortedMap> queryHistoricData(ZonedDateTime from,
+ ZonedDateTime until, ChannelAddress channelAddress, HyperParameters hyperParameters) {
+ try {
+ return this.timedata.queryHistoricData(null, from, until, Sets.newHashSet(channelAddress),
+ new Resolution(hyperParameters.getInterval(), ChronoUnit.MINUTES));
+ } catch (OpenemsNamedException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ /**
+ * Predicts trend values for a specified channel at the current date using LSTM
+ * models.
+ *
+ * @param channelAddress The {@link ChannelAddress} for which trend values are
+ * predicted.
+ * @param nowDate The current date and time for which trend values are
+ * predicted.
+ * @param hyperParameters The {@link HyperParameters} for the prediction model.
+ * @return A list of predicted trend values for the specified channel at the
+ * current date.
+	 *         NOTE(review): the original "@throws SomeException" was a placeholder;
+	 *         unchecked exceptions from the prediction pipeline may propagate.
+ */
+ public ArrayList predictTrend(ChannelAddress channelAddress, ZonedDateTime nowDate,
+ HyperParameters hyperParameters) {
+
+ var till = nowDate.withMinute(getMinute(nowDate, hyperParameters)).withSecond(0).withNano(0);
+ var from = till.minusMinutes(hyperParameters.getInterval() * hyperParameters.getWindowSizeTrend());
+
+ var trendQueryResult = this.queryHistoricData(//
+ from, //
+ till, //
+ channelAddress, //
+ hyperParameters);
+
+ return LstmPredictor.predictTrend(//
+ getData(trendQueryResult), //
+ getDate(trendQueryResult), //
+ till, //
+ hyperParameters);
+ }
+
+ /**
+ * Predicts Seasonality values for a specified channel at the current date using
+ * LSTM models.
+ *
+ * @param channelAddress The address of the channel for which seasonality
+ * values are predicted.
+ * @param nowDate The current date and time for which seasonality values
+ * are predicted.
+	 * @param hyperParameters The {@link HyperParameters} for the prediction model.
+ * @return A list of predicted seasonality values for the specified channel at
+ * the current date.
+	 *         NOTE(review): the original "@throws SomeException" was a placeholder;
+	 *         unchecked exceptions from the prediction pipeline may propagate.
+ */
+ public ArrayList predictSeasonality(ChannelAddress channelAddress, ZonedDateTime nowDate,
+ HyperParameters hyperParameters) {
+
+ var till = nowDate.withMinute(getMinute(nowDate, hyperParameters)).withSecond(0).withNano(0);
+ var temp = till.minusDays(hyperParameters.getWindowSizeSeasonality() - 1);
+
+ var from = temp//
+ .withMinute(getMinute(nowDate, hyperParameters))//
+ .withSecond(0)//
+ .withNano(0);
+
+ var targetFrom = till.plusMinutes(hyperParameters.getInterval());
+ var queryResult = this.queryHistoricData(from, till, channelAddress, hyperParameters);
+
+ return LstmPredictor.getArranged(
+ LstmPredictor.getIndex(targetFrom.getHour(), targetFrom.getMinute(), hyperParameters), //
+ LstmPredictor.predictSeasonality(DataModification.removeNegatives(getData(queryResult)),
+ getDate(queryResult), //
+ hyperParameters));
+ }
+
+ @Override
+ public void buildJsonApiRoutes(JsonApiBuilder builder) {
+ builder.handleRequest(GetPredictionRequest.METHOD, endpoint -> {
+ endpoint.setGuards(EdgeGuards.roleIsAtleast(Role.OWNER));
+ }, call -> {
+ return PredictionRequestHandler.handlerGetPredictionRequest(call.getRequest().id, this.predictorManager,
+ this.channelForPrediction);
+ });
+ }
+}
\ No newline at end of file
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmPredictor.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmPredictor.java
new file mode 100644
index 00000000000..0081e2efddc
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/LstmPredictor.java
@@ -0,0 +1,416 @@
+package io.openems.edge.predictor.lstmmodel;
+
+import static io.openems.edge.predictor.lstmmodel.utilities.UtilityConversion.to1DArray;
+import static io.openems.edge.predictor.lstmmodel.utilities.UtilityConversion.to1DArrayList;
+import static io.openems.edge.predictor.lstmmodel.utilities.UtilityConversion.to2DArrayList;
+import static io.openems.edge.predictor.lstmmodel.utilities.UtilityConversion.to2DList;
+
+import java.time.OffsetDateTime;
+import java.time.ZonedDateTime;
+import java.util.ArrayList;
+import java.util.List;
+
+import io.openems.edge.predictor.lstmmodel.common.DataStatistics;
+import io.openems.edge.predictor.lstmmodel.common.HyperParameters;
+import io.openems.edge.predictor.lstmmodel.preprocessingpipeline.PreprocessingPipeImpl;
+import io.openems.edge.predictor.lstmmodel.utilities.MathUtils;
+
+public class LstmPredictor {
+
+ /**
+ * Predicts seasonality based on the provided data and models.
+ *
+ * @param data The input data to predict seasonality for.
+ * @param date The corresponding date and time information for the
+ * data points.
+ * @param hyperParameters The hyperparameters for the prediction model.
+ * @return A list of predicted values for the seasonality.
+	 *         NOTE(review): the original "@throws SomeException" was a placeholder;
+	 *         unchecked exceptions from the prediction pipeline may propagate.
+ */
+ public static ArrayList predictSeasonality(ArrayList data, ArrayList date,
+ HyperParameters hyperParameters) {
+
+ var preprocessing = new PreprocessingPipeImpl(hyperParameters);
+ preprocessing.setData(to1DArray(data)).setDates(date);
+ var resized = to2DList((double[][][]) preprocessing.interpolate()//
+ .scale()//
+ .filterOutliers() //
+ .groupByHoursAndMinutes()//
+ .execute());
+ preprocessing.setData(resized);
+ var normalized = (double[][]) preprocessing//
+ .normalize()//
+ .execute();
+ var allModel = hyperParameters.getBestModelSeasonality();
+ var predicted = predictPre(to2DArrayList(normalized), allModel, hyperParameters);
+ preprocessing.setData(to1DArray(predicted))//
+ .setMean(DataStatistics.getMean(resized))
+ .setStandardDeviation(DataStatistics.getStandardDeviation(resized));
+ var seasonalityPrediction = (double[]) preprocessing.reverseNormalize()//
+ .reverseScale()//
+ .execute();
+ return to1DArrayList(seasonalityPrediction);
+ }
+
+ /**
+ * Predicts trend values for a given time period using LSTM models.
+ *
+ * @param data The historical data for trend prediction.
+ * @param date The corresponding date and time information for the
+ * historical data points.
+ * @param until The target time until which trend values will be
+ * predicted.
+ * @param hyperParameters The hyperparameters for the prediction model.
+ * @return A list of predicted trend values.
+	 *         NOTE(review): the original "@throws SomeException" was a placeholder;
+	 *         unchecked exceptions from the prediction pipeline may propagate.
+ */
+ public static ArrayList predictTrend(ArrayList data, ArrayList date,
+ ZonedDateTime until, HyperParameters hyperParameters) {
+
+ var preprocessing = new PreprocessingPipeImpl(hyperParameters);
+ preprocessing.setData(to1DArray(data)).setDates(date);
+
+ var scaled = (double[]) preprocessing//
+ .interpolate()//
+ .scale()//
+ .execute();
+ // normalize
+ var trendPrediction = new double[hyperParameters.getTrendPoint()];
+ var mean = DataStatistics.getMean(scaled);
+ var standerDev = DataStatistics.getStandardDeviation(scaled);
+ preprocessing.setData(scaled);
+ var normData = to1DArrayList((double[]) preprocessing//
+ .normalize()//
+ .execute());
+
+ var predictionFor = until.plusMinutes(hyperParameters.getInterval());
+ var val = hyperParameters.getBestModelTrend();
+ for (int i = 0; i < hyperParameters.getTrendPoint(); i++) {
+ var temp = predictionFor.plusMinutes(i * hyperParameters.getInterval());
+
+ var modlelindex = (int) decodeDateToColumnIndex(temp, hyperParameters);
+ double predTemp = LstmPredictor.predict(//
+ normData, //
+ val.get(modlelindex).get(0), val.get(modlelindex).get(1), //
+ val.get(modlelindex).get(2), val.get(modlelindex).get(3), //
+ val.get(modlelindex).get(4), val.get(modlelindex).get(5), //
+ val.get(modlelindex).get(7), val.get(modlelindex).get(6), //
+ hyperParameters);
+ normData.add(predTemp);
+ normData.remove(0);
+ trendPrediction[i] = (predTemp);
+ }
+
+ preprocessing.setData(trendPrediction).setMean(mean).setStandardDeviation(standerDev);
+
+ return to1DArrayList((double[]) preprocessing//
+ .reverseNormalize()//
+ .reverseScale()//
+ .execute());
+ }
+
+ /**
+ * Decodes a ZonedDateTime to its corresponding column index based on prediction
+ * interval and window size.
+ *
+ * @param predictionFor The ZonedDateTime for which the column index is to be
+ * decoded.
+ * @param hyperParameters The hyperparameters for the prediction model.
+ * @return The decoded column index for the given ZonedDateTime. If the index is
+ * negative, it is adjusted to the corresponding positive index for a
+ * 24-hour period.
+ */
+ public static double decodeDateToColumnIndex(ZonedDateTime predictionFor, HyperParameters hyperParameters) {
+ var hour = predictionFor.getHour();
+ var minute = predictionFor.getMinute();
+ var index = (Integer) hour * (60 / hyperParameters.getInterval()) + minute / hyperParameters.getInterval();
+ var modifiedIndex = index - hyperParameters.getWindowSizeTrend();
+ if (modifiedIndex >= 0) {
+ return modifiedIndex;
+ } else {
+ return modifiedIndex + 60 / hyperParameters.getInterval() * 24;
+ }
+ }
+
+ /**
+ * Re-arranges an ArrayList of Double values by splitting it at the specified
+ * index and moving the second part to the front.
+ *
+ * @param splitIndex The index at which the ArrayList will be split.
+ * @param singleArray An ArrayList of Double values to be re-arranged.
+ * @return A new ArrayList containing the Double values after re-arrangement.
+ */
+ public static ArrayList getArranged(int splitIndex, ArrayList singleArray) {
+ var arranged = new ArrayList();
+ var firstGroup = new ArrayList();
+ var secondGroup = new ArrayList();
+
+ for (var i = 0; i < singleArray.size(); i++) {
+ if (i < splitIndex) {
+ firstGroup.add(singleArray.get(i));
+ } else {
+ secondGroup.add(singleArray.get(i));
+ }
+ }
+
+ arranged.addAll(secondGroup);
+ arranged.addAll(firstGroup);
+
+ return arranged;
+ }
+
+ /**
+ * Calculates the index of a specific hour and minute combination within a
+	 * 24-hour period, divided into intervals defined by the {@link HyperParameters}.
+ *
+ * @param hour The hour component (0-23) to be used for the
+ * calculation.
+	 * @param minute          The minute component (0, 5, 10, ..., 55) to be used
+	 *                        for the calculation.
+	 * @param hyperParameters The {@link HyperParameters} providing the interval.
+ * @return The index representing the specified hour and minute combination.
+ */
+ public static Integer getIndex(Integer hour, Integer minute, HyperParameters hyperParameters) {
+ var k = 0;
+ for (var i = 0; i < 24; i++) {
+ for (var j = 0; j < (int) 60 / hyperParameters.getInterval(); j++) {
+ var h = i;
+ var m = j * hyperParameters.getInterval();
+ if (hour == h && minute == m) {
+ return k;
+ } else {
+ k = k + 1;
+ }
+ }
+ }
+ return k;
+ }
+
+ /**
+ * Predict output values based on input data and a list of model parameters for
+ * multiple instances. This method takes a list of input data instances and a
+ * list of model parameters and predicts output values for each instance using
+ * the model.
+ *
+ * @param inputData An ArrayList of ArrayLists of Doubles, where each
+ * inner ArrayList represents input data for one
+ * instance.
+ * @param val An ArrayList of ArrayLists of ArrayLists of Doubles
+ * representing the model parameters for each instance.
+ * Each innermost ArrayList should contain model
+ * parameters in the following order: 0: Input weight
+ * vector (wi) 1: Output weight vector (wo) 2: Recurrent
+ * weight vector (wz) 3: Recurrent input activations (rI)
+ * 4: Recurrent output activations (rO) 5: Recurrent
+	 *                        update activations (rZ) 6: Current output (yt) 7:
+	 *                        Current cell state (ct)
+	 * @param hyperParameters instance of the {@link HyperParameters} class
+ * @return An ArrayList of Double values representing the predicted output for
+ * each input data instance.
+ */
+ public static ArrayList predictPre(ArrayList> inputData,
+ ArrayList>> val, HyperParameters hyperParameters) {
+
+ var result = new ArrayList();
+ for (var i = 0; i < inputData.size(); i++) {
+
+ var wi = val.get(i).get(0);
+ var wo = val.get(i).get(1);
+ var wz = val.get(i).get(2);
+ var rI = val.get(i).get(3);
+ var rO = val.get(i).get(4);
+ var rZ = val.get(i).get(5);
+ var ct = val.get(i).get(7);
+ var yt = val.get(i).get(6);
+
+ result.add(predict(inputData.get(i), wi, wo, wz, rI, rO, rZ, ct, yt, hyperParameters));
+ }
+ return result;
+ }
+
+ /**
+ * Predict the output values based on input data and model parameters. This
+ * method takes input data and a set of model parameters and predicts output
+ * values for each data point using the model.
+ *
+ * @param data A 2D array representing the input data where each row
+ * is a data point.
+ * @param val An ArrayList containing model parameters, including
+ * weight vectors and activation values. The ArrayList
+ * should contain the following sublists in this order:
+ * 0: Input weight vector (wi) 1: Output weight vector
+ * (wo) 2: Recurrent weight vector (wz) 3: Recurrent
+ * input activations (rI) 4: Recurrent output activations
+ * (rO) 5: Recurrent update activations (rZ) 6: Current
+ * output (yt) 7: Current cell state (ct)
+ *
+ * @param hyperParameters instance of class HyperParamters data
+ *
+ * @return An ArrayList of Double values representing the predicted output for
+ * each input data point.
+ *
+ */
+ public static ArrayList predictPre(double[][] data, List> val,
+ HyperParameters hyperParameters) {
+
+ var result = new ArrayList();
+
+ var wi = val.get(0);
+ var wo = val.get(1);
+ var wz = val.get(2);
+ var rI = val.get(3);
+ var rO = val.get(4);
+ var rZ = val.get(5);
+ var yt = val.get(6);
+ var ct = val.get(7);
+
+ for (var i = 0; i < data.length; i++) {
+ result.add(predict(data[i], wi, wo, wz, rI, rO, rZ, yt, ct, hyperParameters));
+ }
+ return result;
+ }
+
+ /**
+ * Predict an output value based on input data and model parameters. This method
+ * predicts a single output value based on input data and a set of model
+ * parameters for a LSTM model.
+ *
+ * @param inputData An ArrayList of Doubles representing the input data
+ * for prediction.
+ * @param wi An ArrayList of Doubles representing the input weight
+ * vector (wi) for the RNN model.
+ * @param wo An ArrayList of Doubles representing the output weight
+ * vector (wo) for the RNN model.
+ * @param wz An ArrayList of Doubles representing the recurrent
+ * weight vector (wz) for the RNN model.
+ * @param rI An ArrayList of Doubles representing the recurrent
+ * input activations (rI) for the RNN model.
+ * @param rO An ArrayList of Doubles representing the recurrent
+ * output activations (rO) for the RNN model.
+ * @param rZ An ArrayList of Doubles representing the recurrent
+ * update activations (rZ) for the RNN model.
+ * @param cta An ArrayList of Doubles representing the current cell
+ * state (ct) for the RNN model.
+ * @param yta An ArrayList of Doubles representing the current
+ * output (yt) for the RNN model.
+ * @param hyperParameters instance of class HyperParamters data
+ * @return A double representing the predicted output value based on the input
+ * data and model parameters.
+ */
+ public static double predict(ArrayList inputData, ArrayList wi, ArrayList wo,
+ ArrayList wz, ArrayList rI, ArrayList rO, ArrayList rZ,
+ ArrayList cta, ArrayList yta, HyperParameters hyperParameters) {
+ var ct = hyperParameters.getCtInit();
+ var yt = hyperParameters.getYtInit();
+ var standData = inputData;// DataModification.standardize(inputData, hyperParameters);
+
+ for (var i = 0; i < standData.size(); i++) {
+ var ctMinusOne = ct;
+ var yTMinusOne = yt;
+ var xt = standData.get(i);
+ var it = MathUtils.sigmoid(wi.get(i) * xt + rI.get(i) * yTMinusOne);
+ var ot = MathUtils.sigmoid(wo.get(i) * xt + rO.get(i) * yTMinusOne);
+ var zt = MathUtils.tanh(wz.get(i) * xt + rZ.get(i) * yTMinusOne);
+ ct = ctMinusOne + it * zt;
+ yt = ot * MathUtils.tanh(ct);
+ }
+ return yt;
+ }
+
+ /**
+ * Predict an output value based on input data and model parameters. This method
+ * predicts a single output value based on input data and a set of model
+ * parameters for a LSTM model.
+ *
+ * @param inputData An ArrayList of Doubles representing the input data
+ * for prediction.
+ * @param wi An ArrayList of Doubles representing the input weight
+ * vector (wi) for the RNN model.
+ * @param wo An ArrayList of Doubles representing the output weight
+ * vector (wo) for the RNN model.
+ * @param wz An ArrayList of Doubles representing the recurrent
+ * weight vector (wz) for the RNN model.
+ * @param rI An ArrayList of Doubles representing the recurrent
+ * input activations (rI) for the RNN model.
+ * @param rO An ArrayList of Doubles representing the recurrent
+ * output activations (rO) for the RNN model.
+ * @param rZ An ArrayList of Doubles representing the recurrent
+ * update activations (rZ) for the RNN model.
+ * @param cta An ArrayList of Doubles representing the current cell
+ * state (ct) for the RNN model.
+ * @param yta An ArrayList of Doubles representing the current
+ * output (yt) for the RNN model.
+ * @param hyperParameters instance of class HyperParamters data
+ * @return A double representing the predicted output value based on the input
+ * data and model parameters.
+ */
+ public static double predict(double[] inputData, ArrayList wi, ArrayList wo, ArrayList wz,
+ ArrayList rI, ArrayList rO, ArrayList rZ, ArrayList cta,
+ ArrayList yta, HyperParameters hyperParameters) {
+ var ct = hyperParameters.getCtInit();
+ var yt = hyperParameters.getYtInit();
+ var standData = inputData;// DataModification.standardize(inputData, hyperParameters);
+
+ for (var i = 0; i < standData.length; i++) {
+ var ctMinusOne = ct;
+ var yTMinusOne = yt;
+ var xt = standData.length;
+ var it = MathUtils.sigmoid(wi.get(i) * xt + rI.get(i) * yTMinusOne);
+ var ot = MathUtils.sigmoid(wo.get(i) * xt + rO.get(i) * yTMinusOne);
+ var zt = MathUtils.tanh(wz.get(i) * xt + rZ.get(i) * yTMinusOne);
+ ct = ctMinusOne + it * zt;
+ yt = ot * MathUtils.tanh(ct);
+ }
+ return yt;
+ }
+
+ /**
+ * Predict a focused output value based on input data and model parameters. This
+ * method predicts a single focused output value based on input data and a set
+ * of model parameters for a LSTM model with a focus on specific activations.
+ *
+ * @param inputData An ArrayList of Doubles representing the input data
+ * for prediction.
+ * @param wi An ArrayList of Doubles representing the input weight
+ * vector (wi) for the RNN model.
+ * @param wo An ArrayList of Doubles representing the output weight
+ * vector (wo) for the RNN model.
+ * @param wz An ArrayList of Doubles representing the recurrent
+ * weight vector (wz) for the RNN model.
+ * @param rI An ArrayList of Doubles representing the recurrent
+ * input activations (rI) for the RNN model.
+ * @param rO An ArrayList of Doubles representing the recurrent
+ * output activations (rO) for the RNN model.
+ * @param rZ An ArrayList of Doubles representing the recurrent
+ * update activations (rZ) for the RNN model.
+ * @param cta An ArrayList of Doubles representing the current cell
+ * state (ct) for the RNN model.
+ * @param yta An ArrayList of Doubles representing the current
+ * output (yt) for the RNN model.
+ * @param hyperParameters instance of class HyperParamters data
+ * @return A double representing the predicted focused output value based on the
+ * input data and model parameters.
+ */
+ public static double predictFocoused(ArrayList inputData, ArrayList wi, ArrayList wo,
+ ArrayList wz, ArrayList rI, ArrayList rO, ArrayList rZ,
+ ArrayList cta, ArrayList yta, HyperParameters hyperParameters) {
+ var ct = hyperParameters.getCtInit();
+ var yt = hyperParameters.getYtInit();
+
+ var standData = inputData;
+
+ for (var i = 0; i < standData.size(); i++) {
+ var ctMinusOne = ct;
+ var ytMinusOne = yt;
+ var xt = standData.get(i);
+ var it = MathUtils.sigmoid(rI.get(i) * ytMinusOne);
+ var ot = MathUtils.sigmoid(rO.get(i) * ytMinusOne);
+ var zt = MathUtils.tanh(wz.get(i) * xt);
+ ct = ctMinusOne + it * zt;
+ yt = ot * MathUtils.tanh(ct);
+ }
+ return yt;
+ }
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/DataStatistics.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/DataStatistics.java
new file mode 100644
index 00000000000..a7a3f014f16
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/DataStatistics.java
@@ -0,0 +1,145 @@
+package io.openems.edge.predictor.lstmmodel.common;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.stream.IntStream;
+
public class DataStatistics {

	/**
	 * Smallest standard deviation ever returned, to avoid subsequent
	 * division-by-zero when the data has no spread.
	 */
	private static final double MIN_STD_DEVIATION = 0.000000000000001;

	/**
	 * Gets the mean of the given values.
	 *
	 * @param data the data
	 * @return the mean value, or 0.0 if the collection is empty
	 */
	public static double getMean(Collection<? extends Number> data) {
		return data.stream().mapToDouble(Number::doubleValue).average().orElse(0.0);
	}

	/**
	 * Calculates the mean (average) of each row in a 2D array of doubles.
	 *
	 * @param data a 2D array of doubles containing the data from which to
	 *             calculate means
	 * @return an array containing the mean of each row (0.0 for an empty row)
	 */
	public static double[] getMean(double[][] data) {
		return Arrays.stream(data).mapToDouble(row -> Arrays.stream(row).average().orElse(0.0)).toArray();
	}

	/**
	 * Computes the mean (average) of an array of double values.
	 *
	 * <p>
	 * FIX: the previous implementation used '.parallel()', which can produce
	 * slightly different floating-point results between runs because the
	 * summation order is not deterministic; sequential streaming is
	 * deterministic and just as fast for typical input sizes.
	 *
	 * @param data the array of double values; must not be empty
	 * @return the mean of the input array
	 * @throws java.util.NoSuchElementException if the array is empty
	 */
	public static double getMean(double[] data) {
		return Arrays.stream(data).average().getAsDouble();
	}

	/**
	 * Calculates the population standard deviation of the given values.
	 *
	 * <p>
	 * The standard deviation is the square root of the variance, i.e. the
	 * average of the squared differences between each data point and the mean.
	 * When the standard deviation is 0, a small positive value is returned
	 * instead to avoid a subsequent division by zero.
	 *
	 * @param data the values for which to calculate the standard deviation
	 * @return the standard deviation (never exactly 0); NaN for an empty
	 *         collection
	 */
	public static double getStandardDeviation(Collection<? extends Number> data) {
		double mean = getMean(data);
		double sumSquaredDeviations = data.stream()//
				.mapToDouble(x -> Math.pow(x.doubleValue() - mean, 2))//
				.sum();
		double variance = sumSquaredDeviations / data.size();
		double stdDeviation = Math.sqrt(variance);
		return (stdDeviation == 0) ? MIN_STD_DEVIATION : stdDeviation;
	}

	/**
	 * Calculates the average deviation of the data from an expected error,
	 * i.e. the standard deviation around {@code expectedError} instead of the
	 * sample mean.
	 *
	 * @param data          the data
	 * @param expectedError the expected error used as the center value
	 * @return the deviation (never exactly 0); NaN for an empty collection
	 */
	public static double getStandardDeviation(Collection<? extends Number> data, double expectedError) {
		double sumSquaredDeviations = data.stream()//
				.mapToDouble(x -> Math.pow(x.doubleValue() - expectedError, 2))//
				.sum();
		double variance = sumSquaredDeviations / data.size();
		double stdDeviation = Math.sqrt(variance);
		return (stdDeviation == 0) ? MIN_STD_DEVIATION : stdDeviation;
	}

	/**
	 * Computes the population standard deviation of an array of double values.
	 *
	 * <p>
	 * If the standard deviation is zero, a very small positive number (1e-15)
	 * is returned to avoid returning zero.
	 *
	 * @param data the array of double values; must not be empty
	 * @return the standard deviation of the input array
	 * @throws java.util.NoSuchElementException if the array is empty
	 */
	public static double getStandardDeviation(double[] data) {
		double mean = Arrays.stream(data).average().getAsDouble();
		double sumSquaredDeviations = Arrays.stream(data).map(x -> Math.pow(x - mean, 2)).sum();
		double variance = sumSquaredDeviations / data.length;
		double stdDeviation = Math.sqrt(variance);
		return (stdDeviation == 0) ? MIN_STD_DEVIATION : stdDeviation;
	}

	/**
	 * Calculates the standard deviation of each row in a 2D array of doubles.
	 *
	 * @param data a 2D array of doubles containing the data from which to
	 *             calculate standard deviations
	 * @return an array containing the standard deviation of each row
	 */
	public static double[] getStandardDeviation(double[][] data) {
		return Arrays.stream(data)//
				.mapToDouble(DataStatistics::getStandardDeviation)//
				.toArray();
	}

	/**
	 * Computes the root mean square (RMS) error between two arrays of double
	 * values.
	 *
	 * @param original the original array of double values
	 * @param computed the computed array of double values; must have the same
	 *                 length as {@code original} and not be empty
	 * @return the RMS error between the original and computed arrays
	 * @throws IllegalArgumentException          if the arrays have different
	 *                                           lengths
	 * @throws java.util.NoSuchElementException  if the arrays are empty
	 */
	public static double computeRms(double[] original, double[] computed) {
		if (original.length != computed.length) {
			throw new IllegalArgumentException("Arrays must have the same length");
		}

		var sumOfSquaredDifferences = IntStream.range(0, original.length)
				.mapToDouble(i -> Math.pow(original[i] - computed[i], 2))//
				.average();

		return Math.sqrt(sumOfSquaredDifferences.getAsDouble());
	}
}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/DynamicItterationValue.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/DynamicItterationValue.java
new file mode 100644
index 00000000000..7876753043b
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/DynamicItterationValue.java
@@ -0,0 +1,25 @@
+package io.openems.edge.predictor.lstmmodel.common;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+public class DynamicItterationValue {
+
+ public static int setIteration(ArrayList errors, int errorIndex, HyperParameters hyperParameters) {
+
+ if (errors.isEmpty()) {
+ return 10;
+ }
+
+ var minError = Collections.min(errors);
+ var maxError = Collections.max(errors);
+ var minIteration = 1;
+ var maxIteration = 10 * hyperParameters.getEpochTrack() + 1;
+
+ var errorValue = errors.get(errorIndex);
+ var normalizedError = (errorValue - minError) / (maxError - minError);
+ var iterationValue = minIteration + (normalizedError * (maxIteration - minIteration));
+
+ return (int) Math.round(iterationValue);
+ }
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/HyperParameters.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/HyperParameters.java
new file mode 100644
index 00000000000..1ae7a819e3d
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/HyperParameters.java
@@ -0,0 +1,932 @@
+package io.openems.edge.predictor.lstmmodel.common;
+
+import java.io.Serializable;
+import java.time.OffsetDateTime;
+import java.util.ArrayList;
+import java.util.Collections;
+
+public class HyperParameters implements Serializable {
+
+ private OffsetDateTime lastTrainedDate;
+
	/**
	 * Gets the date and time at which this model was last trained.
	 *
	 * @return the last training timestamp; null if it was never set
	 */
	public OffsetDateTime getLastTrainedDate() {
		return this.lastTrainedDate;
	}
+
+ /**
+ * Serializable class version number for ensuring compatibility during
+ * serialization.
+ */
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Maximum iteration factor.
+ *
+ *
+ * This value is used by DynamicItterationValue class to set the
+ * gdItterationValue DynamicItterationValue class changes the classes such that
+ * the gdItteration value is in between 1 and maxItterFactor*current Epoch value
+ * +1 When epoch increase, learning rate decreases and the gdItteration value
+ * increases. Set the value always to 10.
+ *
+ */
+ private final int maxItterFactor = 10;
+
+ /**
+ * Upper limit for the learning rate.
+ *
+ *
+ * This value is used by the ADGRAD optimizer as the initial learning rate. The
+ * optimizer dynamically adjusts the learning rate over epochs, starting with
+ * the value of learningRateUpperLimit. The adjustment is typically aimed at
+ * improving convergence by starting with a higher learning rate and gradually
+ * decreasing it.
+ *
+ *
+ * This variable can be set to any value between 0 and 1. It is important to
+ * ensure that the value of learningRateUpperLimit is always greater than
+ * learnignRateLowerLimit to allow proper functioning of the dynamic learning
+ * rate setup. Default value: 0.01
+ */
+ private double learningRateUpperLimit = 0.01;
+
+ /**
+ * Lower limit for the learning rate.
+ *
+ *
+ * This value is used by the ADGRAD optimizer as the minimum learning rate. As
+ * the training progresses, the optimizer adjusts the learning rate and it
+ * converges to the value of learnignRateLowerLimit by the final epoch. This
+ * helps in fine-tuning the model parameters and achieving better accuracy by
+ * the end of the training.
+ *
+ * This variable can be set to any value between 0 and 1. It is crucial that the
+ * value of learnignRateLowerLimit is always less than learningRateUpperLimit to
+ * enable the proper decreasing trend of the learning rate throughout the
+ * training process. Default value: 0.0001
+ */
+ private double learnignRateLowerLimit = 0.0001;
+
+ /**
+ * Proportion of data to be used for training.
+ *
+ *
+ * This variable determines the fraction of the entire dataset that will be
+ * allocated for training purposes. The remaining portion of the dataset will be
+ * used for validation. The value of this variable should be within the range of
+ * 0 to 1, where:
+	 * <ul>
+	 * <li>0 means 0% of the dataset is used for training (i.e., no training
+	 * data).</li>
+	 * <li>1 means 100% of the dataset is used for training (i.e., no validation
+	 * data).</li>
+	 * </ul>
+	 *
+ * The program utilizes this variable to split the input dataset vector into two
+ * separate vectors. One vector contains the training data, and the other vector
+ * contains the validation data. The split is essential for assessing the
+ * performance of the model on unseen data, helping to prevent overfitting and
+ * to ensure the model's generalizability.
+ */
+ private double dataSplitTrain = 0.7;
+
+ /**
+ * Proportion of data to be used for validation.
+ */
+ private double dataSplitValidate = 1 - this.dataSplitTrain;
+
+ private double wiInit = 0.2;
+ private double woInit = 0.2;
+ private double wzInit = 0.2;
+ private double riInit = 0.2;
+ private double roInit = 0.2;
+ private double rzInit = 0.2;
+ private double ytInit = 0.2;
+ private double ctInit = 0.2;
+
+ /**
+ * Interval for logging or updating parameters.
+ */
+ private int interval = 5;
+
+ /**
+ * Size of each batch for training.
+ *
+ *
+ * To manage the computational load on the CPU during training, the training
+ * data is divided into smaller subsets called batches.
+ *
+ *
+ *
+ * For our LSTM (Long Short-Term Memory) model, a general rule of thumb is that
+ * datasets consisting of 30 days of data with 5-minute intervals should not be
+ * divided into batches greater than 2. This helps to balance the computational
+ * load and the memory usage during training.
+ *
+ *
+ *
+ * Considerations for setting the batch size:
+ *
+ *
+ *
If the training data size is large, more batches should be created to
+ * avoid excessive memory usage, which could lead to heap memory errors.
+ *
If the training data size is small, fewer batches should be created to
+ * ensure each batch contains a sufficient number of samples for meaningful
+ * updates. Creating too many batches with too few samples can lead to index out
+ * of range errors during training.
+ * This counter keeps track of the number of batches that have passed through
+ * the training process.
+ *
+ * - It updates after each batch completes its training. - In case the training
+ * is interrupted, this counter allows the process to resume from the last
+ * completed batch, ensuring continuity and efficiency in the training process.
+ *
+ *
+ * This mechanism is crucial for maintaining the state of the training process,
+ * especially in scenarios where interruptions may occur.
+ *
+ */
+ private int batchTrack = 0;
+
+ /**
+ * Number of epochs for training.
+ *
+ *
+ * An epoch refers to one complete pass through the entire training dataset.
+ * During each epoch, the model processes all the training data in batches,
+ * updating the model parameters iteratively. After each epoch, the learning
+ * rate can be adjusted, and the training process continues on the same dataset.
+ *
+ *
+ *
+ * The number of epochs is a crucial hyperparameter in training neural networks.
+ * More epochs generally mean that the model has more opportunities to learn
+ * from the data, potentially improving its performance. However, more epochs
+ * also mean longer training times and a higher risk of overfitting, where the
+ * model learns the training data too well and performs poorly on new, unseen
+ * data.
+ *
+ *
+ *
+ * It is recommended to keep the number of epochs in the range of 30 to 50 for a
+ * balanced approach between training time and model performance. Adjusting the
+ * number of epochs can be necessary based on the specific characteristics of
+ * the dataset and the complexity of the model.
+ *
+ */
+ private int epoch = 10;
+
+ /**
+	 * Counter for tracking epochs. The counter updates every time all batches
+	 * have undergone training. This value is serialized along with the weights.
+	 * In case training stops, this record is used to resume the training from
+	 * the last stop point.
+ */
+ private int epochTrack = 0;
+
+ /**
+ * Number of predictions using trend weights.
+ *
+ *
+ * This parameter determines the number of predictions made based on the trend
+ * weights derived from the most recent trend window data. The trend window is a
+ * specific period used to analyze the trend patterns of the data.
+ *
+ *
+ *
+ *
+ *
+ * By default, one prediction is made using the last trend window data if this
+ * value is set to 1. This means that the system will use the data from the last
+ * trend window to make a single prediction.
+ *
+ *
+ * It is advisable to set this value to 12 if the interval between data points
+ * is 5 minutes Similarly, set this value to 8 if the interval between data
+ * points is 15 minutes . The interval represents the time or sequence gap
+ * between consecutive data points being analyzed.
+ *
+ *
+ * Setting a higher value than recommended can lead to inaccuracies in the
+ * prediction. This is because too many trend points may cause the model
+ * misinterpret the trend patterns, resulting in errors.
+ *
+ * This parameter defines the window size used for analyzing seasonal patterns
+ * in the data. A window size of 7 means that the model will use data from the
+ * last 7 days to train at one instance. Additionally, it will utilize the data
+ * from the last 7 days to predict data points for the next 24 hours.
+ *
+ *
+ *
+ * The window size can be adjusted up to a maximum of 14. While increasing the
+ * window size can potentially provide more accurate seasonal insights, it also
+ * increases the computational load.
+ *
+ *
+ *
+ * Key points: - Set to 7 to use the last 7 days of data for training and for
+ * predicting the next 24 hours. - The value can be adjusted up to 14. - Be
+ * aware that higher values may be computationally intensive.
+ *
+ * This parameter specifies the window size used for analyzing trend patterns in
+ * the data. A window size of 5 means that the model will consider data from the
+ * last 5 time intervals to analyze the trend. This helps in identifying the
+ * direction and strength of the trend over recent time periods. Keep the value
+ * in between 5 to 7
+ *
+ */
+ private int windowSizeTrend = 5;
+
+ /**
+ * Number of iterations for gradient descent.
+ *
+ *
+ * This parameter defines the number of iterations to be performed during the
+ * gradient descent optimization process. Gradient descent is used to minimize
+ * the cost function by iteratively updating the model parameters.
+ *
+ *
+ *
+ * The number of iterations can be set between 1 and 100. A higher number of
+ * iterations can potentially lead to models with improved accuracy as the
+ * optimization process has more opportunities to converge to a minimum.
+ * However, increasing the number of iterations also increases the computation
+ * time required for training the model.
+ *
+ *
+ *
+ * Key points: - Set to 10 to perform 10 iterations of gradient descent. - Can
+ * be adjusted between 1 and 100 based on the trade-off between accuracy and
+ * computation time. - Higher values may improve model accuracy but will also
+ * increase computation time.
+ *
+ */
+ private int gdIterration = 10;
+
+ /**
+ * Counter for general tracking purposes.
+ *
+ *
+ * This counter is used to determine whether the training process is being
+ * executed for the first time.
+ *
+ *
+ *
+ * - If the count is 0, the algorithm will use the initial weights and start a
+ * new training process. - If the count value is greater than 0, the algorithm
+ * will continue training the existing models.
+ *
+ *
+ *
+ * This mechanism ensures that the model can distinguish between initializing
+ * new training sessions and performing subsequent training iterations.
+ *
+ *
+ *
+ * Note: Just like in programming, remember that if you start counting from 0,
+ * you're a true computer scientist!
+ *
+ * This value represents the threshold error, typically measured in the same
+ * units as the training data. It can also be considered as the allowed error
+ * margin. The Root Mean Square (RMS) error computed during the model evaluation
+ * reflects the average deviation from this threshold value.
+ *
+ *
+ *
+ * Key points: - Measured in the same units as the training data. - Represents
+ * the acceptable error margin. - RMS error indicates the average deviation from
+ * this threshold.
+ *
+ * This value defines the minimum threshold for scaling the data. It should
+ * always be less than the `scalingMax` value. The unit of this value is the
+	 * same as that of the training data. This value can be negative or positive.
+ *
+ *
+ *
+ * Once set, it is important not to change this value, as it could affect the
+ * consistency of the scaling process.
+ *
+ *
+ *
+ * Note: value once set should not be changed, as changing it is as risky as
+ * debugging a program on a Friday afternoon!
+ *
+ */
+ private double scalingMin = 0;
+
+ /**
+ * Maximum value for scaling data.
+ *
+ *
+ * This value defines the maximum threshold for scaling the data. It should
+ * always be greater than the `scalingMin` value. The unit of this value is the
+ * same as that of the training data. This value can be positive or negative,
+ * depending on the data range.
+ *
+ *
+ *
+ * Once set, it is important not to change this value, as it could affect the
+ * consistency of the scaling process.
+ *
+ *
+ *
+ * Note: Setting this value high is like aiming for the stars with your data!
+ * Just remember, changing it later could be as risky as giving a programmer, a
+ * cup of coffee after midnight!
+ *
+ *
+ */
+ private double scalingMax = 20000;
+
+ /**
+ * Model data structure for trend analysis.
+ *
+ *
+ * This is the brain of the model, responsible for storing updated weights and
+ * biases during the training process for trend analysis.
+ *
+ *
+ *
+ * The structure comprises nested arrays to store weights and biases:
+ *
+ * Where Wi, Wo, Wz, Ri, Ro, Rz, Yt, and Ct are the weights and biases of the
+ * LSTM cells, and 1, 2, 3, ..., k represent the window size.
+ *
+ *
+ *
+ * The first two nested arrays ensure that the second nested array is available
+ * for every time depending on the interval. first element of second nested
+ * array is used for the prediction of the trend point for 00:05 (if the
+ * interval is 5)
+ *
+ *
+ *
+ * Fun Fact: This data structure holds the keys to predicting trends better than
+ * a psychic octopus predicting World Cup winners!
+ *
+ */
+ private ArrayList>>> modelTrend = new ArrayList>>>();
+
+ /**
+ * Model data structure for seasonality analysis.
+ *
+ *
+ * This data structure serves as the backbone of the model, specifically
+ * designed to store updated weights and biases during the training process for
+ * seasonality analysis.
+ *
+ *
+ *
+ * The structure consists of nested ArrayLists to accommodate the weights and
+ * biases.
+ *
+ * Where Wi, Wo, Wz are the weights of the LSTM cells, and 1, 2, 3, ..., k
+ * represent the window size.
+ *
+ *
+ *
+ * The first two nested arrays ensure that the second nested array is available
+ * for every time depending on the interval. first element of second nested
+ * array is used for the prediction of the trend point for 00:00 (if the
+ * interval is 5)
+ *
+ *
+ *
+ * Fun Fact: With this data structure, our model can predict seasonal pattern
+ * more accurately than a fortune-teller!
+ *
+ */
+ private ArrayList>>> modelSeasonality = new ArrayList>>>();
+
+ /**
+ * List of all model errors related to trend analysis.
+ *
+ *
+ * This vector holds the Root Mean Square (RMS) errors of different models
+ * recorded during multiple training steps in modelTrend.
+ *
+ *
+ *
+ *
+ * Fun Fact: These errors are like the turn signals on a BMW - sometimes they're
+ * there, sometimes they're not, but they always keep us guessing and learning
+ * along the way!
+ *
+ */
+ private ArrayList allModelErrorTrend = new ArrayList();
+
+ /**
+ * List of all model errors related to seasonality analysis.
+ *
+ *
+ * This vector contains the Root Mean Square (RMS) errors of different models
+ * recorded during multiple training steps in modelSeasonality.
+ *
+ *
+ *
+ * Fun Fact: These errors are like the various recipes for currywurst - some may
+ * be a bit spicier than others, but they all add flavor to our models, just
+ * like currywurst adds flavor to German cuisine!
+ *
+ */
+ private ArrayList allModelErrorSeasonality = new ArrayList();
+
+ /**
+ * Mean value for normalization or scaling purposes.
+ *
+ *
+ * This value is crucial for ensuring proper normalization or scaling of the
+ * data. It acts as the central point around which the data is normalized or
+ * scaled.
+ *
+ *
+ *
+ *
+ * It's important to set this value to 0, just like it's important to feed your
+ * girlfriend when she's hungry, because, trust me, she can be mean when hungry!
+ *
+ */
+ private double mean = 0;
+
+ /**
+ * Root Mean Square Errors (RMSE) for trend analysis.
+ *
+ * (NOTE(review): the original Javadoc here merged the description of the
+ * standard-deviation field into this comment; verify that the
+ * 'standerDeviation' field carries its own documentation at its
+ * declaration.)
+ *
+ * This list contains RMSE values for trend analysis. Unlike
+ * 'allModelErrorTrend', this list is limited in size to accommodate 60 divided
+ * by the interval multiplied by 24, and each value represents the RMSE of the
+ * model predicting for a specific time interval.
+ *
+ * The error at index 0 corresponds to the model predicting for 00:05, with
+ * subsequent indices representing subsequent time intervals.
+ */
+ private ArrayList rmsErrorTrend = new ArrayList();
+
+ /**
+ * Root Mean Square Error (RMSE) for seasonality analysis.
+ *
+ *
+ * This list contains RMSE values for seasonality analysis. Each value
+ * represents the RMSE of the model's predictions related to seasonality.
+ *
+ */
+ private ArrayList rmsErrorSeasonality = new ArrayList();
+
+ /**
+ * Counter for outer loop iterations, possibly for nested loops. Note: only used
+ * in unit test case
+ */
+ private int outerLoopCount = 0;
+
+ /**
+ * Name of the model.
+ */
+ private String modelName = "";
+
+ public HyperParameters() {
+ }
+
+ public void setLearningRateUpperLimit(double rate) {
+ this.learningRateUpperLimit = rate;
+ }
+
+ public double getLearningRateUpperLimit() {
+ return this.learningRateUpperLimit;
+ }
+
+ public void setLearningRateLowerLimit(double val) {
+ this.learnignRateLowerLimit = val;
+ }
+
+ public double getLearningRateLowerLimit() {
+ return this.learnignRateLowerLimit;
+ }
+
+ public void setWiInit(double val) {
+ this.wiInit = val;
+ }
+
+ public double getWiInit() {
+ return this.wiInit;
+ }
+
+ public void setWoInit(double val) {
+ this.woInit = val;
+ }
+
+ public double getWoInit() {
+ return this.woInit;
+ }
+
+ public void setWzInit(double val) {
+ this.wzInit = val;
+ }
+
+ public double getWzInit() {
+ return this.wzInit;
+ }
+
+ public void setriInit(double rate) {
+ this.riInit = rate;
+ }
+
+ public double getRiInit() {
+ return this.riInit;
+ }
+
+ public void setRoInit(double val) {
+ this.roInit = val;
+ }
+
+ public double getRoInit() {
+ return this.roInit;
+ }
+
+ public void setRzInit(double val) {
+ this.rzInit = val;
+ }
+
+ public double getRzInit() {
+ return this.rzInit;
+ }
+
+ public void setYtInit(double val) {
+ this.ytInit = val;
+ }
+
+ public double getYtInit() {
+ return this.ytInit;
+ }
+
+ public void setCtInit(double val) {
+ this.ctInit = val;
+ }
+
+ public double getCtInit() {
+ return this.ctInit;
+ }
+
+ public int getWindowSizeSeasonality() {
+ return this.windowSizeSeasonality;
+ }
+
+ public int getGdIterration() {
+ return this.gdIterration;
+ }
+
+ public void setGdIterration(int val) {
+ this.gdIterration = val;
+ }
+
+ public int getWindowSizeTrend() {
+ return this.windowSizeTrend;
+ }
+
+ public double getScalingMin() {
+ return this.scalingMin;
+ }
+
+ public double getScalingMax() {
+ return this.scalingMax;
+ }
+
+ public void setCount(int val) {
+ this.count = val;
+ }
+
+ public int getCount() {
+ return this.count;
+ }
+
+ public void setDatasplitTrain(double val) {
+ this.dataSplitTrain = val;
+ }
+
+ public double getDataSplitTrain() {
+ return this.dataSplitTrain;
+ }
+
+ public void setDatasplitValidate(double val) {
+ this.dataSplitValidate = val;
+ }
+
+ public double getDataSplitValidate() {
+ return this.dataSplitValidate;
+ }
+
+ public int getTrendPoint() {
+ return this.trendPoints;
+ }
+
+ public int getEpoch() {
+
+ return this.epoch;
+ }
+
+ public int getInterval() {
+ return this.interval;
+ }
+
+ public void setRmsErrorTrend(double val) {
+ this.rmsErrorTrend.add(val);
+ }
+
+ public void setRmsErrorSeasonality(double val) {
+ this.rmsErrorSeasonality.add(val);
+ }
+
+ public ArrayList getRmsErrorSeasonality() {
+ return this.rmsErrorSeasonality;
+ }
+
+ public ArrayList getRmsErrorTrend() {
+ return this.rmsErrorTrend;
+ }
+
+ public void setEpochTrack(int val) {
+ this.epochTrack = val;
+ }
+
+ public int getEpochTrack() {
+ return this.epochTrack;
+ }
+
	/**
	 * Returns the index of the seasonality model with the lowest RMS error.
	 *
	 * <p>
	 * NOTE(review): throws java.util.NoSuchElementException when no error has
	 * been recorded yet (Collections.min on an empty list); callers must ensure
	 * rmsErrorSeasonality is non-empty.
	 *
	 * @return index of the minimum entry in rmsErrorSeasonality
	 */
	public int getMinimumErrorModelSeasonality() {
		return this.rmsErrorSeasonality.indexOf(Collections.min(this.rmsErrorSeasonality));
	}
+
	/**
	 * Returns the index of the trend model with the lowest RMS error.
	 *
	 * <p>
	 * NOTE(review): throws java.util.NoSuchElementException when no error has
	 * been recorded yet (Collections.min on an empty list); callers must ensure
	 * rmsErrorTrend is non-empty.
	 *
	 * @return index of the minimum entry in rmsErrorTrend
	 */
	public int getMinimumErrorModelTrend() {
		return this.rmsErrorTrend.indexOf(Collections.min(this.rmsErrorTrend));
	}
+
+ public int getOuterLoopCount() {
+ return this.outerLoopCount;
+ }
+
+ public void setOuterLoopCount(int val) {
+ this.outerLoopCount = val;
+ }
+
+ public int getBatchSize() {
+ return this.batchSize;
+ }
+
+ public int getBatchTrack() {
+ return this.batchTrack;
+ }
+
+ public void setBatchTrack(int val) {
+ this.batchTrack = val;
+ }
+
+ public void setModelName(String val) {
+ this.modelName = val;
+ }
+
+ public String getModelName() {
+ return this.modelName;
+ }
+
+ public double getMean() {
+ return this.mean;
+
+ }
+
+ public double getStanderDeviation() {
+ return this.standerDeviation;
+ }
+
+ public double getTargetError() {
+ return this.targetError;
+ }
+
+ public void setTargetError(double val) {
+ this.targetError = val;
+ }
+
+ public int getMaxItter() {
+ return this.maxItterFactor;
+ }
+
+ /**
+ * Updates the model trend with new values.
+ *
+ * @param val ArrayList of ArrayLists of ArrayLists of Double containing the new
+ * values to add to the model trend
+ */
+ public void updatModelTrend(ArrayList>> val) {
+ this.modelTrend.add(val);
+ }
+
+ /**
+ * Retrieves the most recently recorded model trend from the list of model
+ * trends.
+ *
+ * @return The most recently recorded model trend, represented as an ArrayList
+ * of ArrayLists of ArrayLists of Double.
+ */
+ public ArrayList>> getlastModelTrend() {
+ return this.modelTrend.get(this.modelTrend.size() - 1);
+ }
+
+ public ArrayList>> getBestModelTrend() {
+ return this.modelTrend.get(this.getMinimumErrorModelTrend());
+ }
+
+ public ArrayList>> getBestModelSeasonality() {
+ return this.modelSeasonality.get(this.getMinimumErrorModelSeasonality());
+ }
+
+ public ArrayList>>> getAllModelsTrend() {
+ return this.modelTrend;
+ }
+
+ public ArrayList>>> getAllModelSeasonality() {
+ return this.modelSeasonality;
+ }
+
+ public void setAllModelErrorTrend(ArrayList val) {
+ this.allModelErrorTrend = val;
+ }
+
+ public void setAllModelErrorSeason(ArrayList val) {
+ this.allModelErrorSeasonality = val;
+ }
+
+ public ArrayList getAllModelErrorTrend() {
+ return this.allModelErrorTrend;
+ }
+
+ public ArrayList getAllModelErrorSeason() {
+ return this.allModelErrorSeasonality;
+ }
+
+ /**
+ * Retrieves the last model trend from the list of model trends.
+ *
+ * @return ArrayList of ArrayLists of ArrayLists of Double representing the last
+ * model trend
+ */
+ public ArrayList>> getlastModelSeasonality() {
+ return this.modelSeasonality.get(this.modelSeasonality.size() - 1);
+ }
+
+ /**
+ * reset the error in the model.
+ */
+ public void resetModelErrorValue() {
+ this.rmsErrorSeasonality = new ArrayList();
+ this.rmsErrorTrend = new ArrayList();
+ }
+
+ /**
+ * Updates the model seasonality with new values.
+ *
+ * @param val The new model seasonality values to add, represented as an
+ * ArrayList of ArrayLists of ArrayLists of Double.
+ */
+ public void updateModelSeasonality(ArrayList>> val) {
+ this.modelSeasonality.add(val);
+ }
+
+ /**
+ * Prints the current values of hyperparameters and related attributes to the
+ * console.
+ */
+ public void printHyperParameters() {
+ StringBuilder builder = new StringBuilder();
+
+ builder.append("learningRateUpperLimit = ").append(this.learningRateUpperLimit).append("\n");
+ builder.append("learnignRateLowerLimit = ").append(this.learnignRateLowerLimit).append("\n");
+ builder.append("wiInit = ").append(this.wiInit).append("\n");
+ builder.append("woInit = ").append(this.woInit).append("\n");
+ builder.append("wzInit = ").append(this.wzInit).append("\n");
+ builder.append("riInit = ").append(this.riInit).append("\n");
+ builder.append("roInit = ").append(this.roInit).append("\n");
+ builder.append("rzInit = ").append(this.rzInit).append("\n");
+ builder.append("ytInit = ").append(this.ytInit).append("\n");
+ builder.append("ctInit = ").append(this.ctInit).append("\n");
+ builder.append("Epoch = ").append(this.epoch).append("\n");
+ builder.append("windowSizeSeasonality = ").append(this.windowSizeSeasonality).append("\n");
+ builder.append("windowSizeTrend = ").append(this.windowSizeTrend).append("\n");
+ builder.append("scalingMin = ").append(this.scalingMin).append("\n");
+ builder.append("scalingMax = ").append(this.scalingMax).append("\n");
+ builder.append("RMS error trend = ").append(this.getRmsErrorTrend()).append("\n");
+ builder.append("RMS error Seasonlality =").append(this.getRmsErrorSeasonality()).append("\n");
+ builder.append("Count value = ").append(this.count).append("\n");
+ builder.append("Outer loop Count = ").append(this.outerLoopCount).append("\n");
+ builder.append("Epoch track = ").append(this.epochTrack).append("\n");
+
+ System.out.println(builder.toString());
+ }
+
+ /**
+ * Updates the models and their corresponding error indices based on the minimum
+ * error values obtained from model trends and model seasonality. This method
+ * first retrieves the indices of models with minimum errors for both trends and
+ * seasonality. Then it retrieves the corresponding models and clears the
+ * existing model trends, model seasonality, RMS errors for trend, and RMS
+ * errors for seasonality. After that, it adds the retrieved models to the
+ * respective model lists and updates the RMS errors with the minimum error
+ * values.
+ */
+ public void update() {
+ int minErrorIndTrend = this.getMinimumErrorModelTrend();
+ int minErrorIndSeasonlity = this.getMinimumErrorModelSeasonality();
+
+ // uipdating models
+ var modelTrendTemp = this.modelTrend.get(minErrorIndTrend);
+ final var modelTempSeasonality = this.modelSeasonality.get(minErrorIndSeasonlity);
+ this.modelTrend.clear();
+ this.modelSeasonality.clear();
+ this.modelTrend.add(modelTrendTemp);
+ this.modelSeasonality.add(modelTempSeasonality);
+ // updating index
+ double minErrorTrend = this.rmsErrorTrend.get(minErrorIndTrend);
+ final double minErrorSeasonality = this.rmsErrorSeasonality.get(minErrorIndSeasonlity);
+ this.rmsErrorTrend.clear();
+ this.rmsErrorSeasonality.clear();
+ this.rmsErrorTrend.add(minErrorTrend);
+ this.rmsErrorSeasonality.add(minErrorSeasonality);
+ this.count = 1;
+ this.lastTrainedDate = OffsetDateTime.now();
+
+ }
+
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/OffsetDateTimeAdapter.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/OffsetDateTimeAdapter.java
new file mode 100644
index 00000000000..d9d0cf13689
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/OffsetDateTimeAdapter.java
@@ -0,0 +1,29 @@
+package io.openems.edge.predictor.lstmmodel.common;
+
+import java.lang.reflect.Type;
+import java.time.OffsetDateTime;
+import java.time.format.DateTimeFormatter;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonPrimitive;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+public class OffsetDateTimeAdapter implements JsonSerializer, JsonDeserializer {
+
+ private static final DateTimeFormatter FORMATTER = DateTimeFormatter.ISO_OFFSET_DATE_TIME;
+
+ @Override
+ public JsonElement serialize(OffsetDateTime src, Type typeOfSrc, JsonSerializationContext context) {
+ return new JsonPrimitive(src.format(FORMATTER));
+ }
+
+ @Override
+ public OffsetDateTime deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context)
+ throws JsonParseException {
+ return OffsetDateTime.parse(json.getAsString(), FORMATTER);
+ }
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/ReadAndSaveModels.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/ReadAndSaveModels.java
new file mode 100644
index 00000000000..738e85ca769
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/ReadAndSaveModels.java
@@ -0,0 +1,158 @@
+package io.openems.edge.predictor.lstmmodel.common;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Reader;
+import java.nio.file.Paths;
+import java.time.OffsetDateTime;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.zip.DeflaterOutputStream;
+import java.util.zip.InflaterInputStream;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+import io.openems.common.OpenemsConstants;
+import io.openems.edge.predictor.lstmmodel.validator.ValidationSeasonalityModel;
+import io.openems.edge.predictor.lstmmodel.validator.ValidationTrendModel;
+
+public class ReadAndSaveModels {
+
+ private static final String MODEL_DIRECTORY = Paths.get(OpenemsConstants.getOpenemsDataDir())//
+ .toFile()//
+ .getAbsolutePath();
+
+ private static final String MODEL_FOLDER = File.separator + "models" + File.separator;
+
+ /**
+ * Saves the {@link HyperParameters} object to a file in JSON format. This
+ * method serializes the provided {@link HyperParameters} object into JSON
+ * format and saves it to a file with the specified name in the "models"
+ * directory. The serialization process utilizes a custom Gson instance
+ * configured to handle the serialization of OffsetDateTime objects. The file is
+ * saved in the directory specified by the OpenEMS data directory.
+ *
+ * @param hyperParameters The {@link HyperParameters} object to be saved.
+ */
+ public static void save(HyperParameters hyperParameters) {
+ String modelName = hyperParameters.getModelName();
+ String filePath = Paths.get(MODEL_DIRECTORY, MODEL_FOLDER, modelName)//
+ .toString();
+
+ Gson gson = new GsonBuilder()//
+ .registerTypeAdapter(OffsetDateTime.class, new OffsetDateTimeAdapter())//
+ .create();
+
+ try {
+ var compressedData = compress(hyperParameters);
+ var compressedDataString = Base64.getEncoder().encodeToString(compressedData);
+ var json = gson.toJson(compressedDataString);
+
+ try (FileWriter writer = new FileWriter(filePath)) {
+ writer.write(json);
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Reads and de-serializes a {@link HyperParameters} object from a JSON file.
+ * This method reads a HyperParameters object from the specified JSON file,
+ * de-serializing it into a {@link HyperParameters} instance. The
+ * de-serialization process utilizes a custom Gson instance configured to handle
+ * the de-serialization of {@link OffsetDateTime} objects. The file is expected
+ * to be located in the "models" directory within the OpenEMS data directory.
+ *
+ * @param fileName The name of the JSON file to read the HyperParameters from.
+ * @return The {@link HyperParameters} object read from the file.
+ * @throws FileNotFoundException If the specified file is not found.
+ * @throws IOException If an I/O error occurs while reading the file.
+ */
+ public static HyperParameters read(String fileName) {
+
+ String filePath = Paths.get(MODEL_DIRECTORY, MODEL_FOLDER, fileName)//
+ .toString();
+
+ try (Reader reader = new FileReader(filePath)) {
+ Gson gson = new GsonBuilder()//
+ .registerTypeAdapter(OffsetDateTime.class, new OffsetDateTimeAdapter())//
+ .create();
+ var json = gson.fromJson(reader, String.class);
+ var deserializedData = Base64.getDecoder().decode(json);
+ return decompress(deserializedData);
+ } catch (IOException e) {
+ var hyperParameters = new HyperParameters();
+ hyperParameters.setModelName(fileName);
+ return hyperParameters;
+ }
+ }
+
+ /**
+ * Compress the data.
+ *
+ * @param hyp the Hyper parameter object
+ * @return compressend byte array
+ */
+ public static byte[] compress(HyperParameters hyp) {
+ try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ DeflaterOutputStream dos = new DeflaterOutputStream(baos);
+ ObjectOutputStream oos = new ObjectOutputStream(dos)) {
+
+ oos.writeObject(hyp);
+ dos.finish();
+ return baos.toByteArray();
+
+ } catch (IOException e) {
+ e.printStackTrace();
+ return null;
+ }
+ }
+
+ /**
+ * DeCompress the data.
+ *
+ * @param value the value array to decompress
+ * @return Hyper parameter
+ */
+ public static HyperParameters decompress(byte[] value) {
+ HyperParameters hyperParameters = null;
+ try (ByteArrayInputStream bais = new ByteArrayInputStream(value);
+ InflaterInputStream iis = new InflaterInputStream(bais);
+ ObjectInputStream ois = new ObjectInputStream(iis)) {
+ hyperParameters = (HyperParameters) ois.readObject();
+ } catch (IOException | ClassNotFoundException e) {
+ e.printStackTrace();
+ }
+ return hyperParameters;
+ }
+
+ /**
+ * Adapt it.
+ *
+ * @param hyperParameters the Hyperparameter
+ * @param data the data
+ * @param dates the dates
+ */
+ public static void adapt(HyperParameters hyperParameters, ArrayList data, ArrayList dates) {
+ if (hyperParameters.getCount() == 0) {
+ return;
+ }
+
+ var valSeas = new ValidationSeasonalityModel();
+ var valTrend = new ValidationTrendModel();
+
+ hyperParameters.resetModelErrorValue();
+
+ valSeas.validateSeasonality(data, dates, hyperParameters.getAllModelSeasonality(), hyperParameters);
+ valTrend.validateTrend(data, dates, hyperParameters.getAllModelsTrend(), hyperParameters);
+ }
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/ReadCsv.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/ReadCsv.java
new file mode 100644
index 00000000000..43097788fd6
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/common/ReadCsv.java
@@ -0,0 +1,75 @@
+package io.openems.edge.predictor.lstmmodel.common;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.time.OffsetDateTime;
+import java.util.ArrayList;
+
+import io.openems.common.OpenemsConstants;
+
+public class ReadCsv {
+
+ private static final String MODEL_DIRECTORY = Paths.get(OpenemsConstants.getOpenemsDataDir())//
+ .toFile().getAbsolutePath();
+ private static final String MODEL_FOLDER = File.separator + "models" + File.separator;
+
+ private ArrayList data = new ArrayList();
+ private ArrayList dates = new ArrayList();
+
+ public ReadCsv(String path) {
+ this.getDataFromCsv(path);
+ }
+
+ /**
+ * Reads data from a CSV file and populates class fields with the data. This
+ * method reads data from a CSV file specified by the provided file name. Each
+ * line in the CSV file is expected to contain timestamped data points, where
+ * the first column represents timestamps in the ISO-8601 format and subsequent
+ * columns represent numeric data. The data is parsed, and the timestamps and
+ * numeric values are stored in class fields for further processing.
+ *
+ * @param fileName The name of the CSV file to read data from.
+ * @throws IOException if there are issues reading the file.
+ */
+ public void getDataFromCsv(String fileName) {
+
+ try {
+ var path = Paths.get(MODEL_DIRECTORY, MODEL_FOLDER, fileName)//
+ .toString();
+
+ var reader = new BufferedReader(new FileReader(path));
+ var line = reader.readLine();
+
+ while (line != null) {
+ var parts = line.split(",");
+ var date = OffsetDateTime.parse(parts[0]);
+ var temp2 = 0.0;
+
+ for (int i = 1; i < parts.length; i++) {
+ if (parts[i].equals("") || parts[i].equals("nan")) {
+ temp2 = Double.NaN;
+ } else {
+ temp2 = (Double.parseDouble(parts[i]));
+ }
+ }
+ this.dates.add(date);
+ this.data.add(temp2);
+ line = reader.readLine();
+ }
+ reader.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public ArrayList getData() {
+ return this.data;
+ }
+
+ public ArrayList getDates() {
+ return this.dates;
+ }
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/CubicalInterpolation.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/CubicalInterpolation.java
new file mode 100644
index 00000000000..41482c01e2e
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/CubicalInterpolation.java
@@ -0,0 +1,119 @@
+package io.openems.edge.predictor.lstmmodel.interpolation;
+
+import java.util.ArrayList;
+import java.util.stream.IntStream;
+
+import org.apache.commons.math3.analysis.interpolation.SplineInterpolator;
+import org.apache.commons.math3.analysis.polynomials.PolynomialSplineFunction;
+
+public class CubicalInterpolation extends SplineInterpolator {
+
+ private ArrayList data;
+
+ public CubicalInterpolation(ArrayList data) {
+ this.data = data;
+ }
+
+ public CubicalInterpolation() {
+ }
+
+ /**
+ * Compute Cubical interpolation.
+ *
+ * @return interpolated results
+ */
+ public ArrayList compute() {
+ var interpolation = new ArrayList>();
+ var function = this.getFunctionForAllInterval(this.data);
+ var differences = this.firstOrderDiff(function);
+
+ for (int i = 0; i < differences.length; i++) {
+ if (differences[i] != 1) {
+ int requiredPoints = (int) (differences[i] - 1);
+ interpolation.add(this.calculate(function.getPolynomials()[i].getCoefficients(), requiredPoints));
+ }
+ }
+ this.generateCombineInstruction(interpolation, differences);
+ return this.data;
+ }
+
+ private PolynomialSplineFunction getFunctionForAllInterval(ArrayList data) {
+ var nonNaNCount = data.stream().filter(d -> !Double.isNaN(d)).count();
+
+ var dataNew = new double[(int) nonNaNCount];
+ var xVal = new double[(int) nonNaNCount];
+
+ int[] index = { 0 };
+ IntStream.range(0, data.size())//
+ .filter(i -> !Double.isNaN(data.get(i)))//
+ .forEach(i -> {
+ dataNew[index[0]] = data.get(i);
+ xVal[index[0]] = i + 1;
+ index[0]++;
+ });
+
+ return interpolate(xVal, dataNew);
+ }
+
+ private double[] firstOrderDiff(PolynomialSplineFunction function) {
+ double[] knots = function.getKnots();
+ return IntStream.range(0, knots.length - 1)//
+ .mapToDouble(i -> knots[i + 1] - knots[i])//
+ .toArray();
+ }
+
+ private ArrayList calculate(double[] weight, int requiredPoints) {
+
+ ArrayList result = new ArrayList<>();
+ for (int j = 0; j < requiredPoints; j++) {
+ double sum = 0;
+ for (int i = 0; i < weight.length; i++) {
+ sum += weight[i] * Math.pow(j + 1, i);
+ }
+ result.add(sum);
+ }
+ return result;
+ }
+
+ private void generateCombineInstruction(ArrayList> interPolatedValue, double[] firstOrderDiff) {
+
+ int count = 0;
+ int startingPoint = 0;
+ int addedData = 0;
+
+ for (int i = 0; i < firstOrderDiff.length; i++) {
+
+ if (firstOrderDiff[i] != 1) {
+ startingPoint = i + 1 + addedData;
+ this.combineToData(startingPoint, (int) firstOrderDiff[i] - 1, interPolatedValue.get(count));
+ addedData = (int) (firstOrderDiff[i] - 1 + addedData);
+ count = count + 1;
+ }
+ }
+ }
+
+ private void combineToData(int startingPoint, int totalpointsRequired, ArrayList dataToAdd) {
+ for (int i = 0; i < totalpointsRequired; i++) {
+ this.data.set(i + startingPoint, dataToAdd.get(i));
+ }
+ }
+
+ /**
+ * Can interpolate ?.
+ *
+ * @return boolean yes or no.
+ */
+ public boolean canInterpolate() {
+ var nonNaNCount = this.data.stream().filter(d -> d != null && !Double.isNaN(d)).count();
+ return this.data.size() > 4 && nonNaNCount > 2;
+ }
+
+ public void setData(ArrayList val) {
+ this.data = val;
+ }
+
+ public ArrayList getInterPolatedData() {
+ return this.data;
+ }
+
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/InterpolationManager.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/InterpolationManager.java
new file mode 100644
index 00000000000..5981aca4141
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/InterpolationManager.java
@@ -0,0 +1,149 @@
+package io.openems.edge.predictor.lstmmodel.interpolation;
+
+import java.time.OffsetDateTime;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.OptionalDouble;
+import java.util.stream.Collectors;
+
+import io.openems.edge.predictor.lstmmodel.common.HyperParameters;
+import io.openems.edge.predictor.lstmmodel.utilities.UtilityConversion;
+
+public class InterpolationManager {
+
+ private ArrayList interpolated = new ArrayList();
+ private ArrayList newDates = new ArrayList();
+
+ public ArrayList getInterpolatedData() {
+ return this.interpolated;
+ }
+
+ public ArrayList getNewDates() {
+ return this.newDates;
+ }
+
+ public InterpolationManager(double[] data, HyperParameters hyperParameters) {
+ var dataList = UtilityConversion.to1DArrayList(data);
+ this.makeInterpolation(dataList);
+ }
+
+ public InterpolationManager(ArrayList data, HyperParameters hyperParameters) {
+ this.makeInterpolation(data);
+ }
+
+ private void makeInterpolation(ArrayList data) {
+ ArrayList dataDouble = replaceNullWithNaN(data);
+ double mean = calculateMean(dataDouble);
+
+ // TODO why 96
+ int groupSize = 96;
+
+ List> groupedData = group(dataDouble, groupSize);
+
+ CubicalInterpolation inter = new CubicalInterpolation();
+
+ List> interpolatedGroupedData = groupedData.stream()//
+ .map(currentGroup -> {
+ if (this.interpolationDecision(currentGroup)) {
+ this.handleFirstAndLastDataPoint(currentGroup, mean);
+ inter.setData(currentGroup);
+ return inter.canInterpolate() ? inter.compute() : LinearInterpolation.interpolate(currentGroup);
+ } else {
+ return currentGroup;
+ }
+ }).collect(Collectors.toList());
+
+ this.interpolated = unGroup(interpolatedGroupedData);
+
+ }
+
+ private void handleFirstAndLastDataPoint(ArrayList currentGroup, double mean) {
+ int firstIndex = 0;
+ int lastIndex = currentGroup.size() - 1;
+
+ if (Double.isNaN(currentGroup.get(firstIndex))) {
+ currentGroup.set(firstIndex, mean);
+ }
+ if (Double.isNaN(currentGroup.get(lastIndex))) {
+ currentGroup.set(lastIndex, mean);
+ }
+ }
+
+ /**
+ * Checks whether interpolation is needed based on the presence of NaN values in
+ * the provided list.
+ *
+ * @param data The list of Double values to be checked.
+ * @return true if interpolation is needed (contains at least one NaN value),
+ * false otherwise.
+ */
+ private boolean interpolationDecision(ArrayList data) {
+ return data.stream().anyMatch(value -> Double.isNaN(value));
+ }
+
+ /**
+ * Replaces null values with Double.NaN in the given ArrayList.
+ *
+ * @param data The ArrayList to be processed.
+ * @return A new ArrayList with null values replaced by Double.NaN.
+ */
+ public static ArrayList replaceNullWithNaN(ArrayList data) {
+ return data.stream()//
+ .map(value -> (value == null) ? Double.NaN : value)//
+ .collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ /**
+ * Calculates the mean (average) of a list of numeric values, excluding NaN
+ * values.
+ *
+ * @param data The list of numeric values from which to calculate the mean.
+ * @return The mean of the non-NaN numeric values in the input list.
+ */
+ public static double calculateMean(ArrayList data) {
+ if (data.isEmpty()) {
+ return Double.NaN;
+ }
+
+ OptionalDouble meanOptional = data.stream()//
+ .filter(value -> !Double.isNaN(value))//
+ .mapToDouble(Double::doubleValue)//
+ .average();
+
+ return meanOptional.orElse(Double.NaN);
+ }
+
+ /**
+ * Ungroups a list of sublists into a single list.
+ *
+ * @param data The list of sublists to be ungrouped.
+ * @return A single list containing all elements from the sublists.
+ */
+ public static ArrayList unGroup(List> data) {
+ return data.stream()//
+ .flatMap(List::stream)//
+ .collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ /**
+ * Groups a list of data into sublists of a specified size. This method takes a
+ * list of data and groups it into sublists of a specified size. Each sublist
+ * will contain up to {@code groupSize} elements, except for the last sublist,
+ * which may contain fewer elements if the total number of elements is not a
+ * multiple of {@code groupSize}.
+ *
+ * @param data The list of data to be grouped.
+ * @param groupSize The maximum number of elements in each sublist.
+ * @return A list of sublists, each containing up to {@code groupSize} elements.
+ */
+ public static ArrayList> group(ArrayList data, int groupSize) {
+ ArrayList> groupedData = new ArrayList<>();
+
+ for (int i = 0; i < data.size(); i += groupSize) {
+ ArrayList sublist = new ArrayList<>(data.subList(i, Math.min(i + groupSize, data.size())));
+ groupedData.add(sublist);
+ }
+ return groupedData;
+ }
+
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/LinearInterpolation.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/LinearInterpolation.java
new file mode 100644
index 00000000000..bd335074a52
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/interpolation/LinearInterpolation.java
@@ -0,0 +1,102 @@
+package io.openems.edge.predictor.lstmmodel.interpolation;
+
+import java.util.ArrayList;
+
/**
 * Fills NaN gaps in a data series by straight-line interpolation between the
 * nearest non-NaN neighbours.
 */
public class LinearInterpolation {

	/**
	 * Interpolates NaN values in the provided data set.
	 *
	 * <p>
	 * Only NaN runs enclosed by non-NaN values on both sides are filled;
	 * leading (and trailing) NaN runs are left untouched - callers such as
	 * InterpolationManager are expected to fix the first/last data point
	 * beforehand.
	 *
	 * @param data The input data set with NaN values; modified in place.
	 * @return The data set with enclosed NaN values replaced by interpolated
	 *         values.
	 */
	public static ArrayList<Double> interpolate(ArrayList<Double> data) {
		ArrayList<ArrayList<Integer>> coordinates = determineInterpolatingPoints(data);
		for (ArrayList<Integer> pair : coordinates) {
			int xVal1 = pair.get(0);
			int xVal2 = pair.get(1);

			ArrayList<Double> interpolationResult = computeInterpolation(xVal1, xVal2, data.get(xVal1),
					data.get(xVal2));
			data = combine(data, interpolationResult, xVal1, xVal2);
		}
		return data;
	}

	/**
	 * Determines the index pairs (x1, x2) of non-NaN values that enclose a run
	 * of NaN values.
	 *
	 * <p>
	 * Fix: a NaN run starting at index 0 has no left neighbour; previously this
	 * produced the invalid index -1 and caused an IndexOutOfBoundsException in
	 * {@link #interpolate(ArrayList)}. Such runs are now skipped.
	 *
	 * @param data The input data set.
	 * @return A list of [x1, x2] index pairs enclosing NaN runs.
	 */
	public static ArrayList<ArrayList<Integer>> determineInterpolatingPoints(ArrayList<Double> data) {
		ArrayList<ArrayList<Integer>> coordinates = new ArrayList<>();

		var inNaNSequence = false;
		var xVal1 = -1;

		for (int i = 0; i < data.size(); i++) {
			var currentValue = data.get(i);

			if (Double.isNaN(currentValue)) {
				if (!inNaNSequence) {
					xVal1 = i - 1;
					inNaNSequence = true;
				}
			} else {
				if (inNaNSequence) {
					// Close the run; skip runs with no left neighbour (xVal1 < 0).
					if (xVal1 >= 0) {
						ArrayList<Integer> temp = new ArrayList<>();
						temp.add(xVal1);
						temp.add(i);
						coordinates.add(temp);
					}
					inNaNSequence = false;
				}
			}
		}
		return coordinates;
	}

	/**
	 * Computes linear interpolation between two values; returns the
	 * (xValue2 - xValue1 - 1) interior points, excluding the endpoints.
	 *
	 * @param xValue1 The x-value corresponding to the first data point.
	 * @param xValue2 The x-value corresponding to the second data point.
	 * @param yValue1 The y-value corresponding to the first data point.
	 * @param yValue2 The y-value corresponding to the second data point.
	 * @return A list of interpolated y-values between xValue1 and xValue2.
	 */
	public static ArrayList<Double> computeInterpolation(int xValue1, int xValue2, double yValue1, double yValue2) {
		var interPolatedResults = new ArrayList<Double>();
		var xVal1 = (double) xValue1;
		var xVal2 = (double) xValue2;

		for (int i = 1; i < (xValue2 - xValue1); i++) {
			interPolatedResults
					.add((yValue1 * ((xVal2 - (i + xVal1)) / (xVal2 - xVal1)) + yValue2 * ((i) / (xVal2 - xVal1))));
		}
		return interPolatedResults;
	}

	/**
	 * Writes the interpolated values back into the original data set between
	 * xValue1 and xValue2 (both exclusive).
	 *
	 * @param orginalData        The original data set; modified in place.
	 * @param interpolatedResult The result of linear interpolation.
	 * @param xValue1            The first index used for interpolation.
	 * @param xValue2            The second index used for interpolation.
	 * @return The combined data set with interpolated values.
	 */
	public static ArrayList<Double> combine(ArrayList<Double> orginalData, ArrayList<Double> interpolatedResult,
			int xValue1, int xValue2) {
		for (int i = 0; i < (interpolatedResult.size()); i++) {
			orginalData.set((i + xValue1 + 1), interpolatedResult.get(i));
		}
		return orginalData;
	}
}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/GetPredictionRequest.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/GetPredictionRequest.java
new file mode 100644
index 00000000000..f501169bf0a
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/GetPredictionRequest.java
@@ -0,0 +1,50 @@
+package io.openems.edge.predictor.lstmmodel.jsonrpc;
+
+import com.google.gson.JsonObject;
+
+import io.openems.common.exceptions.OpenemsException;
+import io.openems.common.jsonrpc.base.JsonrpcRequest;
+
+/*
+ * url = http://localhost:8084/jsonrpc
+ * {
+ * "method": "componentJsonApi",
+ * "params": {
+ * "componentId": "predictor0",
+ * "payload": {
+ * "method": "getLstmPrediction",
+ * "params": {
+ * "id": "edge0"
+ * }
+ * }
+ * }
+*}
+ */
+public class GetPredictionRequest extends JsonrpcRequest {
+
+ public static final String METHOD = "getLstmPrediction";
+
+ /**
+ * get predictions.
+ *
+ * @param r the request
+ * @return new prediction
+ * @throws on error
+ */
+ public static GetPredictionRequest from(JsonrpcRequest r) throws OpenemsException {
+ return new GetPredictionRequest(r);
+ }
+
+ public GetPredictionRequest() {
+ super(METHOD);
+ }
+
+ private GetPredictionRequest(JsonrpcRequest request) {
+ super(request, METHOD);
+ }
+
+ @Override
+ public JsonObject getParams() {
+ return new JsonObject();
+ }
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/GetPredictionResponse.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/GetPredictionResponse.java
new file mode 100644
index 00000000000..2da8233535c
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/GetPredictionResponse.java
@@ -0,0 +1,55 @@
+package io.openems.edge.predictor.lstmmodel.jsonrpc;
+
+import java.time.ZonedDateTime;
+import java.util.SortedMap;
+import java.util.UUID;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonNull;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonPrimitive;
+
+import io.openems.common.jsonrpc.base.JsonrpcResponseSuccess;
+import io.openems.common.utils.JsonUtils;
+import io.openems.common.utils.JsonUtils.JsonObjectBuilder;
+
+public class GetPredictionResponse extends JsonrpcResponseSuccess {
+
+ private JsonArray prediction;
+ private SortedMap predictionResult;
+
+ public GetPredictionResponse(JsonArray prediction) {
+ this(UUID.randomUUID(), prediction);
+ }
+
+ public GetPredictionResponse(UUID id, JsonArray prediction) {
+ super(id);
+ this.prediction = prediction != null ? prediction : new JsonArray();
+ this.predictionResult = null;
+ }
+
+ public GetPredictionResponse(UUID id, SortedMap predictionResult) {
+ super(id);
+ this.predictionResult = predictionResult;
+ this.prediction = new JsonArray();
+ if (predictionResult != null) {
+ predictionResult.values().forEach(value -> {
+ this.prediction.add(value != null ? new JsonPrimitive(value) : JsonNull.INSTANCE);
+ });
+ }
+ }
+
+ @Override
+ public JsonObject getResult() {
+ JsonObjectBuilder result = JsonUtils.buildJsonObject() //
+ .add("prediction", this.prediction) //
+ .add("size", new JsonPrimitive(this.prediction.size()));
+
+ if (this.predictionResult != null) {
+ result.add("TimeValueMap", new JsonPrimitive(this.predictionResult.toString()));
+ } else {
+ result.add("timeValueMap", JsonNull.INSTANCE);
+ }
+ return result.build();
+ }
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/PredictionRequestHandler.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/PredictionRequestHandler.java
new file mode 100644
index 00000000000..b94f838a7d7
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/jsonrpc/PredictionRequestHandler.java
@@ -0,0 +1,24 @@
+package io.openems.edge.predictor.lstmmodel.jsonrpc;
+
+import java.util.UUID;
+
+import io.openems.common.types.ChannelAddress;
+import io.openems.edge.predictor.api.manager.PredictorManager;
+
+public class PredictionRequestHandler {
+
+ /**
+ * get predictionsReasponse.
+ *
+ * @param requestId the id
+ * @param predictionManager the manager
+ * @param channelAddress the {@link ChannelAddress}
+ * @return the new prediction
+ */
+ public static GetPredictionResponse handlerGetPredictionRequest(UUID requestId, PredictorManager predictionManager,
+ ChannelAddress channelAddress) {
+
+ var sortedMap = predictionManager.getPrediction(channelAddress).valuePerQuarter;
+ return new GetPredictionResponse(requestId, sortedMap);
+ }
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/performance/PerformanceMatrix.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/performance/PerformanceMatrix.java
new file mode 100644
index 00000000000..b721a8a14fe
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/performance/PerformanceMatrix.java
@@ -0,0 +1,273 @@
+package io.openems.edge.predictor.lstmmodel.performance;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.math3.distribution.TDistribution;
+import org.apache.commons.math3.stat.StatUtils;
+
+import io.openems.edge.predictor.lstmmodel.common.DataStatistics;
+
+public class PerformanceMatrix {
+ private ArrayList target = new ArrayList();
+ private ArrayList predicted = new ArrayList();
+ private double allowedError = 0.0;
+
+ public PerformanceMatrix(ArrayList tar, ArrayList predict, double allowedErr) {
+ this.target = tar;
+ this.predicted = predict;
+ this.allowedError = allowedErr;
+ }
+
+ /**
+ * Calculates the mean absolute error between the target and predicted values.
+ * Mean absolute error (MAE) is a metric that measures the average absolute
+ * difference between corresponding elements of two lists.
+ *
+ * @param target The list of target values.
+ * @param predicted The list of predicted values.
+ * @return The mean absolute error between the target and predicted values.
+ * @throws IllegalArgumentException If the input lists have different sizes.
+ */
+ public static double meanAbsoluteError(ArrayList target, ArrayList predicted) {
+
+ if (predicted.size() != target.size()) {
+ throw new IllegalArgumentException("Input lists must have the same size");
+ }
+
+ double sumError = 0.0;
+ for (int i = 0; i < predicted.size(); i++) {
+ double error = Math.abs(predicted.get(i) - target.get(i));
+ sumError += error;
+ }
+
+ return sumError / predicted.size();
+ }
+
+ /**
+ * Calculates the Root Mean Square (RMS) error between the target and predicted
+ * values. RMS error is a measure of the average magnitude of the differences
+ * between corresponding elements of two lists.
+ *
+ * @param target The list of target values.
+ * @param predicted The list of predicted values.
+ * @return The root mean square error between the target and predicted values.
+ * @throws IllegalArgumentException If the input lists have different sizes.
+ */
+ public static double rmsError(ArrayList target, ArrayList predicted) {
+ if (predicted.size() != target.size()) {
+ throw new IllegalArgumentException("Input lists must have the same size");
+ }
+
+ double sumSquaredError = 0.0;
+ for (int i = 0; i < predicted.size(); i++) {
+ double error = predicted.get(i) - target.get(i);
+ sumSquaredError += error * error;
+ }
+
+ double meanSquaredError = sumSquaredError / predicted.size();
+ return Math.sqrt(meanSquaredError);
+ }
+
+ /**
+ * Calculate the RmsError of two arrays.
+ *
+ * @param target double array of target
+ * @param predicted double array of predicted
+ * @return rms Error
+ */
+ public static double rmsError(double[] target, double[] predicted) {
+ if (predicted.length != target.length) {
+ throw new IllegalArgumentException("Input lists must have the same size");
+ }
+
+ double sumSquaredError = 0.0;
+ for (int i = 0; i < predicted.length; i++) {
+ double error = predicted[i] - target[i];
+ sumSquaredError += error * error;
+ }
+
+ double meanSquaredError = sumSquaredError / predicted.length;
+ return Math.sqrt(meanSquaredError);
+ }
+
+ /**
+ * Calculates the Mean Squared Error (MSE) between the target and predicted
+ * values. MSE is a measure of the average squared differences between
+ * corresponding elements of two lists.
+ *
+ * @param target The list of target values.
+ * @param predicted The list of predicted values.
+ * @return The mean squared error between the target and predicted values.
+ * @throws IllegalArgumentException If the input lists have different sizes.
+ */
+ public static double meanSquaredError(ArrayList target, ArrayList predicted) {
+ if (predicted.size() != target.size()) {
+ throw new IllegalArgumentException("Input lists must have the same size");
+ }
+
+ double sumSquaredError = 0.0;
+ for (int i = 0; i < predicted.size(); i++) {
+ double error = predicted.get(i) - target.get(i);
+ sumSquaredError += error * error;
+ }
+
+ return sumSquaredError / predicted.size();
+ }
+
+ /**
+ * Calculates the accuracy between the target and predicted values within a
+ * specified allowed percentage difference.
+ *
+ * @param target The list of target values.
+ * @param predicted The list of predicted values.
+ * @param allowedPercentage The maximum allowed percentage difference for
+ * accuracy.
+ * @return The accuracy between the target and predicted values.
+ */
+ public static double accuracy(ArrayList target, ArrayList predicted, double allowedPercentage) {
+ double count = 0;
+
+ for (int i = 0; i < predicted.size(); i++) {
+ double diff = Math.abs(predicted.get(i) - target.get(i)) //
+ / Math.max(predicted.get(i), target.get(i));
+ if (diff <= allowedPercentage) {
+ count++;
+ }
+ }
+ return (double) count / predicted.size();
+ }
+
+ /**
+ * Calculate the Accuracy of the predicted compared to target.
+ *
+ * @param target double array of target
+ * @param predicted double array of predicted
+ * @param allowedPercentage allowed percentage error
+ * @return accuracy
+ */
+ public static double accuracy(double[] target, double[] predicted, double allowedPercentage) {
+ double count = 0;
+
+ for (int i = 0; i < predicted.length; i++) {
+ double diff = Math.abs(predicted[i] - target[i]) //
+ / Math.max(predicted[i], target[i]);
+ if (diff <= allowedPercentage) {
+ count++;
+ }
+ }
+ return (double) count / predicted.length;
+ }
+
+ /**
+ * Calculates the Mean Absolute Percentage Error (MAPE) between the target and
+ * predicted values. MAPE is a measure of the average percentage difference
+ * between corresponding elements of two lists.
+ *
+ * @param target The list of target values.
+ * @param predicted The list of predicted values.
+ * @return The mean absolute percentage error between the target and predicted
+ * values.
+ * @throws IllegalArgumentException If the input lists have different sizes.
+ */
+ public static double meanAbslutePercentage(ArrayList target, ArrayList predicted) {
+ if (predicted.size() != target.size()) {
+ throw new IllegalArgumentException("Input lists must have the same size");
+ }
+
+ double sumPercentageError = 0.0;
+ for (int i = 0; i < predicted.size(); i++) {
+ double absoluteError = Math.abs(predicted.get(i) - target.get(i));
+ double percentageError = absoluteError / target.get(i) * 100.0;
+ sumPercentageError += percentageError;
+ }
+
+ return sumPercentageError / predicted.size();
+ }
+
+ /**
+ * Calculates the two-tailed p-value using the t-statistic for the differences
+ * between predicted and actual values.
+ *
+ * @param target The list of target values.
+ * @param predicted The list of predicted values.
+ * @return The two-tailed p-value for the differences between predicted and
+ * actual values.
+ * @throws IllegalArgumentException If the input lists have different sizes.
+ */
+ public static double pvalue(ArrayList target, ArrayList predicted) {
+ if (predicted.size() != target.size()) {
+ throw new IllegalArgumentException("Input lists must have the same size.");
+ }
+
+ List differences = new ArrayList<>();
+ for (int i = 0; i < predicted.size(); i++) {
+ differences.add(predicted.get(i) - target.get(i));
+ }
+
+ double[] differencesArray = differences.stream()//
+ .mapToDouble(Double::doubleValue).toArray();
+ double mean = StatUtils.mean(differencesArray);
+ double stdDev = Math.sqrt(StatUtils.variance(differencesArray));
+
+ // Calculate the t-statistic
+ double tStat = mean / (stdDev / Math.sqrt(predicted.size()));
+
+ // Degrees of freedom
+ int degreesOfFreedom = predicted.size() - 1;
+
+ // Create a T-distribution with the appropriate degrees of freedom
+ TDistribution tDistribution = new TDistribution(degreesOfFreedom);
+
+ // Calculate the two-tailed p-value
+ double pValue = 2 * (1.0 - tDistribution.cumulativeProbability(Math.abs(tStat)));
+
+ return pValue;
+ }
+
+ /**
+ * Generates and prints a performance report containing various statistical
+ * metrics and error measures between the actual and predicted data. The report
+ * includes average, standard deviation, mean absolute error, RMS error, mean
+ * squared error, mean absolute percentage error, and accuracy with a specified
+ * error margin. Note: This method assumes that the necessary statistical
+ * methods (e.g., meanAbsoluteError, rmsError, meanSquaredError,
+ * meanAbslutePercentage, accuracy) are implemented in the same class. The
+ * p-value calculation is not included in the report by default.
+ */
+ public void statusReport() {
+ System.out.println("\n.................. Performance Report .............................");
+
+ // Calculate and display statistics for actual data
+ double averageActual = DataStatistics.getMean(this.target);
+ double stdDevActual = DataStatistics.getStandardDeviation(this.target);
+ System.out.println("Average of actual data = " + averageActual);
+ System.out.println("Standard deviation of actual data = " + stdDevActual);
+
+ // Calculate and display statistics for predicted data
+ double averagePredicted = DataStatistics.getMean(this.predicted);
+ double stdDevPredicted = DataStatistics.getStandardDeviation(this.predicted);
+ System.out.println("Average of prediction data = " + averagePredicted);
+ System.out.println("Standard deviation of predicted data = " + stdDevPredicted);
+
+ // Display various error metrics
+ System.out.println("Mean absolute error = " + meanAbsoluteError(this.target, this.predicted)
+ + " (average absolute difference between predicted and actual values)");
+
+ System.out.println("RMS error = " + rmsError(this.target, this.predicted) + " (square root of the MSE)");
+
+ System.out.println("Mean squared error = " + meanSquaredError(this.target, this.predicted)
+ + " (average of the squared differences between predicted and actual values)");
+
+ System.out.println("Mean absolute percentage error = " + meanAbslutePercentage(this.target, this.predicted)
+ + " (measures the average percentage difference between predicted and actual values)");
+
+ // Display accuracy with the specified error margin
+ double accuracyPercentage = accuracy(this.target, this.predicted, this.allowedError) * 100;
+
+ System.out.println("Accuracy for " + this.allowedError * 100 + "% error margin = " + accuracyPercentage + "%");
+
+ System.out.println("");
+ }
+
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/DataModification.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/DataModification.java
new file mode 100644
index 00000000000..93df1bc94b4
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/DataModification.java
@@ -0,0 +1,730 @@
+package io.openems.edge.predictor.lstmmodel.preprocessing;
+
+import static io.openems.edge.predictor.lstmmodel.common.DataStatistics.getMean;
+import static io.openems.edge.predictor.lstmmodel.common.DataStatistics.getStandardDeviation;
+
+import java.time.OffsetDateTime;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import io.openems.edge.predictor.lstmmodel.common.HyperParameters;
+
+public class DataModification {
+
+ private static final double MIN_SCALED = 0.2;
+ private static final double MAX_SCALED = 0.8;
+
+ /**
+ * Scales a list of numeric data values to a specified range. This method scales
+ * a list of numeric data values to a specified range defined by the minimum
+ * (min) and maximum (max) values. The scaled data will be within the range
+ * defined by the minimumScaled (minScaled) and maximumScaled (maxScaled)
+ * values.
+ *
+ * @param data The list of numeric data values to be scaled.
+ * @param min The original minimum value in the data.
+ * @param max The original maximum value in the data.
+ * @return A new list containing the scaled data within the specified range.
+ */
+ public static ArrayList scale(ArrayList data, double min, double max) {
+ return data.stream()//
+ .map(value -> MIN_SCALED + ((value - min) / (max - min)) * (MAX_SCALED - MIN_SCALED))
+ .collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ /**
+ * * Scales a list of numeric data values to a specified range. This method
+ * scales a list of numeric data values to a specified range defined by the
+ * minimum (min) and maximum (max) values. The scaled data will be within the
+ * range defined by the minimumScaled (minScaled) and maximumScaled (maxScaled)
+ * values.
+ *
+ * @param data The array of numeric data values to be scaled.
+ * @param min The original minimum value in the data.
+ * @param max The original maximum value in the data.
+ * @return A new list containing the scaled data within the specified range.
+ */
+ public static double[] scale(double[] data, double min, double max) {
+ return Arrays.stream(data)//
+ .map(value -> MIN_SCALED + ((value - min) / (max - min)) * (MAX_SCALED - MIN_SCALED))//
+ .toArray();
+ }
+
+ /**
+ * Re-scales a single data point from the scaled range to the original range.
+ * This method re-scales a single data point from the scaled range (defined by
+ * 'minScaled' and 'maxScaled') back to the original range, which is specified
+ * by 'minOriginal' and 'maxOriginal'. It performs the reverse scaling operation
+ * for a single data value.
+ *
+ * @param scaledData The data point to be rescaled from the scaled range to the
+ * original range.
+ * @param minOriginal The minimum value of the training dataset (original data
+ * range).
+ * @param maxOriginal The maximum value of the training dataset (original data
+ * range).
+ * @return The rescaled data point in the original range.
+ */
+ public static double scaleBack(double scaledData, double minOriginal, double maxOriginal) {
+ return calculateScale(scaledData, MIN_SCALED, MAX_SCALED, minOriginal, maxOriginal);
+ }
+
+ /**
+ * Scales back a list of double values from a scaled range to the original
+ * range. This method takes a list of scaled values and scales them back to
+ * their original range based on the specified minimum and maximum values of the
+ * original range.
+ *
+ * @param data The list of double values to be scaled back.
+ * @param minOriginal The minimum value of the original range.
+ * @param maxOriginal The maximum value of the original range.
+ * @return A new ArrayList containing the scaled back values.
+ */
+ public static ArrayList scaleBack(ArrayList data, double minOriginal, double maxOriginal) {
+ return data.stream()//
+ .map(value -> calculateScale(value, MIN_SCALED, MAX_SCALED, minOriginal, maxOriginal))//
+ .collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ /**
+ * * Scales back a list of double values from a scaled range to the original
+ * range. This method takes a list of scaled values and scales them back to
+ * their original range based on the specified minimum and maximum values of the
+ * original range.
+ *
+ * @param data The list of double values to be scaled back.
+ * @param minOriginal The minimum value of the original range.
+ * @param maxOriginal The maximum value of the original range.
+ * @return A new ArrayList containing the scaled back values.
+ */
+ public static double[] scaleBack(double[] data, double minOriginal, double maxOriginal) {
+ return Arrays.stream(data)//
+ .map(value -> calculateScale(value, MIN_SCALED, MAX_SCALED, minOriginal, maxOriginal))//
+ .toArray();
+ }
+
+ /**
+ * Scales a value from a scaled range back to the original range.
+ *
+ * @param valScaled The value in the scaled range to be converted back to the
+ * original range.
+ * @param minScaled The minimum value of the scaled range.
+ * @param maxScaled The maximum value of the scaled range.
+ * @param minOriginal The minimum value of the original range.
+ * @param maxOriginal The maximum value of the original range.
+ * @return The value converted back to the original range.
+ */
+ private static double calculateScale(double valScaled, double minScaled, double maxScaled, double minOriginal,
+ double maxOriginal) {
+ return ((valScaled - minScaled) * (maxOriginal - minOriginal) / (maxScaled - minScaled)//
+ ) + minOriginal;
+ }
+
+ /**
+ * Normalize a 2D array of data using standardization (z-score normalization).
+ * This method normalizes a 2D array of data by applying standardization
+ * (z-score normalization) to each row independently. The result is a new 2D
+ * array of normalized data.
+ *
+ * @param data The 2D array of data to be normalized.
+ * @param hyperParameters instance of class HyperParameters
+ * @return A new 2D array containing the standardized (normalized) data.
+ */
+ public static double[][] normalizeData(double[][] data, HyperParameters hyperParameters) {
+ double[][] standData;
+ standData = new double[data.length][data[0].length];// Here error
+ for (int i = 0; i < data.length; i++) {
+ standData[i] = standardize(data[i], hyperParameters);
+ }
+ return standData;
+ }
+
+ /**
+ * Normalizes the data based on the given target values, using standardization.
+ * This method calculates the standardization of each data point in the input
+ * data array with respect to the corresponding target value. It utilizes the
+ * mean and standard deviation of the input data array to perform the
+ * standardization.
+ *
+ * @param data The input data array containing the features to be
+ * normalized.
+ * @param target The target values to which the data will be
+ * standardized.
+ * @param hyperParameters The {@link HyperParameters} required for
+ * normalization.
+ * @return A double array containing the normalized data.
+ */
+
+ public static double[] normalizeData(double[][] data, double[] target, HyperParameters hyperParameters) {
+ double[] standData;
+ standData = new double[target.length];
+ for (int i = 0; i < data.length; i++) {
+ standData[i] = standardize(target[i], getMean(data[i]), getStandardDeviation(data[i]), hyperParameters);
+ }
+ return standData;
+ }
+
+ /**
+ * Standardizes a 1D array of data using Z-score normalization. This method
+ * standardizes a 1D array of data by applying Z-score normalization. It
+ * calculates the mean and standard deviation of the input data and then
+ * standardizes each data point.
+ *
+ * @param inputData The 1D array of data to be standardized.
+ * @param hyperParameters instance of {@link HyperParameters}
+ * @return A new 1D array containing the standardized (normalized) data.
+ */
+ public static double[] standardize(double[] inputData, HyperParameters hyperParameters) {
+ double meanCurrent = getMean(inputData);
+
+ double stdDeviationCurrent = getStandardDeviation(inputData);
+ double meanTarget = hyperParameters.getMean();
+ double standerDeviationTarget = hyperParameters.getStanderDeviation();
+
+ double[] standardizedData = new double[inputData.length];
+ for (int i = 0; i < inputData.length; i++) {
+ standardizedData[i] = meanTarget
+ + ((inputData[i] - meanCurrent) * (standerDeviationTarget / stdDeviationCurrent));
+ }
+ return standardizedData;
+ }
+
+ /**
+ * Standardizes a given input data point using mean and standard deviation. This
+ * method standardizes the input data point based on the provided mean and
+ * standard deviation of the current data and the target mean and standard
+ * deviation specified in the {@link HyperParameters}.
+ *
+ * @param inputData The input data point to be standardized.
+ * @param mean The mean of the current data.
+ * @param standerdDev The standard deviation of the current data.
+ * @param hyperParameters The {@link HyperParameters} containing the target mean
+ * and standard deviation.
+ * @return The standardized value of the input data point.
+ */
+ public static double standardize(double inputData, double mean, double standerdDev,
+ HyperParameters hyperParameters) {
+
+ double meanCurrent = mean;
+
+ double stdDeviationCurrent = standerdDev;
+ double meanTarget = hyperParameters.getMean();
+ double standerDeviationTarget = hyperParameters.getStanderDeviation();
+ return meanTarget + ((inputData - meanCurrent) * (standerDeviationTarget / stdDeviationCurrent));
+
+ }
+
+ /**
+ * Reverse standardizes a data point that was previously standardized using
+ * Z-score normalization. This method reverses the standardization process for a
+ * single data point that was previously standardized using Z-score
+ * normalization. It requires the mean and standard deviation of the original
+ * data along with the Z-score value (zvalue) to perform the reverse
+ * standardization.
+ *
+ * @param mean The mean of the original data.
+ * @param standardDeviation The standard deviation of the original data.
+ * @param zvalue The Z-score value of the standardized data point.
+ * @param hyperParameters instance of {@link HyperParameters}
+ * @return The reverse standardized value in the original data's scale.
+ */
+ public static double reverseStandrize(double zvalue, double mean, double standardDeviation,
+ HyperParameters hyperParameters) {
+
+ double reverseStand = 0;
+ double meanTarget = hyperParameters.getMean();
+ double standardDeviationTarget = hyperParameters.getStanderDeviation();
+
+ reverseStand = ((zvalue - meanTarget) * (standardDeviation / standardDeviationTarget) + mean);
+ return reverseStand;
+ }
+
+ /**
+ * Reverse standardizes a list of data points based on given mean, standard
+ * deviation, and {@link HyperParameters}. This method reverse standardizes each
+ * data point in the input list based on the provided mean, standard deviation,
+ * and {@link HyperParameters}. It returns a new Array containing the reverse
+ * standardized values.
+ *
+ * @param data The list of data points to be reverse standardized.
+ * @param mean The list of means corresponding to the data points.
+ * @param standDeviation The list of standard deviations corresponding to the
+ * data points.
+ * @param hyperParameters The {@link HyperParameters} containing the target mean
+ * and standard deviation.
+ * @return A new list containing the reverse standardized values.
+ */
+ public static double[] reverseStandrize(ArrayList data, ArrayList mean,
+ ArrayList standDeviation, HyperParameters hyperParameters) {
+ double[] revNorm = new double[data.size()];
+ for (int i = 0; i < data.size(); i++) {
+ revNorm[i] = (reverseStandrize(data.get(i), mean.get(i), standDeviation.get(i), hyperParameters));
+ }
+ return revNorm;
+ }
+
+ /**
+ * Reverse standardizes a list of data points based on given mean, standard
+ * deviation, and {@link HyperParameters}. This method reverse standardizes each
+ * data point in the input list based on the provided mean, standard deviation,
+ * and {@link HyperParameters}. It returns a new list containing the reverse
+ * standardized values.
+ *
+ * @param data The Array of data points to be reverse standardized.
+ * @param mean The Array of means corresponding to the data points.
+ * @param standDeviation The Array of standard deviations corresponding to the
+ * data points.
+ * @param hyperParameters The {@link HyperParameters} containing the target mean
+ * and standard deviation.
+ * @return A new Array containing the reverse standardized values.
+ */
+ public static double[] reverseStandrize(double[] data, double[] mean, double[] standDeviation,
+ HyperParameters hyperParameters) {
+ double[] revNorm = new double[data.length];
+ for (int i = 0; i < data.length; i++) {
+ revNorm[i] = (reverseStandrize(data[i], mean[i], standDeviation[i], hyperParameters));
+ }
+ return revNorm;
+ }
+
+ /**
+ * Reverse standardizes a list of data points based on given mean, standard
+ * deviation, and {@link HyperParameters}. This method reverse standardizes each
+ * data point in the input list based on the provided mean, standard deviation,
+ * and {@link HyperParameters}. It returns a new Array containing the reverse
+ * standardized values.
+ *
+ * @param data The Array of data points to be reverse standardized.
+ * @param mean The mean corresponding to the data points.
+ * @param standDeviation The standard deviation corresponding to the data
+ * points.
+ * @param hyperParameters The {@link HyperParameters} containing the target mean
+ * and standard deviation.
+ * @return A new Array containing the reverse standardized values.
+ */
+ public static double[] reverseStandrize(ArrayList data, double mean, double standDeviation,
+ HyperParameters hyperParameters) {
+ double[] revNorm = new double[data.size()];
+ for (int i = 0; i < data.size(); i++) {
+ revNorm[i] = (reverseStandrize(data.get(i), mean, standDeviation, hyperParameters));
+ }
+ return revNorm;
+ }
+
+ /**
+ * Reverse standardizes a list of data points based on given mean, standard
+ * deviation, and {@link HyperParameters}. This method reverse standardizes each
+ * data point in the input list based on the provided mean, standard deviation,
+ * and {@link HyperParameters}. It returns a new list containing the reverse
+ * standardized values.
+ *
+ * @param data The list of data points to be reverse standardized.
+ * @param mean The mean corresponding to the data points.
+ * @param standDeviation The standard deviation corresponding to the data
+ * points.
+ * @param hyperParameters The {@link HyperParameters} containing the target mean
+ * and standard deviation.
+ * @return A new list containing the reverse standardized values.
+ */
+ public static double[] reverseStandrize(double[] data, double mean, double standDeviation,
+ HyperParameters hyperParameters) {
+ double[] revNorm = new double[data.length];
+ for (int i = 0; i < data.length; i++) {
+ revNorm[i] = (reverseStandrize(data[i], mean, standDeviation, hyperParameters));
+ }
+ return revNorm;
+ }
+
+ /**
+ * Modifies the given time-series data for long-term prediction by grouping it
+ * based on hours and minutes.
+ *
+ * @param data The {@link ArrayList} of Double values representing the
+ * time-series data.
+ * @param date The {@link ArrayList} of OffsetDateTime objects corresponding to
+ * the timestamps of the data.
+ * @return An {@link ArrayList} of {@link ArrayList} of {@link ArrayList},
+ * representing the modified data grouped by hours and minutes.
+ */
+
+ public static ArrayList>> groupDataByHourAndMinute(ArrayList data,
+ ArrayList date) {
+
+ ArrayList>> dataGroupedByMinute = new ArrayList<>();
+ ArrayList>> dateGroupedByMinute = new ArrayList<>();
+
+ GroupBy groupByHour = new GroupBy(data, date);
+ groupByHour.hour();
+
+ for (int i = 0; i < groupByHour.getGroupedDataByHour().size(); i++) {
+ GroupBy groupByMinute = new GroupBy(groupByHour.getGroupedDataByHour().get(i),
+ groupByHour.getGroupedDateByHour().get(i));
+
+ groupByMinute.minute();
+ dataGroupedByMinute.add(groupByMinute.getGroupedDataByMinute());
+ dateGroupedByMinute.add(groupByMinute.getGroupedDateByMinute());
+ }
+ return dataGroupedByMinute;
+ }
+
+ /**
+ * Modify the data for trend term prediction.
+ *
+ * @param data The ArrayList of Double values data.
+ * @param date The ArrayList of Double values date.
+ * @param hyperParameters The {@link HyperParameters}
+ * @return The ArrayList of modified values
+ */
+ public static ArrayList> modifyFortrendPrediction(ArrayList data,
+ ArrayList date, HyperParameters hyperParameters) {
+
+ ArrayList>> firstModification = groupDataByHourAndMinute(data, date);
+
+ // Flatten the structure of the first modification
+ ArrayList> secondModification = flatten3dto2d(firstModification);
+
+ // Apply windowing to create the third modification
+ ArrayList> thirdModification = applyWindowing(secondModification, hyperParameters);
+
+ return thirdModification;
+ }
+
+ private static ArrayList> flatten3dto2d(//
+ ArrayList>> data) {
+ return data.stream()//
+ .flatMap(twoDList -> twoDList.stream())//
+ .collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ /**
+ * Decreases the dimensionality of a 4D ArrayList to a 3D ArrayList. This method
+ * flattens the input 4D ArrayList to a 3D ArrayList by merging the innermost
+ * ArrayLists into one. It returns the resulting 3D ArrayList.
+ *
+ * @param model The 4D ArrayList to decrease in dimensionality.
+ * @return The resulting 3D ArrayList after decreasing the dimensionality.
+ */
+ public static ArrayList>> flattern4dto3d(
+ ArrayList>>> model) {
+
+ return model.stream()//
+ .flatMap(threeDList -> threeDList.stream())//
+ .collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ private static ArrayList> applyWindowing(ArrayList> data,
+ HyperParameters hyperParameters) {
+ ArrayList> windowedData = new ArrayList<>();
+ int windowSize = hyperParameters.getWindowSizeTrend();
+
+ for (int i = 0; i < data.size(); i++) {
+ ArrayList> toCombine = new ArrayList<>();
+
+ for (int j = 0; j <= windowSize; j++) {
+ int index = (j + i) % data.size();
+ toCombine.add(data.get(index));
+ }
+ windowedData.add(combinedArray(toCombine));
+ }
+ return windowedData;
+ }
+
+ /**
+ * Flatten the array by combining.
+ *
+ * @param values The ArrayList of Double values.
+ * @return reGroupedsecond Teh Flattened ArrayList
+ */
+ public static ArrayList combinedArray(ArrayList> values) {
+ int minSize = values.stream()//
+ .mapToInt(ArrayList::size)//
+ .min()//
+ .orElse(0);
+
+ ArrayList reGroupedsecond = new ArrayList<>();
+
+ for (int i = 0; i < minSize; i++) {
+ for (ArrayList innerList : values) {
+ reGroupedsecond.add(innerList.get(i));
+ }
+ }
+ return reGroupedsecond;
+ }
+
+ /**
+ * Splits a list of Double values into multiple batches and returns the batches.
+ * The method divides the original list into a specified number of groups,
+ * ensuring that each group has an approximately equal number of elements. It
+ * handles any remainder by distributing the extra elements among the first few
+ * groups.
+ *
+ * @param originalList The original list of Double values to be split into
+ * batches.
+ * @param numberOfGroups The desired number of groups to split the list into.
+ * @return An ArrayList of ArrayLists, where each inner ArrayList represents a
+ * batch of Double values.
+ */
+ public static ArrayList> getDataInBatch(ArrayList originalList, int numberOfGroups) {
+ ArrayList> splitGroups = new ArrayList<>();
+
+ int originalSize = originalList.size();
+ int groupSize = originalSize / numberOfGroups;
+ int remainder = originalSize % numberOfGroups;
+
+ int currentIndex = 0;
+ for (int i = 0; i < numberOfGroups; i++) {
+ int groupCount = groupSize + (i < remainder ? 1 : 0);
+ ArrayList group = new ArrayList<>(originalList.subList(currentIndex, currentIndex + groupCount));
+ splitGroups.add(group);
+ currentIndex += groupCount;
+ }
+ return splitGroups;
+ }
+
+ /**
+ * Splits a list of OffsetDateTime into multiple batches and returns the
+ * batches. The method divides the original list into a specified number of
+ * groups, ensuring that each group has an approximately equal number of
+ * elements. It handles any remainder by distributing the extra elements among
+ * the first few groups.
+ *
+ * @param originalList The original list of OffsetDateTime to be split into
+ * batches.
+ * @param numberOfGroups The desired number of groups to split the list into.
+ * @return An ArrayList of ArrayLists, where each inner ArrayList represents a
+ * batch of OffsetDateTime objects.
+ */
+ public static ArrayList> getDateInBatch(ArrayList originalList,
+ int numberOfGroups) {
+ ArrayList> splitGroups = new ArrayList<>();
+
+ int originalSize = originalList.size();
+ int groupSize = originalSize / numberOfGroups;
+ int remainder = originalSize % numberOfGroups;
+
+ int currentIndex = 0;
+ for (int i = 0; i < numberOfGroups; i++) {
+ int groupCount = groupSize + (i < remainder ? 1 : 0);
+ ArrayList group = new ArrayList<>(
+ originalList.subList(currentIndex, currentIndex + groupCount));
+ splitGroups.add(group);
+ currentIndex += groupCount;
+ }
+
+ return splitGroups;
+ }
+
+ /**
+ * Removes negative values from the given ArrayList of Doubles by replacing them
+ * with 0.
+ *
+ * @param data The ArrayList of Doubles containing numeric values.
+ * @return ArrayList<Double> A new ArrayList<Double> with negative
+ * values replaced by zero.
+ */
+ public static ArrayList removeNegatives(ArrayList data) {
+ return data.stream()//
+ // Replace negative values with 0
+ .map(value -> value == null || Double.isNaN(value) ? Double.NaN : Math.max(value, 0))
+ .collect(Collectors.toCollection(ArrayList::new));
+
+ }
+
+ /**
+ * Replaces all negative values in the input array with 0. NaN values in the
+ * array remain unchanged.
+ *
+ * @param data the input array of doubles
+ * @return a new array with negative values replaced by 0
+ */
+ public static double[] removeNegatives(double[] data) {
+ return Arrays.stream(data)//
+ .map(value -> Double.isNaN(value) ? Double.NaN : Math.max(value, 0))//
+ .toArray();
+ }
+
+ /**
+ * Scales each element in the input ArrayList by a specified scaling factor.
+ *
+ * @param data The ArrayList of Double values to be scaled.
+ * @param scalingFactor The factor by which each element in the data ArrayList
+ * will be multiplied.
+ * @return A new ArrayList containing the scaled values.
+ */
+ public static ArrayList constantScaling(ArrayList data, double scalingFactor) {
+ return data.stream().map(val -> val * scalingFactor).collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ /**
+ * Scales each element in the input ArrayList by a specified scaling factor.
+ *
+ * @param data The Array of Double values to be scaled.
+ * @param scalingFactor The factor by which each element in the data Array will
+ * be multiplied.
+ * @return A new Array containing the scaled values.
+ */
+ public static double[] constantScaling(double[] data, double scalingFactor) {
+ return Arrays.stream(data).map(val -> val * scalingFactor).toArray();
+ }
+
+ /**
+ * Reshapes a 3D ArrayList into a 4D ArrayList structure. This method takes a
+ * three-dimensional ArrayList of data and reshapes it into a four-dimensional
+ * ArrayList structure. The reshaping is performed by dividing the original data
+ * into blocks of size 4x24. The resulting four-dimensional ArrayList contains
+ * these blocks.
+ *
+ *
+ * @param dataList The 3D list to be reshaped.
+ * @param hyperParameters The hyperparameters containing the interval used to
+ * reshape the list.
+ * @return A reshaped 4D list.
+ */
+ public static ArrayList>>> reshape(
+ ArrayList>> dataList, HyperParameters hyperParameters) {
+
+ // Calculate the dimensions for reshaping
+ int rowsPerDay = 60 / hyperParameters.getInterval() * 24;
+ int numDays = dataList.size() / rowsPerDay;
+
+ // Initialize the reshaped 4D list
+ ArrayList>>> reshapedData = new ArrayList<>();
+
+ int dataIndex = 0;
+ for (int day = 0; day < numDays; day++) {
+ ArrayList>> dailyData = new ArrayList<>();
+ for (int row = 0; row < rowsPerDay; row++) {
+ dailyData.add(dataList.get(dataIndex));
+ dataIndex++;
+ }
+ reshapedData.add(dailyData);
+ }
+
+ return reshapedData;
+ }
+
+ /**
+ * Updates the model with the specified weights based on the given indices and
+ * model type. This method extracts the optimum weights from the provided 4D
+ * ArrayList of models using the given indices and model type. It updates the
+ * hyperparameters with the extracted weights based on the model type.
+ *
+ * @param allModel The 4D ArrayList containing all models.
+ * @param indices The list of indices specifying the location of optimum
+ * weights in the models.
+ * @param fileName The name of the file to save the final model.
+ * @param modelType The type of the model ("trend.txt" or
+ * "seasonality.txt").
+ * @param hyperParameters The hyperparameters to update with the extracted
+ * weights.
+ */
+ public static void updateModel(ArrayList>>> allModel, //
+ List> indices, //
+ String fileName, //
+ String modelType, //
+ HyperParameters hyperParameters) {
+
+ ArrayList>> optimumWeights = new ArrayList>>();
+
+ for (List idx : indices) {
+ ArrayList> tempWeights = allModel//
+ .get(idx.get(0))//
+ .get(idx.get(1));
+ optimumWeights.add(tempWeights);
+ }
+
+ switch (modelType.toLowerCase()) {
+ case "trend":
+ hyperParameters.updatModelTrend(optimumWeights);
+ break;
+ case "seasonality":
+ hyperParameters.updateModelSeasonality(optimumWeights);
+ break;
+ default:
+ throw new IllegalArgumentException("Invalid model type: " + modelType);
+ }
+ }
+
+ /**
+ * Performs element-wise multiplication of two arrays.
+ *
+ * @param featureA the first array
+ * @param featureB the second array
+ * @return a new array where each element is the product of the corresponding
+ * elements of featureA and featureB
+ * @throws IllegalArgumentException if the input arrays are of different lengths
+ */
+ public static double[] elementWiseMultiplication(double[] featureA, double[] featureB) {
+ if (featureA.length != featureB.length) {
+ throw new IllegalArgumentException("The input arrays must have the same length.");
+ }
+ return IntStream.range(0, featureA.length)//
+ .mapToDouble(i -> featureA[i] * featureB[i])//
+ .toArray();
+ }
+
+ /**
+ * Performs element-wise multiplication of two ArrayLists.
+ *
+ * @param featureA the first ArrayList
+ * @param featureB the second ArrayList
+ * @return a new ArrayList where each element is the result of multiplying the
+ * corresponding elements of featureA and featureB
+ * @throws IllegalArgumentException if the input ArrayLists are of different
+ * lengths
+ */
+ public static ArrayList elementWiseMultiplication(ArrayList featureA, ArrayList featureB) {
+ if (featureA.size() != featureB.size()) {
+ throw new IllegalArgumentException("The input ArrayLists must have the same length.");
+ }
+ ArrayList result = new ArrayList<>();
+ IntStream.range(0, featureA.size()).forEach(i -> result.add(featureA.get(i) * featureB.get(i)));
+ return result;
+ }
+
+ /**
+ * Performs element-wise division of two ArrayLists. If an element in featureB
+ * is zero, the corresponding element in the result will be zero.
+ *
+ * @param featureA the first ArrayList
+ * @param featureB the second ArrayList
+ * @return a new ArrayList where each element is the result of dividing the
+ * corresponding elements of featureA by featureB or zero if the element
+ * in featureB is zero
+ * @throws IllegalArgumentException if the input ArrayLists are of different
+ * lengths
+ */
+ public static ArrayList elementWiseDiv(ArrayList featureA, ArrayList featureB) {
+ if (featureA.size() != featureB.size()) {
+ throw new IllegalArgumentException("The input ArrayLists must have the same length.");
+ }
+ ArrayList result = new ArrayList<>();
+ IntStream.range(0, featureA.size())
+ .forEach(i -> result.add((featureB.get(i) == 0) ? featureA.get(i) : featureA.get(i) / featureB.get(i)));
+ return result;
+ }
+
+ /**
+ * Performs element-wise division of two arrays. If an element in featureB is
+ * zero, the corresponding element in the result will be zero.
+ *
+ * @param featureA the first array
+ * @param featureB the second array
+ * @return a new array where each element is the result of dividing the
+ * corresponding elements of featureA by featureB or zero if the element
+ * in featureB is zero
+ * @throws IllegalArgumentException if the input arrays are of different lengths
+ */
+ public static double[] elementWiseDiv(double[] featureA, double[] featureB) {
+ if (featureA.length != featureB.length) {
+ throw new IllegalArgumentException("The input arrays must have the same length.");
+ }
+ return IntStream.range(0, featureA.length)//
+ .mapToDouble(i -> (featureB[i] == 0) ? featureA[i] : featureA[i] / featureB[i])//
+ .toArray();
+ }
+
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/Differencing.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/Differencing.java
new file mode 100644
index 00000000000..9e4a61cd611
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/Differencing.java
@@ -0,0 +1,55 @@
+package io.openems.edge.predictor.lstmmodel.preprocessing;
+
+import java.util.stream.IntStream;
+
public class Differencing {

	/**
	 * First-order differencing: element i of the result is
	 * {@code data[i] - data[i + 1]}, so the result is one element shorter than
	 * the input.
	 *
	 * @param data the data to difference
	 * @return the first-order differences
	 * @throws IllegalArgumentException if data has fewer than two elements
	 */
	public static double[] firstOrderDifferencing(double[] data) {
		if (data.length < 2) {
			throw new IllegalArgumentException("Data array must contain at least two elements.");
		}
		double[] differences = new double[data.length - 1];
		for (int i = 0; i < differences.length; i++) {
			differences[i] = data[i] - data[i + 1];
		}
		return differences;
	}

	/**
	 * First-order accumulation (running sum) with a start value: the first
	 * result element is {@code data[0] + init}; every further element adds the
	 * next input value to the previous result.
	 *
	 * @param data the data to accumulate
	 * @param init the start value added before the first element
	 * @return the running sums, same length as data
	 * @throws IllegalArgumentException if data is empty
	 */
	public static double[] firstOrderAccumulating(double[] data, double init) {
		if (data.length == 0) {
			throw new IllegalArgumentException("Data array must not be empty.");
		}
		double[] sums = new double[data.length];
		double running = init;
		for (int i = 0; i < data.length; i++) {
			running += data[i];
			sums[i] = running;
		}
		return sums;
	}

	/**
	 * Single-value accumulation.
	 *
	 * @param data the value to accumulate
	 * @param init the start value
	 * @return {@code data + init}
	 */
	public static double firstOrderAccumulating(double data, double init) {
		return data + init;
	}
}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/FilterOutliers.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/FilterOutliers.java
new file mode 100644
index 00000000000..192c2623df9
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/FilterOutliers.java
@@ -0,0 +1,97 @@
+package io.openems.edge.predictor.lstmmodel.preprocessing;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.stream.IntStream;
+
+import org.apache.commons.math3.stat.descriptive.rank.Percentile;
+
+import io.openems.edge.predictor.lstmmodel.utilities.MathUtils;
+
+public class FilterOutliers {
+
+ /**
+ * Filters out outliers from the dataset until no outliers are detected.
+ *
+ * @param data the input dataset
+ * @return the filtered dataset with outliers removed
+ */
+ public static double[] filterOutlier(double[] data) {
+ if (data == null || data.length == 0) {
+ throw new IllegalArgumentException("Input data must not be null or empty.");
+ }
+
+ double[] filteredData = Arrays.copyOf(data, data.length);
+ int iterationCount = 0;
+ boolean hasOutliers = true;
+
+ while (hasOutliers && iterationCount <= 100) {
+ var outlierIndices = detect(filteredData);
+
+ if (outlierIndices.isEmpty()) {
+ hasOutliers = false;
+ } else {
+ filteredData = filter(filteredData, outlierIndices);
+ }
+
+ iterationCount++;
+ }
+
+ return filteredData;
+ }
+
+ /**
+ * Applies the hyperbolic tangent function to data points at the specified
+ * indices.
+ *
+ * @param data the input dataset
+ * @param indices the indices of data points to be transformed
+ * @return the transformed dataset
+ */
+ public static double[] filter(double[] data, ArrayList indices) {
+
+ if (data == null || indices == null) {
+ throw new IllegalArgumentException("Input data and indices must not be null.");
+ }
+
+ if (indices.isEmpty()) {
+ return data;
+ }
+
+ double[] result = data.clone();
+ for (int index : indices) {
+ if (index >= 0 && index < result.length) {
+ result[index] = MathUtils.tanh(result[index]);
+ } else {
+ throw new IllegalArgumentException("Index out of bounds: " + index);
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Detects outliers in the dataset using the interquartile range (IQR) method.
+ *
+ * @param data the input dataset
+ * @return a list of indices of the detected outliers
+ */
+ public static ArrayList detect(double[] data) {
+
+ if (data == null || data.length == 0) {
+ throw new IllegalArgumentException("Input data must not be null or empty.");
+ }
+
+ Percentile perc = new Percentile();
+ var q1 = perc.evaluate(data, 25);// 25th percentile (Q1) (First percentile)
+ var q3 = perc.evaluate(data, 75);// 75th percentile (Q3) (Third percentile)
+ var iqr = q3 - q1;
+ var upperLimit = q3 + 1.5 * iqr;
+ var lowerLimit = q1 - 1.5 * iqr;
+
+ // Detect outliers
+ return IntStream.range(0, data.length)//
+ .filter(i -> data[i] < lowerLimit || data[i] > upperLimit)
+ .collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
+ }
+
+}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/GroupBy.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/GroupBy.java
new file mode 100644
index 00000000000..c6cc5de3ae6
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/GroupBy.java
@@ -0,0 +1,98 @@
+package io.openems.edge.predictor.lstmmodel.preprocessing;
+
+import java.time.OffsetDateTime;
+import java.time.temporal.ChronoField;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
public class GroupBy {
	private final ArrayList<Double> data;
	private final ArrayList<OffsetDateTime> date;

	private final ArrayList<ArrayList<OffsetDateTime>> groupedDateByMin = new ArrayList<>();
	private final ArrayList<ArrayList<Double>> groupedDataByMin = new ArrayList<>();
	private final ArrayList<ArrayList<OffsetDateTime>> groupedDateByHour = new ArrayList<>();
	private final ArrayList<ArrayList<Double>> groupedDataByHour = new ArrayList<>();

	public GroupBy(ArrayList<Double> data, List<OffsetDateTime> date) {
		this.data = new ArrayList<>(data);
		this.date = new ArrayList<>(date);
	}

	/**
	 * Groups data and dates by the given temporal field.
	 *
	 * @param chronoField     the {@link ChronoField} to group by
	 * @param groupedDateList receives one date group per distinct field value
	 * @param groupedDataList receives one data group per distinct field value
	 */
	public void groupByTemporalField(ChronoField chronoField, List<ArrayList<OffsetDateTime>> groupedDateList,
			List<ArrayList<Double>> groupedDataList) {
		for (Integer uniqueValue : this.extractUniqueAndSortedValues(chronoField)) {
			groupedDateList.add(new ArrayList<>(this.groupDatesByUniqueValue(uniqueValue, chronoField)));
			groupedDataList.add(new ArrayList<>(this.groupDataByUniqueValue(uniqueValue, chronoField)));
		}
	}

	/** Distinct values of the given field over all dates, ascending. */
	private List<Integer> extractUniqueAndSortedValues(ChronoField chronoField) {
		return this.date.stream()//
				.map(d -> d.get(chronoField))//
				.distinct()//
				.sorted()//
				.collect(Collectors.toList());
	}

	private List<OffsetDateTime> groupDatesByUniqueValue(Integer uniqueValue, ChronoField chronoField) {
		List<OffsetDateTime> matching = new ArrayList<>();
		for (OffsetDateTime d : this.date) {
			if (uniqueValue.equals(d.get(chronoField))) {
				matching.add(d);
			}
		}
		return matching;
	}

	private List<Double> groupDataByUniqueValue(Integer uniqueValue, ChronoField chronoField) {
		List<Double> matching = new ArrayList<>();
		for (int i = 0; i < this.data.size(); i++) {
			// Match via the timestamp at the same index; mirrors the date grouping.
			if (this.date.get(i).get(chronoField) == uniqueValue.intValue()) {
				matching.add(this.data.get(i));
			}
		}
		return matching;
	}

	/**
	 * Groups by hour of day.
	 */
	public void hour() {
		this.groupByTemporalField(ChronoField.HOUR_OF_DAY, this.groupedDateByHour, this.groupedDataByHour);
	}

	/**
	 * Groups by minute of hour.
	 */
	public void minute() {
		this.groupByTemporalField(ChronoField.MINUTE_OF_HOUR, this.groupedDateByMin, this.groupedDataByMin);
	}

	public ArrayList<ArrayList<Double>> getGroupedDataByHour() {
		return this.groupedDataByHour;
	}

	public ArrayList<ArrayList<OffsetDateTime>> getGroupedDateByHour() {
		return this.groupedDateByHour;
	}

	public ArrayList<ArrayList<Double>> getGroupedDataByMinute() {
		return this.groupedDataByMin;
	}

	public ArrayList<ArrayList<OffsetDateTime>> getGroupedDateByMinute() {
		return this.groupedDateByMin;
	}
}
\ No newline at end of file
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/MovingAverage.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/MovingAverage.java
new file mode 100644
index 00000000000..2b6e4bea70a
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/MovingAverage.java
@@ -0,0 +1,30 @@
+package io.openems.edge.predictor.lstmmodel.preprocessing;
+
public class MovingAverage {

	public static final int WINDOW_SIZE = 3;

	/**
	 * Computes a centered moving average with window size {@link #WINDOW_SIZE}.
	 * The input is zero-padded at both ends, so the first and last averages are
	 * biased towards zero.
	 *
	 * @param data the data to average
	 * @return an array of the same length holding the moving averages
	 */
	public static double[] movingAverage(double[] data) {
		int pad = WINDOW_SIZE / 2;
		double[] padded = new double[data.length + WINDOW_SIZE - 1];
		System.arraycopy(data, 0, padded, pad, data.length);

		double[] averages = new double[data.length];
		for (int i = 0; i < averages.length; i++) {
			double sum = 0;
			for (int j = i; j < i + WINDOW_SIZE; j++) {
				sum += padded[j];
			}
			averages[i] = sum / WINDOW_SIZE;
		}
		return averages;
	}
}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/Shuffle.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/Shuffle.java
new file mode 100644
index 00000000000..4b19f331df3
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessing/Shuffle.java
@@ -0,0 +1,69 @@
+package io.openems.edge.predictor.lstmmodel.preprocessing;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CompletableFuture;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
public class Shuffle {

	private double[][] data;
	private double[] target;

	/**
	 * Creates defensive copies of both arrays and shuffles them once.
	 *
	 * @param data   the feature rows
	 * @param target the target value per row, aligned with data by index
	 */
	public Shuffle(double[][] data, double[] target) {
		this.data = this.copy2DArray(data);
		this.target = Arrays.copyOf(target, target.length);
		this.shuffleIt();
	}

	/**
	 * Shuffles data and target with the same permutation, keeping each row
	 * aligned with its target value.
	 *
	 * <p>
	 * The seed is fixed (100), so the resulting order is deterministic and
	 * reproducible across runs. The permutation is applied to both arrays in a
	 * single synchronous pass; this replaces the former pair of
	 * {@code CompletableFuture.runAsync} tasks, which added common-pool thread
	 * overhead and an extra copy for two trivial array operations without
	 * changing the result.
	 */
	public void shuffleIt() {
		List<Integer> indices = IntStream.range(0, this.data.length)//
				.boxed()//
				.collect(Collectors.toList());
		Collections.shuffle(indices, new Random(100));

		double[][] shuffledData = new double[this.data.length][];
		// Sized from the index list, so a target longer than data is truncated
		// exactly as before.
		double[] shuffledTarget = new double[indices.size()];
		for (int pos = 0; pos < indices.size(); pos++) {
			int source = indices.get(pos);
			shuffledData[pos] = Arrays.copyOf(this.data[source], this.data[source].length);
			shuffledTarget[pos] = this.target[source];
		}
		this.data = shuffledData;
		this.target = shuffledTarget;
	}

	public double[] getTarget() {
		return this.target;
	}

	public double[][] getData() {
		return this.data;
	}

	/** Deep copy: each row is duplicated so callers cannot mutate our state. */
	private double[][] copy2DArray(double[][] array) {
		double[][] copy = new double[array.length][];
		for (int i = 0; i < array.length; i++) {
			copy[i] = Arrays.copyOf(array[i], array[i].length);
		}
		return copy;
	}
}
diff --git a/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessingpipeline/ConstantScalingPipe.java b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessingpipeline/ConstantScalingPipe.java
new file mode 100644
index 00000000000..5e3789f1588
--- /dev/null
+++ b/io.openems.edge.predictor.lstmmodel/src/io/openems/edge/predictor/lstmmodel/preprocessingpipeline/ConstantScalingPipe.java
@@ -0,0 +1,19 @@
+package io.openems.edge.predictor.lstmmodel.preprocessingpipeline;
+
+import static io.openems.edge.predictor.lstmmodel.preprocessing.DataModification.constantScaling;
+
+public class ConstantScalingPipe implements Stage