Izanagi

Reputation: 1

LSTM pulse signal prediction

I'm trying to capture long-term dependencies with an LSTM by creating a unit pulse signal that repeats every 62 points.

The idea is that the network should go back 62 time-steps and copy that value to the next time-step, so as to predict the pulse, but the LSTM is not learning this...
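
To make the idea concrete, here is the rule I want the network to approximate, as a minimal numpy sketch (this illustrates the target behaviour only, it is not part of the model):

import numpy as np

period = 62
signal = np.zeros(1300)
signal[::period] = 1  # unit pulse every 62 points

# the rule the LSTM should learn: copy the value from 62 steps back
predicted = signal[:-period]  # y_hat[t] = y[t - 62]
actual = signal[period:]
assert np.array_equal(predicted, actual)  # this rule predicts the signal perfectly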

import sys
import os
import numpy as np
import math
import pandas as pd
from matplotlib import pyplot as plt

from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Flatten, Dropout
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
import tensorflow as tf
from tensorflow.python.client import device_lib

K.clear_session() # clear any previous session (so layer names restart from 0)

# check that the GPU is visible
print(K.tensorflow_backend._get_available_gpus())
print(device_lib.list_local_devices())
# create a single session with device placement logging and bind it to Keras
config = tf.ConfigProto(device_count={'GPU': 1, 'CPU': 4},
                        log_device_placement=True)
sess = tf.Session(config=config)
K.set_session(sess)
# hyper-parameters
params = {
    "batch_size": 20,
    "epochs": 1000,
    "time_steps": 70,
}

OUTPUT_PATH = "/home/..."
TIME_STEPS = params["time_steps"]
BATCH_SIZE = params["batch_size"]

def generate_impulse(dim):
    arr = np.zeros(dim)
    frequency = 62
    for i in range(len(arr)):
        if i % frequency == 0:
            arr[i] = 1
    return arr

y = generate_impulse(1300)

plt.figure(figsize=(20,5))
plt.plot(y)
plt.title('unit impulse')
plt.ylabel('y')
plt.xlabel('x')
plt.show()

[figure: the generated unit impulse dataset]

def create_timeseries(arr):
    # Build a univariate time series for single-step prediction:
    # use TIME_STEPS consecutive values as input and the value at
    # position TIME_STEPS+i as the target.
    dim_0 = len(arr) - TIME_STEPS

    x = np.zeros((dim_0, TIME_STEPS))
    y = np.zeros((dim_0,))

    for i in range(dim_0):
        x[i] = arr[i:TIME_STEPS+i]  # index TIME_STEPS+i is excluded
        y[i] = arr[TIME_STEPS+i]
        #print(x[i], y[i])
    print("length of time-series i/o",x.shape,y.shape)
    return x, y

x_ts, y_ts = create_timeseries(y)
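
Since TIME_STEPS = 70 is longer than the 62-point period, the value to copy is always inside the input window; in fact, the target is exactly column TIME_STEPS - 62 of the input. A quick sanity check (added here for illustration):

# every target equals the input value 62 steps earlier
assert np.array_equal(y_ts, x_ts[:, TIME_STEPS - 62])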

len_train = int(len(x_ts)*80/100)
len_val = int(len(x_ts)*10/100)
# TRAINING SET: 80%
x_train = x_ts[0:len_train]
y_train = y_ts[0:len_train]
# VALIDATION SET: 10%
x_val = x_ts[len_train:len_train+len_val]
y_val = y_ts[len_train:len_train+len_val]
# TEST SET: 10%
x_test = x_ts[len_train+len_val:]
y_test = y_ts[len_train+len_val:]

# reshape to (samples, time_steps, features) as expected by the LSTM
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_val = x_val.reshape((x_val.shape[0], x_val.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))

def create_model():
    model = Sequential()
    model.add(LSTM(1, input_shape=(TIME_STEPS, 1)))

    model.compile(optimizer='adam', loss='mse')
    return model 

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
                       patience=50, min_delta=0.0001)
model = create_model()

history = model.fit(x_train, y_train, epochs=params["epochs"], verbose=2, batch_size=BATCH_SIZE, shuffle=False,
                    validation_data=(x_val, y_val), callbacks=[es])

plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('MSE LOSS')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()

[figure: training and validation MSE loss]

y_pred = model.predict(x_test, batch_size=BATCH_SIZE)
y_pred = y_pred.flatten()

error = mean_squared_error(y_test, y_pred)

plt.figure(figsize=(20,5))
plt.plot(y_pred)
plt.plot(y_test)
plt.title('PREDICTION ON TEST SET')
plt.ylabel('y')
plt.xlabel('x')
plt.legend(['Prediction', 'Real'], loc='upper left')
plt.show()

[figure: prediction on the test set]

The training set gives me the same results (it is the same signal...). I also tried other LSTM models with more neurons, but they do not work either.

Upvotes: 0

Views: 233

Answers (1)

ad2004

Reputation: 809

You might consider training for more epochs. I created a simplified model and training set based on what I believe is the core of your idea:

from keras.models import Sequential
from keras.layers import LSTM
import numpy as np

TIME_STEPS=10

x_train = np.array([ [ [1],[0],[0],[0],[0],[0],[0],[0],[0],[0] ],
                     [ [0],[1],[0],[0],[0],[0],[0],[0],[0],[0] ],
                     [ [0],[0],[1],[0],[0],[0],[0],[0],[0],[0] ],
                     [ [0],[0],[0],[1],[0],[0],[0],[0],[0],[0] ],
                     [ [0],[0],[0],[0],[1],[0],[0],[0],[0],[0] ],
                     [ [0],[0],[0],[0],[0],[1],[0],[0],[0],[0] ],
                     [ [0],[0],[0],[0],[0],[0],[1],[0],[0],[0] ],
                     [ [0],[0],[0],[0],[0],[0],[0],[1],[0],[0] ],
                     [ [0],[0],[0],[0],[0],[0],[0],[0],[1],[0] ],
                     [ [0],[0],[0],[0],[0],[0],[0],[0],[0],[1] ]])

y_train = np.array([[1],[0],[0],[0],[0],[0],[0],[0],[0],[0]])
print(x_train.shape)
print(y_train.shape)

model = Sequential()
model.add(LSTM(1, input_shape=(TIME_STEPS,1)))
model.compile(optimizer='adam', loss='mse', metrics=['mse'])

model.fit(x_train, y_train, epochs=10000, verbose=0)

After training, I get the following predictions:

model.predict(x_train)
array([[ 0.9870746 ],
       [ 0.00665453],
       [-0.00303702],
       [ 0.00697759],
       [-0.02432432],
       [-0.00701594],
       [ 0.01387464],
       [ 0.02281112],
       [ 0.00439195],
       [-0.04109564]], dtype=float32)

I'm not sure if it solves your problem completely, but it might give you a suggested direction to investigate. I hope this helps.
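
If you want to scale this back up to the original 62-point pulse, the same recipe would look roughly like the sketch below (reusing the imports above, plus numpy; I haven't verified how many epochs the full problem actually needs, but I would expect far more than 1000):

period = 62
signal = np.zeros(1300)
signal[::period] = 1  # same unit pulse as in your question

TIME_STEPS = 70  # window longer than the pulse period, as in your code
X = np.array([signal[i:i + TIME_STEPS] for i in range(len(signal) - TIME_STEPS)])
Y = signal[TIME_STEPS:]
X = X.reshape((X.shape[0], TIME_STEPS, 1))

model = Sequential()
model.add(LSTM(1, input_shape=(TIME_STEPS, 1)))
model.compile(optimizer='adam', loss='mse')
model.fit(X, Y, epochs=10000, verbose=0)  # many more epochs than the original 1000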

Upvotes: 1
