1. Programme Python TensorFlowLSTM.py

Le programme Python suivant est le programme utilisé pour soumettre
un job avec le système submit.
Le contenu du script Python TensorFlowLSTM.py, pour un calcul
de Machine Learning avec TensorFlow, est donné ci-dessous :
#!/usr/bin/env python
# coding: utf-8
# Test TensorFlow LSTM
# Marc Buffat, dpt mécanique, Lyon 1
#
# - prédiction d'un signal périodique
#
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
import matplotlib.pyplot as plt
# --- GPU selection and memory limiting ---
# Query the physical GPU list once and reuse it (the original queried twice).
gpus = tf.config.experimental.list_physical_devices('GPU')
print("Num GPUs Available: ", len(gpus))
# Index of the GPU to dedicate to this job, and its memory cap in MiB.
gpunum = 1
memlim = 4 * 1024
# Guard against machines with fewer GPUs: the original indexed gpus[gpunum]
# unconditionally and raised IndexError when GPU #1 was absent.
if gpunum < len(gpus):
    tf.config.set_visible_devices(gpus[gpunum], "GPU")
    # Cap the visible GPU's memory so other jobs can share the device.
    tf.config.experimental.set_virtual_device_configuration(
        gpus[gpunum],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memlim)])
    print("GPU select: ", gpus[gpunum], "Mem GPU: ", memlim, "Mo")
else:
    print("GPU index", gpunum, "not available; running on default device")
# Model definition: one LSTM layer feeding a single dense output unit.
# Sequence length per sample (empirically good from 20 up; 10 and 15
# proved too short).
n_steps = 30
model = Sequential([
    LSTM(10, activation='tanh'),
    Dense(1, activation='tanh'),
])
model.compile(optimizer='adam', loss='mse')
# --- Training-data preparation ---
def train_function(x):
    """Signal to learn during training: element-wise sine of *x*.

    Parameters
    ----------
    x : scalar or array-like
        Abscissa value(s).

    Returns
    -------
    Same shape as *x*: sin(x).
    """
    return np.sin(x)
def split_sequence(sequence, n_steps):
    """Split a 1-D sequence into supervised-learning samples.

    Each sample pairs a window of ``n_steps`` consecutive values (X) with
    the single value immediately following the window (y).

    Parameters
    ----------
    sequence : 1-D array-like of length N
        Source signal.
    n_steps : int
        Window length.

    Returns
    -------
    (X, y) : tuple of np.ndarray
        X has shape (N - n_steps, n_steps); y has shape (N - n_steps,).
        Both are empty when n_steps >= N.
    """
    # Number of valid windows: each needs one extra element after it for y.
    n_samples = max(len(sequence) - n_steps, 0)
    X = [sequence[i:i + n_steps] for i in range(n_samples)]
    y = [sequence[i + n_steps] for i in range(n_samples)]
    return np.array(X), np.array(y)
# --- Build the training set from a long sine signal ---
xaxis = np.arange(-50 * np.pi, 50 * np.pi, 0.1)
train_seq = train_function(xaxis)
print("train function : ", train_seq.shape, n_steps)
# Each sample: the n_steps previous values in X predict the next value y.
X, y = split_sequence(train_seq, n_steps)
# Reshape from [samples, timesteps] into [samples, timesteps, features].
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
print("X.shape = {}".format(X.shape))
print("y.shape = {}".format(y.shape))
# Plot the first few training sequences (loop-body indentation restored;
# it was lost in the original listing).
plt.figure(figsize=(12, 6))
for i in range(10):
    plt.scatter(xaxis[i], X[i, 0, 0])
    plt.plot(xaxis[i:i + n_steps], X[i, :, 0], label=f"seq {i}")
    # Target value of the sequence, marked with a black cross.
    plt.scatter(xaxis[i + n_steps], y[i], marker='x', c='k')
plt.title(f"séquences de {n_steps} valeurs")
plt.legend();
plt.savefig("fig1.png")
# Train the model for 20 epochs.
history = model.fit(X, y, epochs=20, verbose=1)
# NOTE(review): this draws onto the current (fig1) figure — presumably a new
# figure was intended before plotting the loss; kept as-is to preserve output.
plt.plot(history.history['loss'], label="loss")
plt.legend(loc="upper right")
plt.savefig("fig2.png")
# test du model
test_xaxis = np.arange(0, 10*np.pi, 0.1)
def test_function(x):
return np.cos(x)
calc_y = test_function(test_xaxis)
# Seed the prediction with the first n_steps true values; every subsequent
# value is predicted auto-regressively, each output fed back as input.
test_y = calc_y[:n_steps]
# (Removed the unused `results` list from the original.)
for i in range(len(test_xaxis) - n_steps):
    # Window of the n_steps most recent values (true seed, then predictions).
    net_input = test_y[i:i + n_steps]
    net_input = net_input.reshape((1, n_steps, n_features))
    # Renamed from `y` — the original clobbered the training-target array `y`.
    y_pred = model.predict(net_input, verbose=0)
    test_y = np.append(test_y, y_pred)
print("nbre de sequences ", len(test_xaxis) - n_steps)
# Plot the results: predictions vs. the true cosine signal.
plt.figure(figsize=(14,6))
# Left panel: auto-regressive predictions against the measured signal.
plt.subplot(1,2,1)
plt.plot(test_xaxis[n_steps:], test_y[n_steps:], label="predictions")
plt.plot(test_xaxis, calc_y, label="mesure")
# Initial seed sequence (true values) marked with red crosses.
plt.plot(test_xaxis[:n_steps], test_y[:n_steps], 'xr',label="sequence init.")
plt.legend(loc='upper left')
plt.ylim(-1.5, 1.5)
plt.title("prediction")
# Right panel: point-wise prediction error beyond the seed window.
plt.subplot(1,2,2)
plt.plot(test_xaxis[n_steps:], test_y[n_steps:]-calc_y[n_steps:])
plt.title("Erreur");
plt.savefig("fig3.png")