Commit f1904284 authored by Eva Lina Fesefeldt's avatar Eva Lina Fesefeldt
Browse files

Stand nach Präsentation

parent 206ed83c
......@@ -11,9 +11,9 @@ input_dim = 28*28
epochs = 100
# Modell erzeugen
from tf.keras.models import Sequential
from tf.keras.layers import Dense
from tf.keras import backend as k
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as k
loss_fn = tf.keras.losses.MeanSquaredError()
......
......@@ -3,7 +3,7 @@ import matplotlib.pyplot as plt
loss_vs_hidden_layer = np.load("results/mini_loss_vs_size_hidden_layer.npy")
for size_hidden_layer in range(0,51, 5):
for size_hidden_layer in range(0,400, 50):
plt.semilogy(loss_vs_hidden_layer[size_hidden_layer,:], label="n="+str(size_hidden_layer+1))
plt.xlabel("Epoche")
......
#!/bin/bash -l
# SLURM batch script: run the training (main.py) on a GPU node.
# NOTE(fix): the memory directive was misspelled "#SBTACH", so Slurm
# silently ignored the --mem-per-cpu request; corrected to "#SBATCH".
#SBATCH -p gpu
#SBATCH --ntasks 1
#SBATCH --cpus-per-task 4
#SBATCH --gres gpu:2
#SBATCH --constraint gpu_k20
#SBATCH --mem-per-cpu 2000
#SBATCH --time 30:00:00
#SBATCH --mail-type END,FAIL,REQUEUE
#SBATCH --mail-user eva.fesefeldt@tuhh.de

# Activate the conda environment that provides the GPU build of TensorFlow.
conda activate tf-gpu
python3 main.py
exit
......@@ -7,6 +7,10 @@ from jvp import _back_over_back_hvp
from helper import *
from helper import reelles_skalarprodukt_trainable_shape
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as k
def norm_trainable_shape(x):
return math.sqrt(reelles_skalarprodukt_trainable_shape(x,x))
......@@ -75,11 +79,8 @@ x_train = tf.reshape(x_train, (60000, 28*28))
y_train = tf.one_hot(y_train, depth=10)
# Modell erzeugen
size_hidden_layer = 30
number_of_epochs = 100
from keras.models import Sequential
from keras.layers import Dense
from keras import backend as k
size_hidden_layer = 200
number_of_epochs = 200
model = Sequential()
model.add(Dense(size_hidden_layer, input_dim = input_dim, activation='sigmoid'))
......@@ -90,13 +91,14 @@ filename_W1 = "initializers/W_1_n" + str(size_hidden_layer) + ".npy"
filename_b1 = "initializers/b_1_n" + str(size_hidden_layer) + ".npy"
filename_W2 = "initializers/W_2_n" + str(size_hidden_layer) + ".npy"
filename_b2 = "initializers/b_2_n" + str(size_hidden_layer) + ".npy"
#W_1 = np.load(filename_W1)
#b_1 = np.load(filename_b1)
#W_2 = np.load(filename_W2)
#b_2 = np.load(filename_b2)
#list_of_weights_and_biases = [W_1, b_1, W_2, b_2]
#model.set_weights(list_of_weights_and_biases)
W_1 = np.load(filename_W1)
b_1 = np.load(filename_b1)
W_2 = np.load(filename_W2)
b_2 = np.load(filename_b2)
list_of_weights_and_biases = [W_1, b_1, W_2, b_2]
model.set_weights(list_of_weights_and_biases)
loss_fn = tf.keras.losses.MeanSquaredError()
......@@ -105,12 +107,17 @@ model.compile(optimizer='adam', loss=loss_fn)
loss = loss_fn(model.predict(x_train), y_train)
loss_numpy = np.zeros(number_of_epochs)
filename = "results/MSE_loss_NCG_backtracking_epochs" + str(number_of_epochs) + ".npy"
loss_numpy = np.zeros(number_of_epochs+1)
filename = "results/MSE_loss_NCG_backtracking_epochs" + str(number_of_epochs) + "_n" + str(size_hidden_layer) + ".npy"
# Initialen Loss abspeichern
loss = loss_fn(model.predict(x_train), y_train)
loss_numpy[0] = loss.numpy()
np.save(filename, loss_numpy)
b = [0,0,0,0]
for epoch in range(number_of_epochs):
for epoch in range(1, number_of_epochs+1):
x = model.get_weights()
......
......@@ -5,17 +5,20 @@ import matplotlib.pyplot as plt
from numpy.linalg import norm
from helper import matrix_trainable_shape_to_flat_shape
# N-CG
loss_NCG = np.load("results/MSE_loss_NCG_backtracking_epochs100.npy")
# Loss für n=30 aus dem Layer Size Test
loss_vs_hidden_layer = np.load("results/mini_loss_vs_size_hidden_layer.npy")
loss_vs_hidden_layer[30,:]
# Daten laden, alle Optimizer zum Vergleich stammen aus Tensorflow_Optimizer/results
loss_NCG = np.load("results/MSE_loss_NCG_backtracking_epochs200_n200.npy")
loss_adam = np.load("results/MSE_loss_adam_batch32_LR0.01_epochs200_n200.npy")
loss_sgd = np.load("results/MSE_loss_sgd_batch32_LR10.0_epochs200_n200.npy")
loss_sgd_decay = np.load("results/MSE_loss_sgd_with_decay_batch32_LR10.0_epochs200_n200.npy")
plt.rcParams.update({'font.size': 8})
plt.figure(figsize=(6, 8), constrained_layout=True)
plt.figure(figsize=(6, 4), constrained_layout=True)
plt.semilogy(loss_NCG, label="N-CG")
plt.semilogy(loss_vs_hidden_layer[30,:], label="Adam")
plt.semilogy(loss_adam, label="Adam")
plt.semilogy(loss_sgd, label="SGD")
plt.semilogy(loss_sgd_decay, label="SGD Decay")
plt.xlabel("Epochen")
plt.ylabel("MSE")
plt.legend()
plt.show()
\ No newline at end of file
#!/bin/bash -l
# SLURM batch script: run main.py once per optimizer (adam, sgd,
# sgd_with_decay) to produce the comparison loss curves.
# NOTE(fix): the memory directive was misspelled "#SBTACH", so Slurm
# silently ignored the --mem-per-cpu request; corrected to "#SBATCH".
#SBATCH -p gpu
#SBATCH --ntasks 1
#SBATCH --cpus-per-task 4
#SBATCH --gres gpu:2
#SBATCH --constraint gpu_k20
#SBATCH --mem-per-cpu 2000
#SBATCH --time 30:00:00
#SBATCH --mail-type END,FAIL,REQUEUE
#SBATCH --mail-user eva.fesefeldt@tuhh.de

# Activate the conda environment that provides the GPU build of TensorFlow.
conda activate tf-gpu
python3 main.py adam --batch_size 32 --LR 0.01 --epochs 200
python3 main.py sgd --batch_size 32 --LR 10 --epochs 200
python3 main.py sgd_with_decay --batch_size 32 --LR 10 --epochs 200
exit
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment