Commit 9a5b4279 authored by Eva Lina Fesefeldt

[dev] Evaluation of the ReLu3 autoencoder

parent 457c7dfb
......@@ -10,6 +10,33 @@ def dict_to_numpy(dictionary):
        A = np.append(A, dictionary[i])
    return A
# Takes a dictionary (meta["best_weights"]) from the hessianlearn output pickle
# Output: weights and biases as numpy arrays
def dict_to_weights_and_biases(dictionary):
    W_1 = dictionary["dense"][0]
    b_1 = dictionary["dense"][1]
    W_2 = dictionary["dense_1"][0]
    b_2 = dictionary["dense_1"][1]
    return W_1, b_1, W_2, b_2

# Activation function ReLu^3
def ReLu3(x):
    return (np.clip(x, a_min=0, a_max=None))**3

# Evaluate the network (KNN) at x_j
def KNN(W_1, b_1, W_2, b_2, x_j):
    return ReLu3((ReLu3(x_j@W_1 + b_1))@W_2 + b_2)

# Local cost function for a single data point x_j
def C_j(W_1, b_1, W_2, b_2, x_j):
    return (1/2) * np.linalg.norm(x_j - KNN(W_1, b_1, W_2, b_2, x_j))**2

# Compute the MSE loss for the given weights and biases;
# the data set consists of the unit vectors of R^4
def mean_squared_loss(W_1, b_1, W_2, b_2):
    X = np.identity(4)
    return 1/4 * sum(C_j(W_1, b_1, W_2, b_2, X[:, j]) for j in range(4))
infile = open('/home/lina/Dokumente/Uni/Masterarbeit/Code/Vergleich GD hessianlearn/ReLu3/_logging/-2021-05-04-adam-dW=22-alpha=0.05.pkl','rb')
meta = pickle.load(infile)
infile.close()
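To make the evaluation concrete: with the 4-2-4 shapes used in the training scripts below (encoding_dim = 2, dim = 4), KNN evaluates ReLu3(ReLu3(x_j W_1 + b_1) W_2 + b_2) and mean_squared_loss averages the four local costs C_j over the unit vectors of R^4. A minimal sketch with random, purely hypothetical weights, assuming the helpers above are in scope:

rng = np.random.default_rng(0)                 # hypothetical weights, for illustration only
W_1, b_1 = rng.standard_normal((4, 2)), rng.standard_normal(2)
W_2, b_2 = rng.standard_normal((2, 4)), rng.standard_normal(4)
x_j = np.identity(4)[:, 0]                     # first unit vector of R^4
print(KNN(W_1, b_1, W_2, b_2, x_j))            # reconstruction of x_j, shape (4,)
print(C_j(W_1, b_1, W_2, b_2, x_j))            # local cost at x_j
print(mean_squared_loss(W_1, b_1, W_2, b_2))   # MSE averaged over all four unit vectors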
......@@ -25,6 +52,9 @@ infile = open('/home/lina/Dokumente/Uni/Masterarbeit/Code/Vergleich GD hessianle
meta = pickle.load(infile)
infile.close()
INCG = dict_to_numpy(meta["train_loss"])
INCG_best_weights = meta["best_weights"]
W_1, b_1, W_2, b_2 = dict_to_weights_and_biases(meta["best_weights"])
MSE = mean_squared_loss(W_1, b_1, W_2, b_2)
infile = open('/home/lina/Dokumente/Uni/Masterarbeit/Code/Vergleich GD hessianlearn/ReLu3/_logging/-2021-05-04-lrsfn-dW=22-rank=2.pkl','rb')
meta = pickle.load(infile)
......
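The evaluation script assumes a particular layout of the hessianlearn output pickle. Inferred purely from the accesses above (an assumption, not taken from hessianlearn's documentation), meta looks roughly like the following sketch, and the two conversion helpers unpack it:

# Assumed layout of the unpickled dictionary, with hypothetical values for illustration:
meta = {
    "train_loss": {0: 1.2, 1: 0.9, 2: 0.7},    # iteration -> training loss
    "best_weights": {                          # Keras layer name -> [W, b]
        "dense":   [np.zeros((4, 2)), np.zeros(2)],
        "dense_1": [np.zeros((2, 4)), np.zeros(4)],
    },
}
loss_history = dict_to_numpy(meta["train_loss"])                       # roughly array([1.2, 0.9, 0.7])
W_1, b_1, W_2, b_2 = dict_to_weights_and_biases(meta["best_weights"])  # per-layer [W, b] pairs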
......@@ -26,9 +26,8 @@ print("This is the main_adam_Autoencoder_ReLu3.py file")
encoding_dim = 2
dim = 4
x_train = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
x_test = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
y_train = np.array([0, 0, 0, 1])
x_train = np.identity(4)
x_test = np.identity(4)
from keras.models import Sequential
from keras.layers import Dense
......
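The model definition itself is not part of this hunk. Purely as an illustration of how a 4-2-4 Keras autoencoder with a ReLu^3 activation could be set up (a sketch under those assumptions, not the script's actual code; "dense" and "dense_1" are the default Keras layer names that the evaluation script indexes into):

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

def relu3(x):
    return K.relu(x)**3                                           # elementwise max(x, 0)^3

model = Sequential([
    Dense(encoding_dim, activation=relu3, input_shape=(dim,)),    # 4 -> 2 encoder
    Dense(dim, activation=relu3),                                  # 2 -> 4 decoder
])
model.compile(optimizer="adam", loss="mean_squared_error")
model.fit(x_train, x_train, epochs=100, verbose=0)                # autoencoder: targets are the inputs; epoch count chosen arbitrarily for the sketch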
......@@ -30,9 +30,8 @@ settings['hess_batch_size'] = 4
encoding_dim = 2
dim = 4
x_train = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
x_test = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
y_train = np.array([0, 0, 0, 1])
x_train = np.identity(4)
x_test = np.identity(4)
from keras.models import Sequential
from keras.layers import Dense
......
......@@ -26,9 +26,8 @@ print("This is the main_INCG_Autoencoder_ReLu3.py file")
encoding_dim = 2
dim = 4
x_train = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
x_test = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
y_train = np.array([0, 0, 0, 1])
x_train = np.identity(4)
x_test = np.identity(4)
from keras.models import Sequential
from keras.layers import Dense
......
......@@ -26,9 +26,8 @@ print("This is the main_INCG_Autoencoder_ReLu3.py file")
encoding_dim = 2
dim = 4
x_train = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
x_test = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
y_train = np.array([0, 0, 0, 1])
x_train = np.identity(4)
x_test = np.identity(4)
from keras.models import Sequential
from keras.layers import Dense
......