Commit d84e17af authored by Eva Lina Fesefeldt's avatar Eva Lina Fesefeldt
Browse files

[dev] ReLu3 hinzugefügt

parent 83c1324d
...@@ -10,14 +10,14 @@ def dict_to_numpy(dictionary): ...@@ -10,14 +10,14 @@ def dict_to_numpy(dictionary):
A = np.append(A, dictionary[i]) A = np.append(A, dictionary[i])
return A return A
infile = open('_logging/-2021-04-30-gd-dW=22.pkl','rb') infile = open('/home/lina/Dokumente/Uni/Masterarbeit/Code/Vergleich GD hessianlearn/Sigmoid/_logging/-2021-05-04-gd-dW=22.pkl','rb')
meta = pickle.load(infile) meta = pickle.load(infile)
infile.close() infile.close()
GD = dict_to_numpy(meta["train_loss"]) GD = dict_to_numpy(meta["train_loss"])
no_epochs_gd = GD.shape no_epochs_gd = GD.shape
infile = open('_logging/-2021-04-30-incg-dW=22.pkl','rb') infile = open('/home/lina/Dokumente/Uni/Masterarbeit/Code/Vergleich GD hessianlearn/Sigmoid/_logging/-2021-05-04-incg-dW=22.pkl','rb')
meta = pickle.load(infile) meta = pickle.load(infile)
infile.close() infile.close()
...@@ -33,7 +33,7 @@ INCG = INCG[:no_epochs] ...@@ -33,7 +33,7 @@ INCG = INCG[:no_epochs]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
plt.semilogy(INCG, 'r.', label="HF-Methode mit E-W-Abbruchbedingung, L2-Regularisierung und Liniensuche") plt.semilogy(INCG, 'r.', label="Inexaktes Newton-CG mit E-W-Abbruchbedingung, L2-Regularisierung und Liniensuche")
plt.semilogy(GD, 'g.', label="Gradient Descent mit Liniensuche und L2-Regularisierung") plt.semilogy(GD, 'g.', label="Gradient Descent mit Liniensuche und L2-Regularisierung")
plt.title("Training Autoencoder d=2 (One Hot Encoding)") plt.title("Training Autoencoder d=2 (One Hot Encoding)")
plt.xlabel("Epochen") plt.xlabel("Epochen")
......
import pickle
import numpy as np
# Load Python pickle files (hessianlearn training logs) and plot the results.
def dict_to_numpy(dictionary):
    """Flatten the values of an int-keyed dict (keys 0..len-1) into one 1-D float array.

    Equivalent to the original ``np.append`` loop — ``np.append(A, v)`` is
    ``np.concatenate([A, np.ravel(v)])`` — but collects the pieces first and
    concatenates once, avoiding the loop's O(n^2) reallocation.
    """
    pieces = [np.ravel(dictionary[i]) for i in range(len(dictionary))]
    # Seed with an empty float64 array so an empty dict still yields shape (0,)
    # and scalar/int values are promoted to float, exactly like np.append did.
    return np.concatenate([np.array([])] + pieces)
def _load_train_loss(path):
    """Read one hessianlearn pickle log and return its train-loss curve as a 1-D array."""
    # 'with' closes the handle even if unpickling raises — the original
    # open()/close() pairs leaked the file object on error.
    with open(path, 'rb') as infile:
        meta = pickle.load(infile)
    return dict_to_numpy(meta["train_loss"])

# All four optimizer runs were logged into the same directory.
_LOG_DIR = '/home/lina/Dokumente/Uni/Masterarbeit/Code/Vergleich GD hessianlearn/ReLu3/_logging/'

adam = _load_train_loss(_LOG_DIR + '-2021-05-04-adam-dW=22-alpha=0.05.pkl')
GD = _load_train_loss(_LOG_DIR + '-2021-05-04-gd-dW=22.pkl')
INCG = _load_train_loss(_LOG_DIR + '-2021-05-04-incg-dW=22.pkl')
LRSFN = _load_train_loss(_LOG_DIR + '-2021-05-04-lrsfn-dW=22-rank=2.pkl')

# Truncate all curves to the shortest run so they share a common epoch axis.
no_epochs = min(GD.shape[0], INCG.shape[0], adam.shape[0], LRSFN.shape[0])
GD = GD[:no_epochs]
INCG = INCG[:no_epochs]
LRSFN = LRSFN[:no_epochs]
adam = adam[:no_epochs]

import matplotlib.pyplot as plt

# Log-scale y-axis: the losses span several orders of magnitude during training.
plt.semilogy(INCG, 'r.', label="Inexaktes Newton-CG mit E-W-Abbruchbedingung, L2-Regularisierung und Liniensuche")
plt.semilogy(GD, 'g.', label="Gradient Descent mit Liniensuche und L2-Regularisierung")
plt.semilogy(adam, 'b.', label="Adam")
plt.semilogy(LRSFN, 'y.', label="Low Rank Saddle Free Newton, Hesse-Matrix Rang=2")
plt.title("Training Autoencoder d=2 (One Hot Encoding)")
plt.xlabel("Epochen")
plt.ylabel("Least Squared Error")
plt.legend()
plt.show()
#plt.savefig("HF_vs_GD.pdf")
import numpy as np
import os, sys
# Silence TensorFlow C++ logging ('3' = errors only) and OpenMP/KMP noise.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Allow duplicate OpenMP runtimes to coexist (common Anaconda workaround).
os.environ['KMP_DUPLICATE_LIB_OK']='True'
os.environ["KMP_WARNINGS"] = "FALSE"
import tensorflow as tf
import time
# Import hessianlearn repository
# Falls back to "../../" when HESSIANLEARN_PATH is not set in the environment.
sys.path.append( os.environ.get('HESSIANLEARN_PATH', "../../"))
from hessianlearn import *
# Custom activation function
from keras.layers import Activation
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
def custom_activation(x):
    """ReLU cubed: max(x, 0) ** 3 — zero for negative inputs, cubic growth otherwise."""
    rectified = K.relu(x)
    return rectified ** 3
# Register under 'custom_activation' so Keras can resolve it by name.
get_custom_objects().update({'custom_activation': Activation(custom_activation)})
print("This is the main_adam_Autoencoder_ReLu3.py file")

# Problem size: one-hot vectors in R^4 compressed to a 2-dimensional code.
encoding_dim = 2
dim = 4

# Train and validate on the same 4x4 one-hot data (identity reconstruction task).
x_train = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
x_test = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
y_train = np.array([0, 0, 0, 1])  # NOTE(review): not referenced below — confirm it can be dropped

from keras.models import Sequential
from keras.layers import Dense

# Two Dense layers (4 -> 2 -> 4), each followed by the custom ReLu^3 activation.
autoencoder = tf.keras.models.Sequential()
autoencoder.add(Dense(encoding_dim, input_dim=dim))
autoencoder.add(Activation(custom_activation, name='ReLu3_1'))
autoencoder.add(Dense(dim, input_dim=encoding_dim))
autoencoder.add(Activation(custom_activation, name='ReLu3_2'))

settings = {'batch_size': 4, 'hess_batch_size': 4}  # full batch: dataset has 4 samples

problem = AutoencoderProblem(autoencoder, dtype=tf.float32)

settings['tikhonov_gamma'] = 0.0  # gamma 0.0 -> no Tikhonov weight penalty
regularization = L2Regularization(problem, gamma=settings['tikhonov_gamma'])

# Instantiate the data object
train_data = {problem.x: x_train}
validation_data = {problem.x: x_test}
data = Data(train_data, settings['batch_size'],
            validation_data=validation_data,
            hessian_batch_size=settings['hess_batch_size'])

# Adam for up to 400 sweeps, base step 0.05, progress printed every 10 sweeps.
HLModelSettings = HessianlearnModelSettings()
HLModelSettings['optimizer'] = 'adam'
HLModelSettings['fixed_step'] = False  # presumably enables adaptive step selection — TODO confirm
HLModelSettings['max_sweeps'] = 400
HLModelSettings['alpha'] = 5e-2
HLModelSettings['printing_sweep_frequency'] = 10

HLModel = HessianlearnModel(problem, regularization, data, settings=HLModelSettings)
HLModel.fit()
...@@ -44,12 +44,6 @@ settings['batch_size'] = 4 ...@@ -44,12 +44,6 @@ settings['batch_size'] = 4
settings['hess_batch_size'] = 4 settings['hess_batch_size'] = 4
encoding_dim = 2
input_img = tf.keras.layers.Input(shape=(4,))
encoded = tf.keras.layers.Dense(encoding_dim, activation='softplus')(input_img)
decoded = tf.keras.layers.Dense(4, activation='sigmoid')(encoded)
autoencoder = tf.keras.models.Model(input_img, decoded)
problem = AutoencoderProblem(autoencoder,dtype=tf.float32) problem = AutoencoderProblem(autoencoder,dtype=tf.float32)
settings['tikhonov_gamma'] = 0.0 settings['tikhonov_gamma'] = 0.0
......
import numpy as np
import os, sys
# Silence TensorFlow C++ logging ('3' = errors only) and OpenMP/KMP noise.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Allow duplicate OpenMP runtimes to coexist (common Anaconda workaround).
os.environ['KMP_DUPLICATE_LIB_OK']='True'
os.environ["KMP_WARNINGS"] = "FALSE"
import tensorflow as tf
import time
# Import hessianlearn repository
# Falls back to "../../" when HESSIANLEARN_PATH is not set in the environment.
sys.path.append( os.environ.get('HESSIANLEARN_PATH', "../../"))
from hessianlearn import *
# Custom activation function
from keras.layers import Activation
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
def custom_activation(x):
    """ReLU cubed: max(x, 0) ** 3 — zero for negative inputs, cubic growth otherwise."""
    rectified = K.relu(x)
    return rectified ** 3
# Register under 'custom_activation' so Keras can resolve it by name.
get_custom_objects().update({'custom_activation': Activation(custom_activation)})
# NOTE(review): banner says INCG but the optimizer below is 'lrsfn' — looks
# copy-pasted from the INCG script; confirm which name is intended.
print("This is the main_INCG_Autoencoder_ReLu3.py file")

# Problem size: one-hot vectors in R^4 compressed to a 2-dimensional code.
encoding_dim = 2
dim = 4

# Train and validate on the same 4x4 one-hot data (identity reconstruction task).
x_train = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
x_test = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])
y_train = np.array([0, 0, 0, 1])  # NOTE(review): not referenced below — confirm it can be dropped

from keras.models import Sequential
from keras.layers import Dense

# Two Dense layers (4 -> 2 -> 4), each followed by the custom ReLu^3 activation.
autoencoder = tf.keras.models.Sequential()
autoencoder.add(Dense(encoding_dim, input_dim=dim))
autoencoder.add(Activation(custom_activation, name='ReLu3_1'))
autoencoder.add(Dense(dim, input_dim=encoding_dim))
autoencoder.add(Activation(custom_activation, name='ReLu3_2'))

settings = {'batch_size': 4, 'hess_batch_size': 4}  # full batch: dataset has 4 samples

problem = AutoencoderProblem(autoencoder, dtype=tf.float32)

settings['tikhonov_gamma'] = 0.0  # gamma 0.0 -> no Tikhonov weight penalty
regularization = L2Regularization(problem, gamma=settings['tikhonov_gamma'])

# Instantiate the data object
train_data = {problem.x: x_train}
validation_data = {problem.x: x_test}
data = Data(train_data, settings['batch_size'],
            validation_data=validation_data,
            hessian_batch_size=settings['hess_batch_size'])

# Low-Rank Saddle-Free Newton with a rank-2 Hessian approximation,
# up to 400 sweeps, base step 0.05, progress printed every 10 sweeps.
HLModelSettings = HessianlearnModelSettings()
HLModelSettings['optimizer'] = 'lrsfn'
HLModelSettings['fixed_step'] = False  # presumably enables adaptive step selection — TODO confirm
HLModelSettings['max_sweeps'] = 400
HLModelSettings['alpha'] = 5e-2
HLModelSettings['printing_sweep_frequency'] = 10
HLModelSettings['hessian_low_rank'] = 2

HLModel = HessianlearnModel(problem, regularization, data, settings=HLModelSettings)
HLModel.fit()
...@@ -42,7 +42,7 @@ data = Data(train_data,settings['batch_size'],\ ...@@ -42,7 +42,7 @@ data = Data(train_data,settings['batch_size'],\
HLModelSettings = HessianlearnModelSettings() HLModelSettings = HessianlearnModelSettings()
HLModelSettings['optimizer'] = 'gd' HLModelSettings['optimizer'] = 'gd'
HLModelSettings['fixed_step'] = False HLModelSettings['fixed_step'] = False
HLModelSettings['max_sweeps'] = 10000 HLModelSettings['max_sweeps'] = 400
HLModelSettings['alpha'] = 5e-2 HLModelSettings['alpha'] = 5e-2
HLModelSettings['printing_sweep_frequency'] = 10 HLModelSettings['printing_sweep_frequency'] = 10
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment