Commit 6bfef82d authored by Eva Lina Fesefeldt's avatar Eva Lina Fesefeldt
Browse files

Vergleich hess Incg Plots

parent 8a83923e

Too many changes to show.

To preserve performance only 1000 of 1000+ files are displayed.
import numpy as np
from scipy.special import comb
import matplotlib.pyplot as plt
import math
# Find all vectors x in {0,1}^n with exactly 5 entries equal to 1 (white)
# Returns a matrix of size (N, n) whose rows are the vectors sought
......@@ -83,5 +85,5 @@ def generate_tictactoe():
X = np.zeros((N,n))
X = find_combinations(n,k).astype(float)
labels = tictactoe_labels(X)
return X.astype('float32'), labels.astype('float32')
return X, labels
import tensorflow as tf
import numpy as np
def reelles_skalarprodukt_trainable_shape(v_1, v_2):
    """Real (Euclidean) inner product of two parameter lists in trainable shape.

    Parameters
    ----------
    v_1, v_2 : sequence of array-like
        Lists of weight/bias tensors of matching length and
        elementwise-compatible shapes (e.g. [W1, b1, W2, b2]).

    Returns
    -------
    np.ndarray
        float32 array of shape (1,) holding sum_i <v_1[i], v_2[i]>.
    """
    # Accumulate in a float32 array (as before); `total` avoids shadowing
    # the builtin `sum`.
    total = np.array([0]).astype('float32')
    for a, b in zip(v_1, v_2):
        total += np.sum(a * b)
    return total
# Todo umschreiben allgemeines Model
def vector_flat_shape_to_trainable_shape(v):
    """Split a flat parameter vector into the model's trainable-shape list.

    Assumes the fixed 9 -> n -> 3 dense architecture, so the parameter
    count is p = 9n + n + 3n + 3 = 13n + 3.

    Parameters
    ----------
    v : tensor of shape (p,) or (p, m)
        Flat parameter vector (a trailing second dimension is ignored).

    Returns
    -------
    list of tensors
        [kernel1 (9, n), bias1 (n,), kernel2 (n, 3), bias2 (3,)].
    """
    # ToDo: rewrite for a general model architecture.
    try:
        p, m = v.shape
    except ValueError:  # 1-D input: shape has a single entry to unpack
        p = v.shape[0]
    n = int((p - 3) / 13)  # size of the hidden layer
    slice1 = 9 * n           # end of layer-1 kernel
    slice2 = slice1 + n      # end of layer-1 bias
    slice3 = slice2 + n * 3  # end of layer-2 kernel
    v1 = tf.reshape(v[:slice1], (9, n))
    v2 = tf.reshape(v[slice1:slice2], (n,))
    v3 = tf.reshape(v[slice2:slice3], (n, 3))
    v4 = tf.reshape(v[slice3:], (3,))
    unit_vector_trainable_shape = [v1, v2, v3, v4]
    return unit_vector_trainable_shape
# Todo umschreiben für allgemeines n, allgemeines Model
def vector_trainable_shape_to_flat_shape(list):
    """Flatten a trainable-shape parameter list into one vector.

    Inverse of vector_flat_shape_to_trainable_shape for the fixed
    9 -> n -> 3 architecture (p = 13n + 3 parameters).

    Parameters
    ----------
    list : sequence of 4 arrays
        [kernel1 (9, n), bias1 (n,), kernel2 (n, 3), bias2 (3,)].
        (Parameter name kept for backward compatibility; it shadows the
        builtin, so we alias it immediately.)

    Returns
    -------
    np.ndarray
        Flat vector of shape (p,).
    """
    # ToDo: rewrite for general n / a general model architecture.
    params = list  # avoid using the shadowed builtin name below
    # Count the total number of parameters p.
    p = 0
    for t in params:
        try:
            rows, cols = t.shape
        except ValueError:  # 1-D tensor: only one dimension to unpack
            rows = t.shape[0]
            cols = 1
        p += rows * cols
    n = int((p - 3) / 13)  # size of the hidden layer
    slice1 = 9 * n           # end of layer-1 kernel
    slice2 = slice1 + n      # end of layer-1 bias
    slice3 = slice2 + n * 3  # end of layer-2 kernel
    v = np.zeros((p,))
    v[:slice1] = np.reshape(params[0], (9 * n,))
    v[slice1:slice2] = np.reshape(params[1], (n,))
    v[slice2:slice3] = np.reshape(params[2], (3 * n,))
    v[slice3:] = np.reshape(params[3], (3,))
    return v
def matrix_trainable_shape_to_flat_shape(model, h):
    """Assemble per-variable Hessian blocks into one flat square matrix.

    Parameters
    ----------
    model : Keras-style model with two dense layers
        Only layers[0]/layers[1] and their kernel/bias shapes are read.
    h : sequence of 4 tensors
        h[i] is the derivative of the gradient w.r.t. the i-th trainable
        variable (W1, b1, W2, b2 in that order); each reshapes to
        (n_params, <param count of that variable>).

    Returns
    -------
    tf.Tensor
        Hessian of shape (n_params, n_params), columns ordered as
        [W1 | b1 | W2 | b2].
    """
    layer1 = model.layers[0]
    layer2 = model.layers[1]
    # Total number of trainable parameters across both layers.
    n_params = tf.reduce_prod(layer1.kernel.shape) + tf.reduce_prod(layer2.kernel.shape) + tf.reduce_prod(layer1.bias.shape) + tf.reduce_prod(layer2.bias.shape)
    # h[0] is the derivative of the gradient w.r.t. the layer-1 weights
    n_params_D_weights_1 = tf.reduce_prod(layer1.kernel.shape)
    H_weights_1 = tf.reshape(h[0], [n_params, n_params_D_weights_1])
    # h[1] is the derivative of the gradient w.r.t. the layer-1 biases
    n_params_D_bias_1 = tf.reduce_prod(layer1.bias.shape)
    H_bias_1 = tf.reshape(h[1], [n_params, n_params_D_bias_1])
    # h[2] is the derivative of the gradient w.r.t. the layer-2 weights
    n_params_D_weights_2 = tf.reduce_prod(layer2.kernel.shape)
    H_weights_2 = tf.reshape(h[2], [n_params, n_params_D_weights_2])
    # h[3] is the derivative of the gradient w.r.t. the layer-2 biases
    n_params_D_bias_2 = tf.reduce_prod(layer2.bias.shape)
    H_bias_2 = tf.reshape(h[3], [n_params, n_params_D_bias_2])
    # Concatenate the column blocks into the full Hessian.
    # ToDo: preallocate instead of concatenating.
    h_mat = tf.concat([H_weights_1, H_bias_1, H_weights_2, H_bias_2], axis = 1)
    return h_mat
def matrix_flat_shape_to_trainable_shape(model, A):
    """Intended inverse of matrix_trainable_shape_to_flat_shape.

    NOTE(review): unfinished stub as committed — A_trainable is created
    but never filled, and the function implicitly returns None. The
    commented-out line below sketches the intended tf.reshape-based
    splitting of A's columns per trainable variable.

    Parameters
    ----------
    model : Keras-style model with two dense layers (layers[0]/layers[1]).
    A : matrix of shape (n_params, m) — only its shape is read so far.
    """
    layer1 = model.layers[0]
    layer2 = model.layers[1]
    n_params, m = A.shape
    A_trainable = []
    #A_trainable.append(tf.reshape())
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment