Commit eff0cce1 authored by Eva Lina Fesefeldt

Training on TicTacToe, Hessian matrix computed

parent 595b5496
import numpy as np
from numpy.linalg import norm
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
from generate_dataset import generate_tictactoe, show_fields
# Generate the TicTacToe dataset and display some example boards
X, labels = generate_tictactoe()
norms = norm(labels, ord=1, axis=0)  # column-wise 1-norms of the labels
show_fields(X, labels, 20)
def imshow_zero_center(image, n):
    # Plot the matrix with a diverging colormap centered at zero
    lim = tf.reduce_max(abs(image))
    plt.imshow(image, vmin=-lim, vmax=lim, cmap='seismic')
    plt.title("Hessian matrix for n = " + str(n))
    plt.colorbar()
    plt.show()
def show_eigenvalues(A, n):
    # Scatter plot of the eigenvalues in the complex plane; plt.plot instead
    # of plt.loglog, since eigenvalues of an (almost) symmetric Hessian can
    # be zero or negative, which a log scale cannot display
    plt.plot(tf.math.real(A), tf.math.imag(A), '.')
    plt.xlabel("Real part")
    plt.ylabel("Imaginary part")
    plt.title("Eigenvalues of the Hessian matrix for n = " + str(n))
    plt.show()
# Create the dataset and show some example boards
train_set, train_labels = generate_tictactoe()
norms = norm(train_labels, ord=1, axis=0)  # column-wise 1-norms of the labels
#show_fields(train_set, train_labels, 20)
# Custom activation function: ReLU^3
size_hidden_layer = 50

from keras.layers import Activation
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects

def custom_activation(x):
    return K.relu(x) ** 3

# Register the activation under the name 'custom_activation'
# (note: the model below currently uses 'sigmoid' instead)
get_custom_objects().update({'custom_activation': Activation(custom_activation)})
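# Hedged usage sketch: once registered, the activation can be selected by
# name in a layer, e.g. (hypothetical, not used in the model below):
#   model.add(Dense(size_hidden_layer, input_dim=9, activation='custom_activation'))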
# Build the neural network
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(size_hidden_layer, input_dim=9, activation='sigmoid'))
model.add(Dense(3, activation='softmax'))  # input shape is inferred on non-input layers
model.summary()
# Build a tf.data pipeline (currently unused; model.fit below consumes the NumPy arrays directly)
dataset = tf.data.Dataset.from_tensor_slices((train_set, train_labels))
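# Hedged alternative sketch: the dataset could feed model.fit directly once
# shuffled and batched, e.g.:
#   model.fit(dataset.shuffle(len(train_set)).batch(32), epochs=500, verbose=0)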
# Train the classifier
loss_fn = tf.keras.losses.CategoricalCrossentropy()
model.compile(optimizer='adam', loss=loss_fn)
model.fit(train_set, train_labels, batch_size=32, epochs=500, verbose=0)
weights_and_bias = model.get_weights()
predictions = model.predict(train_set)
print("Loss: ", loss_fn(train_labels, predictions).numpy())
# Compute the Hessian matrix with nested gradient tapes
layer1 = model.layers[0]
layer2 = model.layers[1]
x = train_set

with tf.GradientTape() as t2:
    with tf.GradientTape() as t1:
        x = layer1(x)
        x = layer2(x)
        loss = loss_fn(train_labels, x)
    # First derivatives with respect to all weights and biases
    g = t1.gradient(loss, [layer1.kernel, layer1.bias, layer2.kernel, layer2.bias])
    # Flatten the gradient into a single column vector
    grad = tf.concat([tf.reshape(g[0], [9*size_hidden_layer, 1]),
                      tf.reshape(g[1], [size_hidden_layer, 1]),
                      tf.reshape(g[2], [size_hidden_layer*3, 1]),
                      tf.reshape(g[3], [3, 1])], axis=0)

# Second derivatives: Jacobian of the flattened gradient
h = t2.jacobian(grad, [layer1.kernel, layer1.bias, layer2.kernel, layer2.bias])
n_params = tf.reduce_prod(layer1.kernel.shape) + tf.reduce_prod(layer2.kernel.shape) + tf.reduce_prod(layer1.bias.shape) + tf.reduce_prod(layer2.bias.shape)
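# Hedged sanity check: the flattened parameter count should match Keras'
# own bookkeeping (assumes the model has exactly these four parameter tensors)
assert int(n_params) == model.count_params()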
# h[0] is the derivative of the gradient with respect to the layer-1 weights
n_params_D_weights_1 = tf.reduce_prod(layer1.kernel.shape)
H_weights_1 = tf.reshape(h[0], [n_params, n_params_D_weights_1])

# h[1] is the derivative of the gradient with respect to the layer-1 biases
n_params_D_bias_1 = tf.reduce_prod(layer1.bias.shape)
H_bias_1 = tf.reshape(h[1], [n_params, n_params_D_bias_1])

# h[2] is the derivative of the gradient with respect to the layer-2 weights
n_params_D_weights_2 = tf.reduce_prod(layer2.kernel.shape)
H_weights_2 = tf.reshape(h[2], [n_params, n_params_D_weights_2])

# h[3] is the derivative of the gradient with respect to the layer-2 biases
n_params_D_bias_2 = tf.reduce_prod(layer2.bias.shape)
H_bias_2 = tf.reshape(h[3], [n_params, n_params_D_bias_2])
# Assemble the full Hessian matrix from the four blocks
h_mat = tf.concat([H_weights_1, H_bias_1, H_weights_2, H_bias_2], axis=1)
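# Hedged sanity check: the Hessian of a scalar loss should be symmetric up
# to numerical error, despite the block-wise assembly above
asymmetry = tf.reduce_max(tf.abs(h_mat - tf.transpose(h_mat)))
print("Maximum asymmetry of the Hessian: ", asymmetry.numpy())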
imshow_zero_center(h_mat, n=size_hidden_layer)
# Compute and plot the eigenvalues
eig = tf.linalg.eig(h_mat)
show_eigenvalues(eig[0], size_hidden_layer)
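# Hedged sketch: count negative eigenvalues to judge definiteness at the
# trained parameters (assumes the imaginary parts are numerical noise only)
eigvals = tf.math.real(eig[0]).numpy()
print("Negative eigenvalues: ", np.sum(eigvals < 0))
print("Smallest / largest eigenvalue: ", eigvals.min(), ", ", eigvals.max())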