Final Version MS2

This commit is contained in:
Torben Zwinge 2023-06-06 17:38:22 +02:00
parent b40e937341
commit 50b71750ab
12 changed files with 161 additions and 34 deletions

6
.idea/other.xml Normal file
View file

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- PyCharm/IDEA project setting: records that the SciView suggestion
     popup has already been shown, so the IDE does not prompt again. -->
<project version="4">
<component name="PySciProjectComponent">
<option name="PY_SCI_VIEW_SUGGESTED" value="true" />
</component>
</project>

View file

@ -7,6 +7,8 @@ import seaborn as sns
import pickle import pickle
import random import random
import os import os
import numpy as np
from keras.utils import np_utils
with open(os.path.join("dataset", "train.p"), mode='rb') as training_data: with open(os.path.join("dataset", "train.p"), mode='rb') as training_data:
train = pickle.load(training_data) train = pickle.load(training_data)
@ -33,6 +35,10 @@ mask = np.isin(y_valid, range(20))
X_valid_subset = X_valid_norm[mask] X_valid_subset = X_valid_norm[mask]
y_valid_subset = y_valid[mask] y_valid_subset = y_valid[mask]
num_classes = 20 # Anzahl der Straßenschilder-Klassen
y_train_subset = np_utils.to_categorical(y_train_subset, num_classes)
y_valid_subset = np_utils.to_categorical(y_valid_subset, num_classes)
from tensorflow.keras import datasets, layers, models from tensorflow.keras import datasets, layers, models
@ -43,23 +49,24 @@ model = models.Sequential()
model.add(layers.Conv2D( filters = 2 , kernel_size = ( 3 , 3 ), padding = "same" , activation = 'relu' , input_shape = ( 32 , 32 , 3))) model.add(layers.Conv2D( filters = 2 , kernel_size = ( 3 , 3 ), padding = "same" , activation = 'relu' , input_shape = ( 32 , 32 , 3)))
# TODO: Add layers to the model: # TODO: Add layers to the model:
#model.add(layers.AveragePooling2D(pool_size=(2, 2),strides=(1, 1), padding='same')) model.add(layers.MaxPooling2D(pool_size=(2, 2))) # Max-Pooling-Schicht
#model.add(layers.MaxPool2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)) model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same')) # Zweite Faltungs- und Aktivierungsschicht
#model.add(layers.Dropout(.2, input_shape=(2,))) model.add(layers.MaxPooling2D(pool_size=(2, 2))) # Max-Pooling-Schicht
model.add(layers.Flatten()) model.add(layers.Flatten()) # Flachschicht
#model.add(layers.Dense(43, activation='softmax')) model.add(layers.Dense(128, activation='relu')) # Versteckte Schicht mit 128 Neuronen und ReLU-Aktivierung
model.add(layers.Dense(20, activation='softmax')) # Ausgabeschicht mit Anzahl der Klassen und Softmax-Aktivierung
# Prints a summary of your network # Prints a summary of your network
model.summary() model.summary()
model.compile(optimizer = 'Adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy']) model.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# TODO: Choose the batch size and the epochs # TODO: Choose the batch size and the epochs
history = model.fit(x = X_train_subset, history = model.fit(x = X_train_subset,
y = y_train_subset, y = y_train_subset,
batch_size = 32, batch_size = 32,
epochs = 1000, epochs = 5,
verbose = 1, verbose = 1,
validation_data = (X_valid_subset, y_valid_subset)) validation_data = (X_valid_subset, y_valid_subset))

View file

@ -7,6 +7,8 @@ import seaborn as sns
import pickle import pickle
import random import random
import os import os
import numpy as np
from keras.utils import np_utils
with open(os.path.join("dataset", "train.p"), mode='rb') as training_data: with open(os.path.join("dataset", "train.p"), mode='rb') as training_data:
train = pickle.load(training_data) train = pickle.load(training_data)
@ -33,33 +35,31 @@ mask = np.isin(y_valid, range(20))
X_valid_subset = X_valid_norm[mask] X_valid_subset = X_valid_norm[mask]
y_valid_subset = y_valid[mask] y_valid_subset = y_valid[mask]
num_classes = 20 # Anzahl der Straßenschilder-Klassen
y_train_subset = np_utils.to_categorical(y_train_subset, num_classes)
y_valid_subset = np_utils.to_categorical(y_valid_subset, num_classes)
from tensorflow.keras import datasets, layers, models from tensorflow.keras import datasets, layers, models
model = models.Sequential() model = models.Sequential()
# Only in the first layer you have to select the input_shape of the data (image).
# TODO: Replace the question marks:
model.add(layers.Conv2D( filters = 2 , kernel_size = ( 3 , 3 ), padding = "same" , activation = 'relu' , input_shape = ( 32 , 32 , 3)))
# TODO: Add layers to the model: # TODO: Add layers to the model:
model.add(layers.AveragePooling2D(pool_size=(2, 2),strides=(1, 1), padding='same')) model.add(layers.Flatten(input_shape=(32, 32, 3))) # Eingabeschicht, Bildgröße: 32x32, 3 Kanäle (RGB)
model.add(layers.MaxPool2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)) model.add(layers.Dense(128, activation='relu')) # Versteckte Schicht mit 128 Neuronen und ReLU-Aktivierung
model.add(layers.Dropout(.2, input_shape=(2,))) model.add(layers.Dense(20, activation='softmax')) # Ausgabeschicht mit Anzahl der Klassen und Softmax-Aktivierung
model.add(layers.Flatten())
model.add(layers.Dense(43, activation='softmax'))
# Prints a summary of your network # Prints a summary of your network
model.summary() model.summary()
model.compile(optimizer = 'Adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy']) model.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# TODO: Choose the batch size and the epochs # TODO: Choose the batch size and the epochs
history = model.fit(x = X_train_subset, history = model.fit(x = X_train_subset,
y = y_train_subset, y = y_train_subset,
batch_size = 32, batch_size = 32,
epochs = 1500, epochs = 5,
verbose = 1, verbose = 1,
validation_data = (X_valid_subset, y_valid_subset)) validation_data = (X_valid_subset, y_valid_subset))

View file

@ -7,6 +7,8 @@ import seaborn as sns
import pickle import pickle
import random import random
import os import os
import numpy as np
from keras.utils import np_utils
with open(os.path.join("dataset", "train.p"), mode='rb') as training_data: with open(os.path.join("dataset", "train.p"), mode='rb') as training_data:
train = pickle.load(training_data) train = pickle.load(training_data)
@ -33,6 +35,10 @@ mask = np.isin(y_valid, range(20))
X_valid_subset = X_valid_norm[mask] X_valid_subset = X_valid_norm[mask]
y_valid_subset = y_valid[mask] y_valid_subset = y_valid[mask]
num_classes = 20 # Anzahl der Straßenschilder-Klassen
y_train_subset = np_utils.to_categorical(y_train_subset, num_classes)
y_valid_subset = np_utils.to_categorical(y_valid_subset, num_classes)
from tensorflow.keras import datasets, layers, models from tensorflow.keras import datasets, layers, models
@ -47,19 +53,19 @@ model.add(layers.AveragePooling2D(pool_size=(2, 2),strides=(1, 1), padding='same
model.add(layers.MaxPool2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)) model.add(layers.MaxPool2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))
model.add(layers.Dropout(.2, input_shape=(2,))) model.add(layers.Dropout(.2, input_shape=(2,)))
model.add(layers.Flatten()) model.add(layers.Flatten())
#model.add(layers.Dense(43, activation='softmax')) model.add(layers.Dense(20, activation='softmax'))
# Prints a summary of your network # Prints a summary of your network
model.summary() model.summary()
model.compile(optimizer = 'Adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy']) model.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# TODO: Choose the batch size and the epochs # TODO: Choose the batch size and the epochs
history = model.fit(x = X_train_subset, history = model.fit(x = X_train_subset,
y = y_train_subset, y = y_train_subset,
batch_size = 32, batch_size = 32,
epochs = 1000, epochs = 500,
verbose = 1, verbose = 1,
validation_data = (X_valid_subset, y_valid_subset)) validation_data = (X_valid_subset, y_valid_subset))

53
TestMitTest.py Normal file
View file

@@ -0,0 +1,53 @@
# Import libraries and datasets.
# Evaluates a saved traffic-sign classifier on the held-out test set
# (restricted to classes 0-19) and plots a confusion matrix.
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import pandas as pd
import seaborn as sns
import pickle
import random
import os

with open(os.path.join("test", "test.p"), mode='rb') as validation_data:
    valid = pickle.load(validation_data)

X_valid, y_valid = valid['features'], valid['labels']

from sklearn.utils import shuffle
X_valid, y_valid = shuffle(X_valid, y_valid)

# Normalize image to [0, 1] — the model was trained on normalized inputs.
X_valid_norm = X_valid / 255

# Keep only classes 0-19 (the subset the model was trained on).
mask = np.isin(y_valid, range(20))
X_valid_subset = X_valid_norm[mask]
y_valid_subset = y_valid[mask]

num_classes = 20  # number of traffic-sign classes
# One-hot encode the labels to match the model's categorical_crossentropy
# loss. NOTE: `keras.utils.np_utils` was removed from modern Keras; the
# supported spelling is tf.keras.utils.to_categorical.
y_valid_subset = tf.keras.utils.to_categorical(y_valid_subset, num_classes)

#ownModel
#convolutionalNeuralNetwork
#fullyConnectedNeuralNetwork
model = tf.keras.models.load_model('saved_model/fullyConnectedNeuralNetwork.h5')

score = model.evaluate(X_valid_subset, y_valid_subset)
print('Test Accuracy: {}'.format(score[1]))

from sklearn.metrics import confusion_matrix
# Collapse one-hot vectors back to class indices for the confusion matrix.
predicted_classes = np.argmax(model.predict(X_valid_subset), axis=-1)
y_true = np.argmax(y_valid_subset, axis=-1)
cm = confusion_matrix(y_true, predicted_classes)
plt.figure(figsize=(25, 25))
sns.heatmap(cm, annot=True)
plt.show()

View file

@ -7,21 +7,18 @@ import seaborn as sns
import pickle import pickle
import random import random
import os import os
import numpy as np
from keras.utils import np_utils
with open(os.path.join("dataset", "train.p"), mode='rb') as training_data:
train = pickle.load(training_data)
with open(os.path.join("dataset", "valid.p"), mode='rb') as validation_data: with open(os.path.join("dataset", "valid.p"), mode='rb') as validation_data:
valid = pickle.load(validation_data) valid = pickle.load(validation_data)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels'] X_valid, y_valid = valid['features'], valid['labels']
from sklearn.utils import shuffle from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
X_valid, y_valid = shuffle(X_valid, y_valid) X_valid, y_valid = shuffle(X_valid, y_valid)
# Normalize image to [0, 1] # Normalize image to [0, 1]
X_train_norm = X_train / 255
X_valid_norm = X_valid / 255 X_valid_norm = X_valid / 255
#Wähle Klassen 0-19 #Wähle Klassen 0-19
@ -29,6 +26,10 @@ mask = np.isin(y_valid, range(20))
X_valid_subset = X_valid_norm[mask] X_valid_subset = X_valid_norm[mask]
y_valid_subset = y_valid[mask] y_valid_subset = y_valid[mask]
num_classes = 20 # Anzahl der Straßenschilder-Klassen
y_valid_subset = np_utils.to_categorical(y_valid_subset, num_classes)
#ownModel #ownModel
#convolutionalNeuralNetwork #convolutionalNeuralNetwork
@ -42,7 +43,7 @@ print('Test Accuracy: {}'.format(score[1]))
from sklearn.metrics import confusion_matrix from sklearn.metrics import confusion_matrix
predicted_classes = np.argmax(model.predict(X_valid_subset), axis=-1) predicted_classes = np.argmax(model.predict(X_valid_subset), axis=-1)
y_true = y_valid_subset y_true = np.argmax(y_valid_subset, axis=-1)
cm = confusion_matrix(y_true, predicted_classes) cm = confusion_matrix(y_true, predicted_classes)

54
TestOnePictureTest.py Normal file
View file

@@ -0,0 +1,54 @@
# Import libraries and datasets.
# Loads a saved traffic-sign classifier and evaluates it on ONE randomly
# chosen test image from classes 0-19, then shows the confusion matrix.
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import pandas as pd
import seaborn as sns
import pickle
import random
import os

with open(os.path.join("test", "test.p"), mode='rb') as validation_data:
    valid = pickle.load(validation_data)

X_valid, y_valid = valid['features'], valid['labels']

from sklearn.utils import shuffle
X_valid, y_valid = shuffle(X_valid, y_valid)

# Normalize image to [0, 1]
X_valid_norm = X_valid / 255

# Take one random picture from classes 0-19.
filtered_indices = [i for i, label in enumerate(y_valid) if 0 <= label <= 19]
indice = random.choice(filtered_indices)
# BUG FIX: feed the model the NORMALIZED image. The network was trained on
# inputs scaled to [0, 1]; the original indexed the raw [0, 255] array
# (X_valid instead of X_valid_norm), so X_valid_norm was never used.
X_valid_subset = X_valid_norm[indice][np.newaxis, ...]
y_valid_subset = y_valid[indice][np.newaxis, ...]

num_classes = 20  # number of traffic-sign classes
# NOTE: `keras.utils.np_utils` was removed from modern Keras; the supported
# spelling is tf.keras.utils.to_categorical.
y_valid_subset = tf.keras.utils.to_categorical(y_valid_subset, num_classes)

#ownModel
#convolutionalNeuralNetwork
#fullyConnectedNeuralNetwork
model = tf.keras.models.load_model('saved_model/fullyConnectedNeuralNetwork.h5')

score = model.evaluate(X_valid_subset, y_valid_subset)
print('Test Accuracy: {}'.format(score[1]))

from sklearn.metrics import confusion_matrix
# Collapse one-hot vectors back to class indices for the confusion matrix.
predicted_classes = np.argmax(model.predict(X_valid_subset), axis=-1)
y_true = np.argmax(y_valid_subset, axis=-1)
cm = confusion_matrix(y_true, predicted_classes)
plt.figure(figsize=(25, 25))
sns.heatmap(cm, annot=True)
plt.show()

View file

@ -7,21 +7,18 @@ import seaborn as sns
import pickle import pickle
import random import random
import os import os
import numpy as np
from keras.utils import np_utils
with open(os.path.join("dataset", "train.p"), mode='rb') as training_data:
train = pickle.load(training_data)
with open(os.path.join("dataset", "valid.p"), mode='rb') as validation_data: with open(os.path.join("dataset", "valid.p"), mode='rb') as validation_data:
valid = pickle.load(validation_data) valid = pickle.load(validation_data)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels'] X_valid, y_valid = valid['features'], valid['labels']
from sklearn.utils import shuffle from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
X_valid, y_valid = shuffle(X_valid, y_valid) X_valid, y_valid = shuffle(X_valid, y_valid)
# Normalize image to [0, 1] # Normalize image to [0, 1]
X_train_norm = X_train / 255
X_valid_norm = X_valid / 255 X_valid_norm = X_valid / 255
#TakeOnePicture (Klasse 0-19) #TakeOnePicture (Klasse 0-19)
@ -29,7 +26,10 @@ filtered_indices = [i for i, label in enumerate(y_valid) if label >= 0 and label
indice = random.choice(filtered_indices) indice = random.choice(filtered_indices)
X_valid_subset = X_valid[indice][np.newaxis, ...] X_valid_subset = X_valid[indice][np.newaxis, ...]
y_valid_subset = y_valid[indice][np.newaxis, ...] y_valid_subset = y_valid[indice][np.newaxis, ...]
print(y_valid_subset)
num_classes = 20 # Anzahl der Straßenschilder-Klassen
y_valid_subset = np_utils.to_categorical(y_valid_subset, num_classes)
#ownModel #ownModel
@ -44,7 +44,7 @@ print('Test Accuracy: {}'.format(score[1]))
from sklearn.metrics import confusion_matrix from sklearn.metrics import confusion_matrix
predicted_classes = np.argmax(model.predict(X_valid_subset), axis=-1) predicted_classes = np.argmax(model.predict(X_valid_subset), axis=-1)
y_true = y_valid_subset y_true = np.argmax(y_valid_subset, axis=-1)
cm = confusion_matrix(y_true, predicted_classes) cm = confusion_matrix(y_true, predicted_classes)

Binary file not shown.

BIN
test/test.p Normal file

Binary file not shown.