# ki-Praktikum-MS2/OwnModel.py
# 2023-06-06 17:38:22 +02:00
# 74 lines, no EOL, 2.3 KiB, Python

# Import libraries and datasets
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import pandas as pd
import seaborn as sns
import pickle
import random
import os
import numpy as np
from keras.utils import np_utils
# Import hoisted to the top of this section instead of sitting between
# statements (PEP 8: imports before the code that uses them).
from sklearn.utils import shuffle

# Load the pickled train/validation splits.
# NOTE(review): pickle.load is only safe because these dataset files are
# trusted local artifacts — never unpickle data from an untrusted source.
with open(os.path.join("dataset", "train.p"), mode='rb') as training_data:
    train = pickle.load(training_data)
with open(os.path.join("dataset", "valid.p"), mode='rb') as validation_data:
    valid = pickle.load(validation_data)

# 'features' are the images, 'labels' the traffic-sign class ids
# (presumably — TODO confirm against the dataset's documentation).
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']

# Shuffle images and labels together so sample/label pairs stay aligned.
X_train, y_train = shuffle(X_train, y_train)
X_valid, y_valid = shuffle(X_valid, y_valid)
# Scale pixel values from [0, 255] into [0.0, 1.0] for stable training.
X_train_norm = X_train / 255.0
X_valid_norm = X_valid / 255.0

# Keep only classes 0-19 (subset of the traffic-sign classes).
num_classes = 20  # number of traffic-sign classes kept

# Use distinct names for the two boolean masks instead of reusing one
# variable for both splits.
train_mask = np.isin(y_train, range(num_classes))
X_train_subset = X_train_norm[train_mask]
y_train_subset = y_train[train_mask]

valid_mask = np.isin(y_valid, range(num_classes))
X_valid_subset = X_valid_norm[valid_mask]
y_valid_subset = y_valid[valid_mask]

# One-hot encode the labels. keras.utils.np_utils was removed in Keras 2.13;
# tf.keras.utils.to_categorical is the supported replacement and behaves
# identically for integer label arrays.
y_train_subset = tf.keras.utils.to_categorical(y_train_subset, num_classes)
y_valid_subset = tf.keras.utils.to_categorical(y_valid_subset, num_classes)
from tensorflow.keras import datasets, layers, models

# Build a small sequential CNN for 32x32 RGB traffic-sign images.
model = models.Sequential()
# Only the first layer needs the input_shape of the data (image).
model.add(layers.Conv2D(filters=2, kernel_size=(3, 3), padding="same",
                        activation='relu', input_shape=(32, 32, 3)))
model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(1, 1),
                                  padding='same'))
# strides=None / padding='valid' / data_format=None are the MaxPool2D
# defaults, so they are omitted here.
model.add(layers.MaxPool2D(pool_size=(2, 2)))
# Dropout for regularization. The original passed input_shape=(2,) here,
# copied from a documentation example; input_shape is meaningless on a
# non-first layer and has been removed.
model.add(layers.Dropout(0.2))
model.add(layers.Flatten())
# 20 output units with softmax — one probability per traffic-sign class
# kept in the subset.
model.add(layers.Dense(20, activation='softmax'))

# Print a summary of the network architecture.
model.summary()

# One-hot labels + softmax output -> categorical cross-entropy loss.
model.compile(optimizer='Adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the network. Batch size and epoch count are exercise choices;
# NOTE(review): 500 epochs on this tiny model is likely excessive —
# consider an EarlyStopping callback.
history = model.fit(x=X_train_subset,
                    y=y_train_subset,
                    batch_size=32,
                    epochs=500,
                    verbose=1,
                    validation_data=(X_valid_subset, y_valid_subset))

# Ensure the target directory exists first: model.save() does not create
# missing parent directories and would fail otherwise.
os.makedirs('saved_model', exist_ok=True)
model.save('saved_model/ownModel.h5')