Commit bafce188 authored by Jamie

Removed Vertical Flipping

parent 2ec95613
%% Cell type:code id: tags:
``` python
#Imports
import numpy as np
import os
import PIL.Image      # load the Image submodule explicitly (import PIL alone does not)
import PIL as pillow  # used for image resizing via pillow.Image.open / .resize
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.optimizers import Adadelta
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import shutil
```
%% Cell type:code id: tags:
``` python
# Data pre-processing function declared here
def make_train_test_files(input_folder, tag):
    # Make the training and testing paths
    os.mkdir(train_path + "\\" + tag)
    os.mkdir(test_path + "\\" + tag)
    train_count = 0
    test_count = 0
    test_flag = 0
    # Get all the files and shuffle them for a random split into training and testing
    all_files = os.listdir(input_folder)
    random.shuffle(all_files)
    if len(all_files) < (train_size + test_size):  # Not enough files, so some must be augmented
        augment_count = (train_size + test_size) - len(all_files)  # number of augmented copies needed
        for filename in all_files:
            if test_flag == 0:
                if os.path.getsize(input_folder + "\\" + filename) < 7115:  # Skip junk files smaller than ~7 KB
                    continue
                image = pillow.Image.open(input_folder + "\\" + filename)
                if train_count < augment_count:
                    # Augmented copy: random rotation plus an optional horizontal flip
                    # (vertical flipping removed)
                    image2 = image.rotate(random.randint(0, 360))
                    if random.randint(0, 1) == 1:
                        image2 = image2.transpose(pillow.Image.FLIP_LEFT_RIGHT)
                    image2 = image2.resize(dimensions)
                    image2.save(train_path + "\\" + tag + "\\" + "aug" + filename)
                    train_count = train_count + 1
                image = image.resize(dimensions)
                image.save(train_path + "\\" + tag + "\\" + filename)
                train_count = train_count + 1
                if train_count >= train_size:  # got enough training files
                    test_flag = 1
            else:
                if os.path.getsize(input_folder + "\\" + filename) < 7115:  # Skip junk files smaller than ~7 KB
                    continue  # move on to the next file without altering the counts
                image = pillow.Image.open(input_folder + "\\" + filename)
                image = image.resize(dimensions)
                image.save(test_path + "\\" + tag + "\\" + filename)
                del image
                test_count = test_count + 1
                if test_count == test_size:  # got enough testing files
                    return
    else:  # Enough files on disk, no need to augment
        for filename in all_files:
            if test_flag == 0:
                if os.path.getsize(input_folder + "\\" + filename) < 7115:  # Skip junk files smaller than ~7 KB
                    continue  # move on to the next file without altering the training count
                image = pillow.Image.open(input_folder + "\\" + filename)
                image = image.resize(dimensions)
                image.save(train_path + "\\" + tag + "\\" + filename)
                del image
                train_count = train_count + 1
                if train_count == train_size:  # got enough training files
                    test_flag = 1
            else:
                if os.path.getsize(input_folder + "\\" + filename) < 7115:  # Skip junk files smaller than ~7 KB
                    continue  # move on to the next file without altering the test count
                image = pillow.Image.open(input_folder + "\\" + filename)
                image = image.resize(dimensions)
                image.save(test_path + "\\" + tag + "\\" + filename)
                del image
                test_count = test_count + 1
                if test_count == test_size:  # got enough testing files
                    return
```
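%% Cell type:markdown id: tags:
For reference, a minimal sketch of the augmentation step in isolation, matching the logic inside the function above: a random rotation followed by an optional horizontal flip. The file path is hypothetical.
%% Cell type:code id: tags:
``` python
# Illustration only: the augmentation make_train_test_files applies to one image.
# "Flickr\\amusement\\sample.jpg" is a hypothetical path.
img = pillow.Image.open("Flickr\\amusement\\sample.jpg")
aug = img.rotate(random.randint(0, 360))  # random rotation
if random.randint(0, 1) == 1:             # 50% chance of a horizontal flip
    aug = aug.transpose(pillow.Image.FLIP_LEFT_RIGHT)
aug = aug.resize((450, 450))              # same as `dimensions` defined below
aug.show()                                # preview the augmented copy
```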
%% Cell type:code id: tags:
``` python
# Import the data / pre-process via the function
# For result replication, uncomment the line below and pick a suitable seed
#random.seed(42)
# Parameters to alter: the number of training/testing samples,
# the paths used for training and testing,
# and the dimensions all images are resized to (resizing keeps all images consistent)
path = "Flickr"  # Folder directory; if unzipping Flickr.tar yields a nested 'Flickr/Flickr', just change the value of path
train_size = 1200
test_size = 400
dimensions = (450, 450)
train_path = "Flickr\\train\\"
test_path = "Flickr\\test\\"
# # Create the training and testing folders, removing any stale ones first
# all_folders = os.listdir(path)
# if "train" in all_folders:
#     shutil.rmtree(path + "\\" + "train")
# os.mkdir(path + "\\" + "train")
# if "test" in all_folders:
#     shutil.rmtree(path + "\\" + "test")
# os.mkdir(path + "\\" + "test")
# # Populate the Training and Testing Folders
# make_train_test_files("Flickr\\amusement","amusement")
# make_train_test_files("Flickr\\anger","anger")
# make_train_test_files("Flickr\\awe","awe")
# make_train_test_files("Flickr\\contentment","contentment")
# make_train_test_files("Flickr\\disgust","disgust")
# make_train_test_files("Flickr\\excitement","excitement")
# make_train_test_files("Flickr\\fear","fear")
# make_train_test_files("Flickr\\sadness","sadness")
```
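%% Cell type:markdown id: tags:
The eight explicit calls above could equally be driven by a loop over the class folders. A sketch (commented out like the calls above, since the split only needs building once), assuming every subfolder of `path` other than `train` and `test` is a class folder:
%% Cell type:code id: tags:
``` python
# Sketch: populate the split by looping over the class folders instead of
# listing the eight make_train_test_files calls by hand.
# for tag in os.listdir(path):
#     folder = path + "\\" + tag
#     if tag in ("train", "test") or not os.path.isdir(folder):
#         continue  # skip the output folders and any loose files
#     make_train_test_files(folder, tag)
```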
%% Cell type:code id: tags:
``` python
# Bring the images into the system ready for Keras
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
batchsize = 32  # flow_from_directory defaults to batch_size=32, matching this value
train_generator = train_datagen.flow_from_directory(
    train_path,
    target_size=(96, 96),
    # batch_size=128,
    color_mode="rgb",
    class_mode='categorical')
validation_generator = val_datagen.flow_from_directory(
    test_path,
    target_size=(96, 96),
    # batch_size=128,
    color_mode="rgb",
    class_mode='categorical')
```
%% Output
Found 9600 images belonging to 8 classes.
Found 2667 images belonging to 8 classes.
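%% Cell type:markdown id: tags:
To confirm which one-hot index each of the 8 classes received, `flow_from_directory` exposes a `class_indices` mapping (indices are assigned alphabetically by folder name):
%% Cell type:code id: tags:
``` python
# Check the class -> one-hot index mapping assigned by flow_from_directory
print(train_generator.class_indices)
# e.g. {'amusement': 0, 'anger': 1, 'awe': 2, 'contentment': 3, ...}
```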
%% Cell type:code id: tags:
``` python
# Build the Keras Model here
# emotion_model = Sequential()
# emotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,3)))
# emotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
# emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
# emotion_model.add(Dropout(0.25))
# emotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
# emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
# emotion_model.add(Conv2D(16, kernel_size=(3, 3), activation='relu'))
# emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
# emotion_model.add(Dropout(0.25))
# emotion_model.add(Flatten())
# emotion_model.add(Dense(256, activation='relu'))
# emotion_model.add(Dropout(0.5))
# emotion_model.add(Dense(8, activation='softmax')) # This needs to be here at the end!
# emotion_model.summary()
emotion_model = Sequential()
# Convolutional feature-extraction layers
emotion_model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(96,96,3),padding="valid"))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu',padding="valid"))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Conv2D(256, kernel_size=(3, 3), activation='relu',padding="valid"))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Flatten())
# Dense classification layers
emotion_model.add(Dense(512, activation='relu'))
emotion_model.add(Dropout(0.5))
emotion_model.add(Dense(8, activation='softmax')) # This needs to be here at the end!
emotion_model.summary()
```
%% Output
Model: "sequential_5"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_15 (Conv2D)           (None, 94, 94, 64)        1792
_________________________________________________________________
max_pooling2d_15 (MaxPooling (None, 47, 47, 64)        0
_________________________________________________________________
conv2d_16 (Conv2D)           (None, 45, 45, 128)       73856
_________________________________________________________________
max_pooling2d_16 (MaxPooling (None, 22, 22, 128)       0
_________________________________________________________________
conv2d_17 (Conv2D)           (None, 20, 20, 256)       295168
_________________________________________________________________
max_pooling2d_17 (MaxPooling (None, 10, 10, 256)       0
_________________________________________________________________
flatten_5 (Flatten)          (None, 25600)             0
_________________________________________________________________
dense_10 (Dense)             (None, 512)               13107712
_________________________________________________________________
dropout_5 (Dropout)          (None, 512)               0
_________________________________________________________________
dense_11 (Dense)             (None, 8)                 4104
=================================================================
Total params: 13,482,632
Trainable params: 13,482,632
Non-trainable params: 0
_________________________________________________________________
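%% Cell type:markdown id: tags:
As a sanity check, the parameter counts in the summary can be reproduced by hand from the layer shapes (kernel weights plus biases):
%% Cell type:code id: tags:
``` python
# Reproduce the parameter counts reported by model.summary()
conv1  = 3*3*3*64 + 64       # 1,792
conv2  = 3*3*64*128 + 128    # 73,856
conv3  = 3*3*128*256 + 256   # 295,168
dense1 = 25600*512 + 512     # 13,107,712 (25600 = 10*10*256 flattened)
dense2 = 512*8 + 8           # 4,104
print(conv1 + conv2 + conv3 + dense1 + dense2)  # 13,482,632
```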
%% Cell type:code id: tags:
``` python
# Model training here
emotion_model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=0.001),
                      metrics=['accuracy'])
emotion_model_info = emotion_model.fit(
    train_generator,
    steps_per_epoch=9600 // batchsize,
    epochs=50,
    validation_data=validation_generator,
    validation_steps=2667 // batchsize)
```
%% Output
Epoch 1/50
300/300 [==============================] - 289s 962ms/step - loss: 1.9752 - accuracy: 0.2245 - val_loss: 1.9438 - val_accuracy: 0.2715
Epoch 2/50
300/300 [==============================] - 320s 1s/step - loss: 1.8511 - accuracy: 0.2875 - val_loss: 1.8180 - val_accuracy: 0.3155
Epoch 3/50
300/300 [==============================] - 366s 1s/step - loss: 1.7653 - accuracy: 0.3271 - val_loss: 1.7916 - val_accuracy: 0.3441
Epoch 4/50
300/300 [==============================] - 337s 1s/step - loss: 1.6756 - accuracy: 0.3721 - val_loss: 1.7981 - val_accuracy: 0.3347
Epoch 5/50
300/300 [==============================] - 454s 2s/step - loss: 1.5755 - accuracy: 0.4110 - val_loss: 1.7750 - val_accuracy: 0.3611
Epoch 6/50
300/300 [==============================] - 493s 2s/step - loss: 1.4259 - accuracy: 0.4774 - val_loss: 1.8246 - val_accuracy: 0.3358
Epoch 7/50
300/300 [==============================] - 297s 991ms/step - loss: 1.2324 - accuracy: 0.5568 - val_loss: 2.0196 - val_accuracy: 0.3230
Epoch 8/50
300/300 [==============================] - 323s 1s/step - loss: 1.0206 - accuracy: 0.6423 - val_loss: 2.1305 - val_accuracy: 0.3362
Epoch 9/50
300/300 [==============================] - 1049s 4s/step - loss: 0.7794 - accuracy: 0.7315 - val_loss: 2.4012 - val_accuracy: 0.3445
Epoch 10/50
300/300 [==============================] - 286s 953ms/step - loss: 0.6215 - accuracy: 0.7870 - val_loss: 2.6961 - val_accuracy: 0.3306
Epoch 11/50
300/300 [==============================] - 307s 1s/step - loss: 0.4364 - accuracy: 0.8537 - val_loss: 3.2084 - val_accuracy: 0.3189
Epoch 12/50
300/300 [==============================] - 321s 1s/step - loss: 0.3638 - accuracy: 0.8766 - val_loss: 3.3143 - val_accuracy: 0.3095
Epoch 13/50
72/300 [======>.......................] - ETA: 4:09 - loss: 0.2677 - accuracy: 0.9154
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
<ipython-input-16-e4744bd9d618> in <module>
6 epochs=50,
7 validation_data=validation_generator,
----> 8 validation_steps=2667 // batchsize)
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1181 _r=1):
1182 callbacks.on_train_batch_begin(step)
-> 1183 tmp_logs = self.train_function(iterator)
1184 if data_handler.should_sync:
1185 context.async_wait()
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
887
888 with OptionalXlaContext(self._jit_compile):
--> 889 result = self._call(*args, **kwds)
890
891 new_tracing_count = self.experimental_get_tracing_count()
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
915 # In this case we have created variables on the first call, so we run the
916 # defunned version which is guaranteed to never create variables.
--> 917 return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
918 elif self._stateful_fn is not None:
919 # Release the lock early so that multiple threads can perform the call
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
3022 filtered_flat_args) = self._maybe_define_function(args, kwargs)
3023 return graph_function._call_flat(
-> 3024 filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access
3025
3026 @property
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1959 # No tape is watching; skip to running the function.
1960 return self._build_call_outputs(self._inference_function.call(
-> 1961 ctx, args, cancellation_manager=cancellation_manager))
1962 forward_backward = self._select_forward_and_backward_functions(
1963 args,
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
594 inputs=args,
595 attrs=attrs,
--> 596 ctx=ctx)
597 else:
598 outputs = execute.execute_with_cancellation(
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
KeyboardInterrupt:
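%% Cell type:markdown id: tags:
The log above shows classic overfitting: training accuracy keeps climbing while validation loss rises steadily after epoch 5, and the run was eventually interrupted by hand. One standard remedy, not used in the original run, is Keras's `EarlyStopping` callback; a minimal sketch (the patience value is an untuned assumption):
%% Cell type:code id: tags:
``` python
# Sketch: stop training automatically once validation loss stops improving,
# instead of interrupting by hand. patience=3 is an untuned assumption.
from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=3,
                           restore_best_weights=True)
emotion_model_info = emotion_model.fit(
    train_generator,
    steps_per_epoch=9600 // batchsize,
    epochs=50,
    validation_data=validation_generator,
    validation_steps=2667 // batchsize,
    callbacks=[early_stop])
```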
%% Cell type:code id: tags:
``` python
# Calculate and report results here
```
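%% Cell type:markdown id: tags:
One possible way to fill in the results cell above: `evaluate` returns the loss and accuracy over the validation generator.
%% Cell type:code id: tags:
``` python
# Sketch: overall loss and accuracy on the held-out test images
val_loss, val_acc = emotion_model.evaluate(validation_generator,
                                           steps=2667 // batchsize)
print(f"validation loss {val_loss:.4f}, accuracy {val_acc:.4f}")
```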
%% Cell type:code id: tags:
``` python
# Possible extension: test whether darker images make the 'fear' class more likely to be predicted
```
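%% Cell type:markdown id: tags:
A sketch of the darkness idea above: compare an image's mean brightness with the probability the model assigns to 'fear'. The file path is hypothetical; the class index is looked up rather than assumed.
%% Cell type:code id: tags:
``` python
# Sketch: mean brightness of one image vs. the model's 'fear' probability.
# "some_image.jpg" is a hypothetical path.
img = pillow.Image.open("some_image.jpg").convert("RGB").resize((96, 96))
arr = np.asarray(img, dtype=np.float32) / 255.0
brightness = arr.mean()  # 0 = black, 1 = white
probs = emotion_model.predict(arr[np.newaxis, ...])[0]
fear_index = train_generator.class_indices['fear']
print(f"brightness {brightness:.3f}, P(fear) {probs[fear_index]:.3f}")
```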
%% Cell type:code id: tags:
``` python
#save the model
# emotion_model.save('model.h5')
```
%% Cell type:code id: tags:
``` python
# from keras.models import load_model
# emotion_model = load_model('model.h5')
```