-
Notifications
You must be signed in to change notification settings - Fork 6
/
keras_train.py
170 lines (137 loc) · 5.92 KB
/
keras_train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
import os
import time
import numpy as np
# from matplotlib import pyplot as plt
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.models import load_model
from sklearn.metrics import classification_report
from models.firenet_tf import firenet_tf
from utils.dataset import load_dataset
from utils.dataset import load_firenet_test_dataset
from utils.dataset import preprocess
from utils.training import plot_history
from contextlib import redirect_stdout

# --- Reproducibility --------------------------------------------------------
# Pin every known source of randomness so training runs are repeatable.
seed_value = 666

# 1. `PYTHONHASHSEED` controls Python's string-hash randomization.
os.environ['PYTHONHASHSEED'] = str(seed_value)

# 2. Python's built-in pseudo-random generator.
import random
random.seed(seed_value)

# 3. NumPy's pseudo-random generator.
np.random.seed(seed_value)

# 4. TensorFlow's graph-level seed (TF1-style API).
import tensorflow as tf
tf.set_random_seed(seed_value)

# 5. For layers that introduce randomness (e.g. Dropout), also pass the seed:
#    model.add(Dropout(0.25, seed=seed_value))

# 6. Configure a new global single-threaded `tensorflow` session:
#    multi-threaded op scheduling is a source of non-determinism even with
#    all seeds fixed.
from keras import backend as K
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)

# --- Experiment configuration ------------------------------------------------
must_train = True
must_test = True
base_model = 'firenet_tf'
dataset = 'fismo_black'  # int(time.time())

# Resolve the dataset folder/loader and the directory for models and logs.
ds_folder, get_dataset = load_dataset(dataset)
save_path = os.path.join('.', 'models', 'saved', base_model, dataset)
# exist_ok avoids the check-then-create race of an explicit os.path.exists guard.
os.makedirs(save_path, exist_ok=True)
### Training
if must_train:
    # Load the train/validation splits, resized to 64x64, and normalize them.
    ds_path = os.path.join('.', 'datasets', ds_folder)
    x_train, y_train, x_val, y_val = get_dataset(ds_path, resize=(64, 64))
    x_train = preprocess(x_train)
    x_val = preprocess(x_val)

    # Dataset summary (labels: 1 = fire, 0 = no_fire).
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(y_train[y_train == 1].shape[0], 'fire')
    print(y_train[y_train == 0].shape[0], 'no_fire')
    print('x_val shape:', x_val.shape)
    print(x_val.shape[0], 'test samples')
    print(y_val[y_val == 1].shape[0], 'fire')
    print(y_val[y_val == 0].shape[0], 'no_fire')

    num_classes = 2
    input_shape = x_train.shape[1:]
    print('num_classes', num_classes, 'input_shape', input_shape)
    # Labels stay as integer vectors: the loss below is
    # sparse_categorical_crossentropy, so no one-hot conversion is needed.
    # y_train = utils.to_categorical(y_train, num_classes)
    # y_val = utils.to_categorical(y_val, num_classes)

    def prepare_callbacks(save_dir, suffix):
        """Build training callbacks: best-model checkpoint + CSV history log.

        Saves 'model_<suffix>.h5' (best val_acc only) and 'history.csv'
        inside save_dir, creating the directory if needed.
        """
        model_name = 'model_%s.h5' % suffix
        history_name = 'history.csv'
        # exist_ok avoids the isdir check-then-create race.
        os.makedirs(save_dir, exist_ok=True)
        filepath = os.path.join(save_dir, model_name)
        historypath = os.path.join(save_dir, history_name)
        # Keep only the weights with the best validation accuracy so far.
        checkpoint = ModelCheckpoint(filepath=filepath,
                                     monitor='val_acc',
                                     verbose=1,
                                     save_best_only=True)
        csv_logger = CSVLogger(filename=historypath,
                               separator=',',
                               append=False)
        return [csv_logger, checkpoint]
    # end prepare_callbacks

    model = firenet_tf(input_shape=input_shape)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    print('Initiating training, models will be saved at {}'.format(save_path))
    time_elapsed = 0
    since = time.time()
    # Per-epoch fit output goes to training.log; the final summary is printed
    # twice on purpose: once into the redirected log file, once to the console.
    with open(os.path.join(save_path, 'training.log'), 'a+') as f:
        with redirect_stdout(f):
            history = model.fit(x_train, y_train, batch_size=32, epochs=100,
                                validation_data=(x_val, y_val),
                                callbacks=prepare_callbacks(save_path, base_model))
            best_idx = np.argmax(history.history['val_acc'])
            best_acc = history.history['val_acc'][best_idx]
            time_elapsed = time.time() - since
            print('Training complete in {:.0f}m {:.0f}s'.format(
                time_elapsed // 60, time_elapsed % 60))
            print('Best accuracy on epoch {}: {:4f}'.format(best_idx, best_acc))

    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best accuracy on epoch {}: {:4f}'.format(best_idx, best_acc))

    plot_history(history.history, folder_path=save_path)
### Test
if must_test:
    # Evaluate the best checkpoint on the held-out FireNet test set.
    firenet_path = os.path.join('.', 'datasets', 'FireNetDataset')
    x_test, y_test = load_firenet_test_dataset(firenet_path, resize=(64, 64))
    x_test = preprocess(x_test)  # same normalization as training

    # Test-set summary (labels: 1 = fire, 0 = no_fire).
    print('x_test shape:', x_test.shape)
    print(x_test.shape[0], 'test samples')
    print(y_test[y_test == 1].shape[0], 'fire')
    print(y_test[y_test == 0].shape[0], 'no_fire')

    num_classes = 2
    input_shape = x_test.shape[1:]
    print('num_classes', num_classes, 'input_shape', input_shape)

    # Rebuild the architecture, compile, then load the weights that the
    # training checkpoint saved as the best ones.
    model = firenet_tf(input_shape=input_shape)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    weights_file = os.path.join(save_path, 'model_{}.h5'.format(base_model))
    model.load_weights(weights_file)

    score = model.evaluate(x_test, y_test, verbose=2)

    # Confusion Matrix and Classification Report: the predicted class is the
    # argmax over the model's per-class scores.
    y_score = model.predict(x_test, verbose=0)
    y_class = np.argmax(y_score, axis=1)
    # Y_test = np.argmax(y_test, axis=1)
    target_names = ['No Fire', 'Fire']
    class_report = classification_report(y_test, y_class,
                                         target_names=target_names)  # , output_dict=True)

    # Append evaluation results to test.log rather than the console.
    log_file = os.path.join(save_path, 'test.log')
    with open(log_file, 'a+') as f:
        with redirect_stdout(f):
            print(score)
            print('Classification Report')
            print(class_report)