tensorflow-keras-cpu-gpu/02. Simple Neural Network t...

In [5]:
import matplotlib
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (20, 10)
matplotlib.rcParams['font.size'] = 24
matplotlib.rcParams['lines.linewidth'] = 5
matplotlib.rcParams['lines.markersize'] = 20

import time
import pickle
from collections import defaultdict

import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib

from matplotlib import pyplot
from ipy_table import make_table, set_row_style, set_column_style
from jupyter_progressbar import ProgressBar

from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split

from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.optimizers import SGD, Adam
from keras.regularizers import l1, l2
from keras import regularizers
In [6]:
devices = device_lib.list_local_devices()

make_table([["name", "type"]] + [
    [device.name, device.device_type]
    for device in devices
])
set_row_style(0, bold=True)
Out[6]:
name            type
/device:CPU:0   CPU
/device:GPU:0   GPU
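The table above only shows which devices TensorFlow can see. To confirm that the ops in the benchmark below are actually placed on the device chosen with tf.device, one option (a sketch, assuming the TF 1.x Session API that this standalone-Keras setup uses) is to enable device-placement logging before building the model:

import tensorflow as tf
from keras import backend as K

# Print the device every op is assigned to when the graph runs.
config = tf.ConfigProto(log_device_placement=True)
K.set_session(tf.Session(config=config))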
In [11]:
results = defaultdict(dict)

for device in devices:
    with tf.device(device.name):
        with open("CatsAndDogs.pickle", 'rb') as f:
            data = pickle.load(f)

        X = data["dataset"]
        y = data["labels"]
        del data

        (X_train, X_test, y_train, y_test) = train_test_split(
            X, y, test_size = 0.25, random_state = 42)

        # Fully-connected network on the flattened images.
        model = Sequential()
        model.add(Flatten(input_shape=X_train.shape[1:]))

        model.add(Dense(768, kernel_initializer="uniform", activation="relu"))
        model.add(Dropout(0.2))

        model.add(Dense(384, kernel_initializer="uniform", activation="relu"))
        model.add(Dropout(0.2))

        model.add(Dense(128, kernel_initializer="uniform", activation="relu"))
        model.add(Dropout(0.2))

        # Two-way softmax output; binary_crossentropy then expects one-hot (N, 2) targets.
        model.add(Dense(2, activation="softmax"))

        model.compile(loss="binary_crossentropy", optimizer=Adam(lr=0.001), metrics=["accuracy"])
        
        # Time training and evaluation separately for this device.
        t0 = time.time()
        model.fit(X_train, y_train, validation_split=0.25, epochs=5, batch_size=128, verbose=0)
        t1 = time.time()
        (loss, accuracy) = model.evaluate(X_test, y_test, verbose=0)
        t2 = time.time()
        
    results[device.name]['training'] = t1 - t0
    results[device.name]['evaluating'] = t2 - t1
6250/6250 [==============================] - 1s 167us/step
6250/6250 [==============================] - 0s 60us/step
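LabelEncoder is imported above but never used, so the pickle presumably already stores the labels in the one-hot (N, 2) form that the softmax output and binary_crossentropy expect. A minimal sketch of that encoding step, assuming raw string labels (the 'cat'/'dog' values here are illustrative, not taken from the pickle):

from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical

raw_labels = ["cat", "dog", "cat"]           # illustrative raw labels
encoder = LabelEncoder()
y_int = encoder.fit_transform(raw_labels)    # -> array([0, 1, 0])
y = to_categorical(y_int, num_classes=2)     # -> one-hot (N, 2) targets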
In [12]:
make_table([["device", "execution time (s) training", "execution time (s) evaluating"]] + [
    [device, result['training'], result['evaluating']]
    for device, result in results.items()
])
set_column_style(0, bold=True, align='right')
set_row_style(0, bold=True, align='right')
Out[12]:
device          execution time (s) training   execution time (s) evaluating
/device:GPU:0   8.3381                        0.3781
/device:CPU:0   38.5678                       1.0451
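Reading from the results dict above, the GPU speedup can be computed directly; a small follow-up sketch using the device names from Out[6]:

gpu = results['/device:GPU:0']
cpu = results['/device:CPU:0']
print("training speedup:   %.1fx" % (cpu['training'] / gpu['training']))      # roughly 4.6x
print("evaluation speedup: %.1fx" % (cpu['evaluating'] / gpu['evaluating']))  # roughly 2.8x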