Explainable-Federated-Learn.../Attack detection via randon...

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"#IMPORTS\n",
"\n",
"import numpy as np\n",
"import random\n",
"import tensorflow as tf\n",
"import tensorflow.keras as kr\n",
"import tensorflow.keras.backend as K\n",
"from tensorflow.keras.models import Model\n",
"from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense\n",
"from tensorflow.keras.datasets import mnist\n",
"import os\n",
"import csv\n",
"\n",
"from scipy.spatial.distance import euclidean\n",
"from sklearn.metrics import confusion_matrix\n",
"\n",
"from time import sleep\n",
"from tqdm import tqdm\n",
"\n",
"import copy\n",
"import numpy\n",
"from sklearn.datasets import make_classification\n",
"from sklearn.ensemble import RandomForestClassifier\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"import math\n",
"import seaborn as sns\n",
"from numpy.random import RandomState\n",
"import scipy as scp\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.compose import ColumnTransformer\n",
"from sklearn.preprocessing import OneHotEncoder, LabelEncoder\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense\n",
"from keras import optimizers\n",
"from keras.callbacks import EarlyStopping,ModelCheckpoint\n",
"from keras.utils import to_categorical\n",
"from keras import backend as K\n",
"from itertools import product\n",
"from sklearn.metrics import accuracy_score\n",
"from sklearn.metrics import precision_score\n",
"from sklearn.metrics import recall_score\n",
"from sklearn.metrics import f1_score\n",
"from sklearn.metrics import roc_auc_score\n",
"from sklearn.metrics import confusion_matrix\n",
"\n",
"from sklearn import mixture\n",
"\n",
"from mpl_toolkits.mplot3d import Axes3D\n",
"import matplotlib.pyplot as plt\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Enter here the data set you want to explain (adult, activity, or synthatic)\n",
"\n",
"data_set = 'activity'\n",
"\n",
"# Enter here the numb er of peers you want in the experiments\n",
"\n",
"n_peers = 100\n",
"\n",
"# Enter here the type of the attack (Byzantine, poisoning, label_flipping)\n",
"attack_type = 'Byzantine'\n",
"\n",
"# the targeted features in case the attack is poisoning attack \n",
"feature_attacked = [3,5,8]\n",
"\n",
"# Enter here the number of attacker peers you want, keep the number of attacker less that 1/2 of the n_peers\n",
"number_attackers = 3\n",
"\n",
"# enter here the Number of global training epochs, the start and ending epochs of the attacks\n",
"n_rounds = 10\n",
"start_attack_round = 3\n",
"end_attack_round = 7\n",
"\n",
"# the threshold for attack detection\n",
"\n",
"alpha = 1.2\n",
"\n",
"beta = 1/4"
]
},
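{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell above only sets the attack configuration; the attack logic itself lives further down in the notebook. As a rough, hypothetical illustration of what the three `attack_type` values usually mean in this setting (not the notebook's actual implementation), a Byzantine peer reports random weights, a label-flipping peer inverts its one-hot labels, and a poisoning peer perturbs the columns listed in `feature_attacked`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sketch of the three attack types applied to a single peer.\n",
"# `weights`, `X_local`, and `y_local` are illustrative names, not variables\n",
"# defined in this notebook.\n",
"def apply_attack(weights, X_local, y_local, attack_type, feature_attacked, rs):\n",
"    if attack_type == 'Byzantine':\n",
"        # replace the honest update with random noise of the same shape\n",
"        weights = [rs.normal(0, 1, w.shape) for w in weights]\n",
"    elif attack_type == 'label_flipping':\n",
"        # swap the two one-hot label columns\n",
"        y_local = y_local[:, ::-1]\n",
"    elif attack_type == 'poisoning':\n",
"        # overwrite only the targeted feature columns with random values\n",
"        for f in feature_attacked:\n",
"            X_local[:, f] = rs.uniform(0, 1, len(X_local))\n",
"    return weights, X_local, y_local"
]
},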
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# the random state we will use in the experiments. It can be changed \n",
"\n",
"rs = RandomState(92)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# preprocessing adults data set\n",
"\n",
"if data_set == 'adult':\n",
" #Load dataset into a pandas DataFrame\n",
" adult_data = pd.read_csv('adult_data.csv', na_values='?')\n",
" # Drop all records with missing values\n",
" adult_data.dropna(inplace=True)\n",
" adult_data.reset_index(drop=True, inplace=True)\n",
"\n",
" # Drop fnlwgt, not interesting for ML\n",
" adult_data.drop('fnlwgt', axis=1, inplace=True)\n",
" adult_data.drop('education', axis=1, inplace=True)\n",
"\n",
"# merging some similar features.\n",
" adult_data['marital-status'].replace('Married-civ-spouse', 'Married', inplace=True)\n",
" adult_data['marital-status'].replace('Divorced', 'Unmarried', inplace=True)\n",
" adult_data['marital-status'].replace('Never-married', 'Unmarried', inplace=True)\n",
" adult_data['marital-status'].replace('Separated', 'Unmarried', inplace=True)\n",
" adult_data['marital-status'].replace('Widowed', 'Unmarried', inplace=True)\n",
" adult_data['marital-status'].replace('Married-spouse-absent', 'Married', inplace=True)\n",
" adult_data['marital-status'].replace('Married-AF-spouse', 'Married', inplace=True)\n",
" \n",
" adult_data = pd.concat([adult_data,pd.get_dummies(adult_data['income'], prefix='income')],axis=1)\n",
" adult_data.drop('income', axis=1, inplace=True)\n",
" obj_columns = adult_data.select_dtypes(['object']).columns\n",
" adult_data[obj_columns] = adult_data[obj_columns].astype('category')\n",
" # Convert numerics to floats and normalize\n",
" num_columns = adult_data.select_dtypes(['int64']).columns\n",
" adult_data[num_columns] = adult_data[num_columns].astype('float64')\n",
" for c in num_columns:\n",
" #adult[c] -= adult[c].mean()\n",
" #adult[c] /= adult[c].std()\n",
" adult_data[c] = (adult_data[c] - adult_data[c].min()) / (adult_data[c].max()-adult_data[c].min())\n",
" # 'workclass', 'marital-status', 'occupation', 'relationship' ,'race', 'gender', 'native-country'\n",
" # adult_data['income'] = adult_data['income'].cat.codes\n",
" adult_data['marital-status'] = adult_data['marital-status'].cat.codes\n",
" adult_data['occupation'] = adult_data['occupation'].cat.codes\n",
" adult_data['relationship'] = adult_data['relationship'].cat.codes\n",
" adult_data['race'] = adult_data['race'].cat.codes\n",
" adult_data['gender'] = adult_data['gender'].cat.codes\n",
" adult_data['native-country'] = adult_data['native-country'].cat.codes\n",
" adult_data['workclass'] = adult_data['workclass'].cat.codes\n",
"\n",
" num_columns = adult_data.select_dtypes(['int8']).columns\n",
" adult_data[num_columns] = adult_data[num_columns].astype('float64')\n",
" for c in num_columns:\n",
" #adult[c] -= adult[c].mean()\n",
" #adult[c] /= adult[c].std()\n",
" adult_data[c] = (adult_data[c] - adult_data[c].min()) / (adult_data[c].max()-adult_data[c].min())\n",
" display(adult_data.info())\n",
" display(adult_data.head(10))\n",
" \n",
" adult_data = adult_data.to_numpy()\n",
" \n",
"# splite the data to train and test datasets\n",
" X_train, X_test, y_train, y_test = train_test_split(adult_data[:,:-2],adult_data[:,-2:], test_size=0.07, random_state=rs)\n",
"# the names of the features\n",
" names = ['age','workclass','educational-num','marital-status','occupation',\n",
" 'relationship','race','gender','capital-gain','capital-loss','hours-per-week','native-country']\n",
" Features_number = len(X_train[0])"
]
},
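{
"cell_type": "markdown",
"metadata": {},
"source": [
"The per-column `(x - min) / (max - min)` scaling used above is standard min-max normalization; as a minimal sketch, the same result can be obtained with scikit-learn's `MinMaxScaler` (the toy array below is illustrative only):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch: column-wise min-max scaling with scikit-learn,\n",
"# equivalent to the manual (x - min) / (max - min) loop above.\n",
"from sklearn.preprocessing import MinMaxScaler\n",
"\n",
"demo = np.array([[1.0, 10.0], [2.0, 20.0], [4.0, 40.0]])\n",
"scaled = MinMaxScaler().fit_transform(demo)\n",
"print(scaled)  # each column now lies in [0, 1]"
]
},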
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"if data_set == 'synthatic':\n",
" #generate the data\n",
" X, y = make_classification(n_samples=1000000, n_features=10, n_redundant=3, n_repeated=2, #n_classes=3, \n",
" n_informative=5, n_clusters_per_class=4, \n",
" random_state=42)\n",
" y = pd.DataFrame(data=y, columns=[\"y\"])\n",
" y = pd.get_dummies(y['y'], prefix='y')\n",
" y = y.to_numpy()\n",
" X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.07, random_state=rs)\n",
" # the names of the features\n",
" names = ['X(0)','X(1)','X(2)','X(3)','X(4)','X(5)','X(6)','X(7)','X(8)','X(9)']\n",
" Features_number = len(X_train[0])"
]
},
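{
"cell_type": "markdown",
"metadata": {},
"source": [
"A note on the `make_classification` call above: the feature budget is fully accounted for, since `n_informative=5 + n_redundant=3 + n_repeated=2` equals `n_features=10`, so no extra noise features are added. A minimal sanity check of the generated shapes (guarded by the same flag spelling the notebook uses):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sanity check: 5 informative + 3 redundant + 2 repeated = 10 features\n",
"if data_set == 'synthatic':  # spelling matches the flag used in this notebook\n",
"    print(X.shape)        # (1000000, 10)\n",
"    print(y.shape)        # (1000000, 2) after one-hot encoding\n",
"    print(X_train.shape, X_test.shape)"
]
},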
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['temp_hand',\n",
" 'acceleration_16_x_hand',\n",
" 'acceleration_16_y_hand',\n",
" 'acceleration_16_z_hand',\n",
" 'acceleration_6_x_hand',\n",
" 'acceleration_6_y_hand',\n",
" 'acceleration_6_z_hand',\n",
" 'gyroscope_x_hand',\n",
" 'gyroscope_y_hand',\n",
" 'gyroscope_z_hand',\n",
" 'magnetometer_x_hand',\n",
" 'magnetometer_y_hand',\n",
" 'magnetometer_z_hand',\n",
" 'temp_chest',\n",
" 'acceleration_16_x_chest',\n",
" 'acceleration_16_y_chest',\n",
" 'acceleration_16_z_chest',\n",
" 'acceleration_6_x_chest',\n",
" 'acceleration_6_y_chest',\n",
" 'acceleration_6_z_chest',\n",
" 'gyroscope_x_chest',\n",
" 'gyroscope_y_chest',\n",
" 'gyroscope_z_chest',\n",
" 'magnetometer_x_chest',\n",
" 'magnetometer_y_chest',\n",
" 'magnetometer_z_chest',\n",
" 'temp_ankle',\n",
" 'acceleration_16_x_ankle',\n",
" 'acceleration_16_y_ankle',\n",
" 'acceleration_16_z_ankle',\n",
" 'acceleration_6_x_ankle',\n",
" 'acceleration_6_y_ankle',\n",
" 'acceleration_6_z_ankle',\n",
" 'gyroscope_x_ankle',\n",
" 'gyroscope_y_ankle',\n",
" 'gyroscope_z_ankle',\n",
" 'magnetometer_x_ankle',\n",
" 'magnetometer_y_ankle',\n",
" 'magnetometer_z_ankle']"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"if data_set == 'activity':\n",
" #Load dataset into a pandas DataFrame\n",
" activity = pd.read_csv(\"activity_3_original.csv\", sep=',')\n",
"# drop some features that have non value in the majority of the samples\n",
" to_drop = ['subject', 'timestamp', 'heart_rate','activityID']\n",
" activity.drop(axis=1, columns=to_drop, inplace=True)\n",
"# prepare the truth\n",
" activity = pd.concat([activity,pd.get_dummies(activity['motion'], prefix='motion')],axis=1)\n",
" activity.drop('motion', axis=1, inplace=True)\n",
" class_label = [ 'motion_n', 'motion_y']\n",
" predictors = [a for a in activity.columns.values if a not in class_label]\n",
"\n",
" for p in predictors:\n",
" activity[p].fillna(activity[p].mean(), inplace=True)\n",
"\n",
" display(predictors)\n",
" for p in predictors:\n",
" activity[p] = (activity[p]-activity[p].min()) / (activity[p].max() - activity[p].min())\n",
" activity[p].astype('float32')\n",
" activity = activity.to_numpy()\n",
" X_train, X_test, y_train, y_test = train_test_split(activity[:,:-2],activity[:,-2:], test_size=0.07, random_state=rs)\n",
" # the names of the features\n",
" names = ['temp_hand','acceleration_16_x_hand',\n",
" 'acceleration_16_y_hand','acceleration_16_z_hand','acceleration_6_x_hand',\n",
" 'acceleration_6_y_hand','acceleration_6_z_hand','gyroscope_x_hand','gyroscope_y_hand',\n",
" 'gyroscope_z_hand','magnetometer_x_hand','magnetometer_y_hand','magnetometer_z_hand',\n",
" 'temp_chest','acceleration_16_x_chest','acceleration_16_y_chest','acceleration_16_z_chest','acceleration_6_x_chest',\n",
" 'acceleration_6_y_chest','acceleration_6_z_chest','gyroscope_x_chest','gyroscope_y_chest','gyroscope_z_chest',\n",
" 'magnetometer_x_chest','magnetometer_y_chest','magnetometer_z_chest','temp_ankle','acceleration_16_x_ankle',\n",
" 'acceleration_16_y_ankle','acceleration_16_z_ankle','acceleration_6_x_ankle','acceleration_6_y_ankle',\n",
" 'acceleration_6_z_ankle','gyroscope_x_ankle','gyroscope_y_ankle','gyroscope_z_ankle','magnetometer_x_ankle',\n",
" 'magnetometer_y_ankle','magnetometer_z_ankle']\n",
" Features_number = len(X_train[0])\n"
]
},
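{
"cell_type": "markdown",
"metadata": {},
"source": [
"Since `pd.get_dummies` orders the new label columns alphabetically (`motion_n`, then `motion_y`), the last two columns of `activity` hold the one-hot motion labels, which is why `y_train` and `y_test` are taken from `activity[:,-2:]`. A minimal sketch of recovering integer class indices from the one-hot labels:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# recover integer class indices from the one-hot labels:\n",
"# column 0 is motion_n (no motion), column 1 is motion_y (motion)\n",
"if data_set == 'activity':\n",
"    y_train_idx = np.argmax(y_train, axis=1)\n",
"    print(y_train_idx[:10])"
]
},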
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Train on 1806870 samples, validate on 136002 samples\n",
"Epoch 1/2\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[per-batch progress output condensed; epoch 1 ended near loss: 0.0687 - accuracy: 0.97]\n",
"Epoch 2/2\n",
"[epoch 2 per-batch progress output condensed]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"1661024/1806870 [==========================>...] - ETA: 15s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0229 - accuracy: 0.992 - ETA: 14s - loss: 0.0228 - accuracy: 0.992 - ETA: 14s - loss: 0.0228 - accuracy: 0.992 - ETA: 14s - loss: 0.0228 - accuracy: 0.992 - ETA: 14s - loss: 0.0228 - accuracy: 0.992 - ETA: 14s - loss: 0.0228 - accuracy: 0.992 - ETA: 14s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0228 - accuracy: 0.992 - ETA: 13s - loss: 0.0227 - accuracy: 0.992 - ETA: 13s - loss: 0.0227 - accuracy: 0.992 - ETA: 13s - loss: 0.0227 - accuracy: 0.992 - ETA: 13s - loss: 0.0227 - accuracy: 0.992 - ETA: 13s - loss: 0.0227 - accuracy: 0.992 - ETA: 13s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0227 - accuracy: 0.992 - ETA: 12s - loss: 0.0226 - accuracy: 0.992 - ETA: 12s - loss: 0.0226 - accuracy: 0.992 - ETA: 12s - loss: 0.0226 - accuracy: 0.992 - ETA: 12s - loss: 0.0226 - accuracy: 0.992 - ETA: 12s - loss: 0.0226 - accuracy: 0.992 - ETA: 12s - loss: 0.0226 - accuracy: 0.992 - ETA: 12s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 11s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - 
accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0226 - accuracy: 0.992 - ETA: 10s - loss: 0.0225 - accuracy: 0.992 - ETA: 10s - loss: 0.0225 - accuracy: 0.992 - ETA: 10s - loss: 0.0225 - accuracy: 0.992 - ETA: 10s - loss: 0.0225 - accuracy: 0.992 - ETA: 10s - loss: 0.0225 - accurac
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"1806870/1806870 [==============================] - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0219 - accuracy: 0.99 - ETA: 4s - loss: 0.0219 - accuracy: 0.99 - ETA: 4s - loss: 0.0219 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0220 - accuracy: 0.99 - ETA: 4s - loss: 0.0219 - accuracy: 0.99 - ETA: 4s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0219 - accuracy: 0.99 - ETA: 3s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0218 - accuracy: 0.99 - ETA: 2s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 1s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 
0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0217 - accuracy: 0.99 - ETA: 0s - loss: 0.0216 - accuracy: 0.99 - ETA: 0s - loss: 0.0216 - accuracy: 0.99 - ETA:
]
}
],
"source": [
"#begin federated\n",
"\n",
"earlystopping = EarlyStopping(monitor = 'val_loss',\n",
" min_delta = 0.01,\n",
" patience = 50,\n",
" verbose = 0,\n",
" baseline = 2,\n",
" restore_best_weights = True)\n",
"\n",
"checkpoint = ModelCheckpoint('test.h8',\n",
" monitor='val_loss',\n",
" mode='min',\n",
" save_best_only=True,\n",
" verbose=0)\n",
" \n",
"model = Sequential()\n",
"model.add(Dense(70, input_dim=Features_number, activation='relu'))\n",
"model.add(Dense(50, activation='relu'))\n",
"model.add(Dense(50, activation='relu'))\n",
"model.add(Dense(2, activation='softmax'))\n",
"model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n",
"history = model.fit(X_train, y_train,\n",
"epochs=2,\n",
"validation_data=(X_test, y_test),\n",
"callbacks = [checkpoint, earlystopping],\n",
"shuffle=True)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"#AUXILIARY METHODS FOR FEDERATED LEARNING\n",
"\n",
"# RETURN INDICES TO LAYERS WITH WEIGHTS AND BIASES\n",
"def trainable_layers(model):\n",
" return [i for i, layer in enumerate(model.layers) if len(layer.get_weights()) > 0]\n",
"\n",
"# RETURN WEIGHTS AND BIASES OF A MODEL\n",
"def get_parameters(model):\n",
" weights = []\n",
" biases = []\n",
" index = trainable_layers(model)\n",
" for i in index:\n",
" weights.append(copy.deepcopy(model.layers[i].get_weights()[0]))\n",
" biases.append(copy.deepcopy(model.layers[i].get_weights()[1])) \n",
" \n",
" return weights, biases\n",
" \n",
"# SET WEIGHTS AND BIASES OF A MODEL\n",
"def set_parameters(model, weights, biases):\n",
" index = trainable_layers(model)\n",
" for i, j in enumerate(index):\n",
" model.layers[j].set_weights([weights[i], biases[i]])\n",
" \n",
"# DEPRECATED: RETURN THE GRADIENTS OF THE MODEL AFTER AN UPDATE \n",
"def get_gradients(model, inputs, outputs):\n",
" \"\"\" Gets gradient of model for given inputs and outputs for all weights\"\"\"\n",
" grads = model.optimizer.get_gradients(model.total_loss, model.trainable_weights)\n",
" symb_inputs = (model._feed_inputs + model._feed_targets + model._feed_sample_weights)\n",
" f = K.function(symb_inputs, grads)\n",
" x, y, sample_weight = model._standardize_user_data(inputs, outputs)\n",
" output_grad = f(x + y + sample_weight)\n",
" \n",
" w_grad = [w for i,w in enumerate(output_grad) if i%2==0]\n",
" b_grad = [w for i,w in enumerate(output_grad) if i%2==1]\n",
" \n",
" return w_grad, b_grad\n",
"\n",
"# RETURN THE DIFFERENCE OF MODELS' WEIGHTS AND BIASES AFTER AN UPDATE \n",
"# NOTE: LEARNING RATE IS APPLIED, SO THE UPDATE IS DIFFERENT FROM THE\n",
"# GRADIENTS. IN CASE VANILLA SGD IS USED, THE GRADIENTS ARE OBTAINED\n",
"# AS (UPDATES / LEARNING_RATE)\n",
"def get_updates(model, inputs, outputs, batch_size, epochs):\n",
" w, b = get_parameters(model)\n",
" #model.train_on_batch(inputs, outputs)\n",
" model.fit(inputs, outputs, batch_size=batch_size, epochs=epochs, verbose=0)\n",
" w_new, b_new = get_parameters(model)\n",
" \n",
" weight_updates = [old - new for old,new in zip(w, w_new)]\n",
" bias_updates = [old - new for old,new in zip(b, b_new)]\n",
" \n",
" return weight_updates, bias_updates\n",
"\n",
"# UPDATE THE MODEL'S WEIGHTS AND PARAMETERS WITH AN UPDATE\n",
"def apply_updates(model, eta, w_new, b_new):\n",
" w, b = get_parameters(model)\n",
" new_weights = [theta - eta*delta for theta,delta in zip(w, w_new)]\n",
" new_biases = [theta - eta*delta for theta,delta in zip(b, b_new)]\n",
" set_parameters(model, new_weights, new_biases)\n",
" \n",
"# FEDERATED AGGREGATION FUNCTION\n",
"def aggregate(n_layers, n_peers, f, w_updates, b_updates):\n",
" agg_w = [f([w_updates[j][i] for j in range(n_peers)], axis=0) for i in range(n_layers)]\n",
" agg_b = [f([b_updates[j][i] for j in range(n_peers)], axis=0) for i in range(n_layers)]\n",
" return agg_w, agg_b\n",
"\n",
"# SOLVE NANS\n",
"def nans_to_zero(W, B):\n",
" W0 = [np.nan_to_num(w, nan=0.0, posinf=0.0, neginf=0.0) for w in W]\n",
" B0 = [np.nan_to_num(b, nan=0.0, posinf=0.0, neginf=0.0) for b in B]\n",
" return W0, B0\n",
"\n",
"def build_forest(X,y):\n",
" clf=RandomForestClassifier(n_estimators=1000, max_depth=7, random_state=0, verbose = 1)\n",
" clf.fit(X,y)\n",
" return clf\n",
"\n",
"# COMPUTE EUCLIDEAN DISTANCE OF WEIGHTS\n",
"def dist_weights(w_a, w_b):\n",
" wf_a = flatten_weights(w_a)\n",
" wf_b = flatten_weights(w_b)\n",
" return euclidean(wf_a, wf_b)\n",
"\n",
"# TRANSFORM ALL WEIGHT TENSORS TO 1D ARRAY\n",
"def flatten_weights(w_in):\n",
" h = w_in[0].reshape(-1)\n",
" for w in w_in[1:]:\n",
" h = np.append(h, w.reshape(-1))\n",
" return h\n",
" "
]
},
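  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch (not part of the original experiments): how the helpers above behave.\n",
    "# 'aggregate' reduces the per-peer update tensors layer by layer (here with np.mean), and\n",
    "# 'flatten_weights'/'dist_weights' turn weight lists into 1D vectors to measure update distances.\n",
    "# Toy example with 2 peers and 2 layers of fake updates:\n",
    "toy_w = [[np.ones((2, 2)), np.ones(2)], [3 * np.ones((2, 2)), 3 * np.ones(2)]]\n",
    "toy_b = [[np.zeros(2), np.zeros(1)], [np.ones(2), np.ones(1)]]\n",
    "agg_w, agg_b = aggregate(2, 2, np.mean, toy_w, toy_b)\n",
    "print(agg_w[0])                           # element-wise mean of the peers' layer-0 updates -> all 2.0\n",
    "print(dist_weights(toy_w[0], toy_w[1]))   # Euclidean distance between the flattened updates"
   ]
  },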
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"def byzantine_attack_data(inputs):\n",
" attack_persentage = 40\n",
" number_of_attacked_samples = len(inputs) * attack_persentage /100\n",
" number_of_attacked_samples = int(number_of_attacked_samples)\n",
" sampels_attacked = random.sample(range(len(inputs)), number_of_attacked_samples)\n",
" if data_set == 'adult':\n",
" z=0\n",
" C=0\n",
" z=inputs.max(axis = 0)\n",
" C=inputs.min(axis = 0)\n",
" for i in range(len(inputs)):\n",
" if i in sampels_attacked:\n",
" for j in range(len(inputs[0])):\n",
" inputs[i][j]= random.uniform(z[j], C[j])\n",
" return inputs"
]
},
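  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch (not part of the original pipeline): the effect of byzantine_attack_data\n",
    "# when its dataset guard is active -- 40% of the rows are re-drawn uniformly within each\n",
    "# feature's observed [min, max] range. Replicated inline on a toy matrix so it runs for any\n",
    "# data_set value; the poisoning attack below does the same but only on the targeted features.\n",
    "toy = np.array([[0.0, 10.0], [1.0, 20.0], [2.0, 30.0], [3.0, 40.0], [4.0, 50.0]])\n",
    "lo, hi = toy.min(axis=0), toy.max(axis=0)\n",
    "for row in random.sample(range(len(toy)), int(len(toy) * 40 / 100)):\n",
    "    for col in range(toy.shape[1]):\n",
    "        toy[row][col] = random.uniform(lo[col], hi[col])\n",
    "print(toy)  # two of the five rows are now uniform noise within the per-column ranges"
   ]
  },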
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"def poisoning_attack_data(h, feature_attacked):\n",
" attack_persentage = 60\n",
" number_of_attacked_samples = len(h) * attack_persentage /100\n",
" number_of_attacked_samples = int(number_of_attacked_samples)\n",
" sampels_attacked = random.sample(range(len(h)), number_of_attacked_samples)\n",
" if data_set == 'adult':\n",
" z=0\n",
" C=0\n",
" z=h.max(axis = 0)\n",
" C=h.min(axis = 0)\n",
" for i in range(len(h)):\n",
" if i in sampels_attacked:\n",
" for j in range(len(feature_attacked)):\n",
" h[i][feature_attacked[j]]= random.uniform(z[feature_attacked[j]], C[feature_attacked[j]])\n",
" return h"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def label_flipping_attack_data(z):\n",
" attack_persentage = 50\n",
" number_of_attacked_samples = len(z) * attack_persentage /100\n",
" number_of_attacked_samples = int(number_of_attacked_samples)\n",
" sampels_attacked = random.sample(range(len(z)), number_of_attacked_samples)\n",
" if data_set == 'adult':\n",
" for i in range(len(z)):\n",
" if i in sampels_attacked:\n",
" for j in range(len(z[i])):\n",
" if z[i][j] == 0:\n",
" z[i][j] = 1\n",
" else:\n",
" z[i][j] = 0\n",
" \n",
" return z"
]
},
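  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch (not part of the original pipeline): label_flipping_attack_data inverts\n",
    "# the one-hot labels of half of the samples, so a [1, 0] label becomes [0, 1] and vice versa.\n",
    "toy_labels = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n",
    "for row in random.sample(range(len(toy_labels)), int(len(toy_labels) * 50 / 100)):\n",
    "    toy_labels[row] = 1 - toy_labels[row]\n",
    "print(toy_labels)  # two of the four one-hot labels are now swapped"
   ]
  },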
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"# scan the forest for trees maches the wrong predictions of the black-box\n",
"def scan_wrong(forest_predictions, FL_predict1, forest , y_test_local, X_test_local):\n",
" sum_feature_improtance= 0\n",
" overal_wrong_feature_importance = 0\n",
" counter = 0\n",
" second_counter = 0\n",
" never_seen = 0\n",
" avr_wrong_importance = 0\n",
" FL_predict1 = np.argmax(FL_predict1, axis=1)\n",
" forest_predictions = np.argmax(forest_predictions, axis=1)\n",
" y_test_local = np.argmax(y_test_local, axis=1)\n",
" FL_wrong = 0\n",
" for i in range (len(FL_predict1)):\n",
" i_tree = 0\n",
"# if the black-box got a wrong prediction\n",
" if (FL_predict1[i] != y_test_local[i]):\n",
" FL_wrong = FL_wrong + 1\n",
"# getting the prediction of the trees one by one\n",
" for tree_in_forest in forest.estimators_:\n",
" sample = X_test_local[i].reshape(1, -1)\n",
" temp = forest.estimators_[i_tree].predict(sample)\n",
" temp = np.argmax(temp, axis=1)\n",
"# print('the prediction of the t')\n",
"# print(temp)\n",
" i_tree = i_tree + 1\n",
"# if the prediction of the tree maches the predictions of the black-box\n",
" if(FL_predict1[i] == temp):\n",
"# getting the features importances\n",
" sum_feature_improtance = sum_feature_improtance + tree_in_forest.feature_importances_\n",
" counter = counter + 1\n",
"# if we have trees maches the black-box predictions\n",
" if(counter>0):\n",
" ave_feature_importence = sum_feature_improtance/counter\n",
" overal_wrong_feature_importance = ave_feature_importence + overal_wrong_feature_importance\n",
" second_counter = second_counter + 1\n",
" counter = 0\n",
" sum_feature_improtance = 0\n",
"# if there is no trees maches the black-box predictions\n",
" else:\n",
" if(FL_predict1[i] != y_test_local[i]):\n",
" never_seen = never_seen +1\n",
"\n",
"# getting the average features importances for all the samples that had wrong predictions.\n",
" if(second_counter>0):\n",
" avr_wrong_importance = overal_wrong_feature_importance / second_counter\n",
" return avr_wrong_importance"
]
},
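  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch (not part of the original pipeline) of the detection rule applied in the\n",
    "# training loop below: the per-round change of the forest-based feature importances is compared\n",
    "# against alpha times the average change of the previous rounds; in a flagged round, any single\n",
    "# feature holding more than beta of the total change is reported as a targeted feature.\n",
    "past_changes = [np.array([0.01, 0.02, 0.01]), np.array([0.02, 0.01, 0.02])]\n",
    "this_round = np.array([0.01, 0.01, 0.08])   # one feature suddenly changes a lot\n",
    "avg_past = sum(past_changes) / len(past_changes)\n",
    "total_change = this_round.sum()\n",
    "if total_change >= alpha * avg_past.sum():\n",
    "    for idx, change in enumerate(this_round):\n",
    "        if change > beta * total_change:\n",
    "            print('suspicious change on feature index', idx)"
   ]
  },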
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[0, 1, 2, 3]"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"trainable_layers(model)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"([array([[ 0.08338594, -0.1254255 , -0.49211267, ..., 0.24750227,\n",
" -0.65377104, -0.05224424],\n",
" [-0.01734522, -0.1021487 , 0.14053608, ..., -0.28375858,\n",
" -0.15333907, -0.21018188],\n",
" [ 0.0459124 , 0.17106615, -0.2748136 , ..., 0.29032764,\n",
" -0.12925142, 0.06985184],\n",
" ...,\n",
" [-0.15448453, 0.17909111, 0.18795453, ..., 0.46541557,\n",
" 0.01830631, -0.0534319 ],\n",
" [-0.15942477, 0.09147607, 0.06007228, ..., 0.11995307,\n",
" 0.5220185 , -0.16314192],\n",
" [ 0.09670959, -0.1825741 , -0.24682267, ..., 0.21973692,\n",
" 0.26263914, 0.1036981 ]], dtype=float32),\n",
" array([[-0.13370994, 0.05705584, -0.01060855, ..., 0.03942909,\n",
" -0.05360872, 0.18457419],\n",
" [ 0.18660966, 0.02259383, -0.06519584, ..., 0.12901685,\n",
" -0.03150385, -0.05975418],\n",
" [-0.18495369, -0.3026115 , 0.15492044, ..., 0.3693038 ,\n",
" 0.42332208, 0.17241667],\n",
" ...,\n",
" [-0.41482276, -0.14596964, -0.10000814, ..., -0.07026828,\n",
" -0.13250498, -0.11739882],\n",
" [-0.1614849 , -0.264906 , 0.1812628 , ..., 0.51671666,\n",
" 0.15924722, 0.18865098],\n",
" [-0.22190864, 0.16413453, -0.15392351, ..., -0.02723815,\n",
" -0.19859377, -0.2072882 ]], dtype=float32),\n",
" array([[ 0.47770685, -0.7960992 , 0.4646107 , ..., 0.04795517,\n",
" -0.10576709, 0.5649921 ],\n",
" [-0.37207457, -0.25275326, -0.18699329, ..., 0.24836566,\n",
" -0.3587133 , -0.03917937],\n",
" [ 0.087133 , -0.14499295, -0.01567947, ..., -0.02826147,\n",
" 0.18539242, -0.2073498 ],\n",
" ...,\n",
" [-0.54771024, -0.56772953, -1.081203 , ..., 0.84306604,\n",
" 0.14480972, 0.12935087],\n",
" [-1.2092329 , 0.72066 , -0.76670545, ..., 0.3710571 ,\n",
" -0.02872824, 0.13071369],\n",
" [-0.24432653, -0.20408279, 0.18815796, ..., -0.24326074,\n",
" -0.81241286, -0.38458872]], dtype=float32),\n",
" array([[ 1.1354454 , -0.8150371 ],\n",
" [ 0.8239793 , -0.38024253],\n",
" [ 0.18105277, -0.22851984],\n",
" [ 0.36343297, -0.68712914],\n",
" [-0.14369975, 0.20164204],\n",
" [-1.3597993 , 1.1511468 ],\n",
" [ 0.17626402, -0.44736794],\n",
" [ 0.783137 , -1.2214484 ],\n",
" [ 0.2721217 , -0.06518261],\n",
" [ 0.8918733 , -0.77505213],\n",
" [-0.656868 , 1.1960154 ],\n",
" [-1.6819351 , 1.3945241 ],\n",
" [ 0.61881614, -0.7248123 ],\n",
" [-0.21632305, 0.10406036],\n",
" [ 0.17342244, 0.00396303],\n",
" [ 1.9393425 , -2.2392154 ],\n",
" [ 0.25582054, -0.09821833],\n",
" [-1.4448843 , 1.351973 ],\n",
" [-0.7736038 , 1.014288 ],\n",
" [ 1.5285544 , -1.3971529 ],\n",
" [ 2.319214 , -1.9343866 ],\n",
" [ 0.4609306 , -0.22342049],\n",
" [ 0.17103793, -0.22433195],\n",
" [ 0.84780014, -0.5512217 ],\n",
" [ 0.16438304, -0.4931989 ],\n",
" [ 1.15009 , -1.2441653 ],\n",
" [-0.7512086 , 0.69693834],\n",
" [-1.7812865 , 1.8653326 ],\n",
" [ 0.9114694 , -1.0244646 ],\n",
" [ 0.68447626, -0.45571706],\n",
" [-0.9834174 , 1.2978134 ],\n",
" [ 2.1663804 , -1.6653944 ],\n",
" [-0.2522568 , -0.24315625],\n",
" [-0.30284685, 0.6635906 ],\n",
" [-1.0407605 , 0.75330424],\n",
" [-1.5959861 , 1.6432168 ],\n",
" [-1.6533945 , 1.4116566 ],\n",
" [-1.3818094 , 0.96237814],\n",
" [ 0.49473518, -0.7128965 ],\n",
" [-1.1544157 , 1.2103665 ],\n",
" [-0.31084502, -0.06416508],\n",
" [ 0.07730638, -0.34380186],\n",
" [ 0.00517065, -0.07974567],\n",
" [ 0.76227933, -0.46143502],\n",
" [-2.0212495 , 2.1503792 ],\n",
" [-1.419235 , 1.1352861 ],\n",
" [ 0.40954936, -0.76466006],\n",
" [-0.592412 , 1.0149235 ],\n",
" [-0.9884663 , 1.3155804 ],\n",
" [-1.633649 , 1.6489463 ]], dtype=float32)],\n",
" [array([-1.58836432e-02, -1.20011568e-02, -2.30990946e-02, 0.00000000e+00,\n",
" 0.00000000e+00, 0.00000000e+00, -6.30686581e-02, -6.26324415e-02,\n",
" -3.39236371e-02, -6.14111274e-02, 0.00000000e+00, -4.05886732e-02,\n",
" 0.00000000e+00, -6.10261457e-03, -3.06573324e-02, 0.00000000e+00,\n",
" -1.63990387e-03, 0.00000000e+00, -1.11483254e-01, -1.81604289e-02,\n",
" -5.61644835e-03, 1.37938242e-02, 0.00000000e+00, -2.23437846e-02,\n",
" 0.00000000e+00, -6.16606697e-02, 1.70837268e-01, 5.55998720e-02,\n",
" -1.64261945e-02, 1.20512873e-01, -6.44331053e-03, 0.00000000e+00,\n",
" -3.33447531e-02, -5.07780984e-02, 3.10869161e-02, 7.47342259e-02,\n",
" 7.46760815e-02, -5.52449860e-02, -8.27614740e-02, -2.39871517e-02,\n",
" -3.01994607e-02, -4.17327993e-02, -1.15456417e-01, 8.73452723e-02,\n",
" -7.59378746e-02, 0.00000000e+00, -3.00336909e-02, 6.94403024e-09,\n",
" -4.00215089e-02, -1.37547646e-02, -4.61019538e-02, 5.88711686e-02,\n",
" -6.63833246e-02, -2.25303844e-02, -1.15773613e-02, 3.89949568e-02,\n",
" -3.27692814e-02, -1.54451123e-02, 1.45327836e-01, -7.80039234e-03,\n",
" -3.16219591e-02, 0.00000000e+00, 1.14573305e-02, -2.14286316e-02,\n",
" -3.89366113e-02, 1.13435954e-01, -4.89240699e-02, -1.66841432e-01,\n",
" -3.22352685e-02, -1.43911066e-02], dtype=float32),\n",
" array([-1.1628173e-01, 1.4605005e-01, -8.3894879e-02, -1.7480729e-02,\n",
" 9.5676459e-02, -1.5144591e-01, -8.5667908e-02, -2.8433751e-02,\n",
" -5.2900422e-02, -4.8166331e-02, 3.3510438e-01, -9.0990268e-02,\n",
" 5.9481107e-02, 4.4261031e-02, 1.3082844e-01, -1.3972101e-01,\n",
" -7.2492525e-02, -3.3382095e-02, -2.3739910e-01, -1.8229088e-01,\n",
" -9.5385693e-02, 3.0102465e-02, 6.0827412e-02, 3.3912200e-01,\n",
" 7.5339697e-02, -2.1391104e-01, -1.0858524e-01, 3.6047959e-01,\n",
" -3.7770301e-01, 9.7297080e-02, -1.0461237e-01, -7.4935108e-02,\n",
" -8.2315236e-02, 1.9678907e-01, 1.1734279e-06, -2.2777809e-01,\n",
" -1.2057750e-01, -2.1283591e-02, 1.3623047e-01, -1.9323155e-01,\n",
" -3.3379752e-02, -3.5926573e-02, 2.8303096e-01, 9.3418181e-02,\n",
" -1.3997810e-01, 9.8838598e-02, -2.3493488e-01, -1.9615906e-01,\n",
" -1.0158879e-02, -7.1108431e-02], dtype=float32),\n",
" array([-0.33614156, -0.19233358, -0.02823093, 0.48520288, 0.23908217,\n",
" 0.27522194, 0.01314433, -0.01497711, -0.0828162 , -0.23802279,\n",
" 0.32634258, 0.12124245, 0.18269299, 0.08887233, -0.06745057,\n",
" -0.556095 , -0.11532133, 0.40208554, -0.01085546, 0.4505257 ,\n",
" -0.36126408, 0.01500582, 0.07917931, 0.09815152, 0.33817917,\n",
" -0.2813558 , 0.35876733, -0.36583313, -0.1797759 , -0.13289435,\n",
" 0.04141649, -0.31939384, -0.10274614, 0.1994237 , -0.14661174,\n",
" -0.42591807, 0.28506443, 0.04345732, 0.2656843 , -0.30315512,\n",
" -0.07892313, -0.06843591, -0.09118145, 0.41733742, 0.17977186,\n",
" 0.25952908, 0.4197849 , 0.3480925 , -0.3922683 , -0.05383924],\n",
" dtype=float32),\n",
" array([ 0.30379787, -0.3038005 ], dtype=float32)])"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_parameters(model)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"([array([[ 2.8014183e-06, 0.0000000e+00, -4.5895576e-06, ...,\n",
" -2.2205949e-02, 0.0000000e+00, 0.0000000e+00],\n",
" [ 2.1923333e-06, 0.0000000e+00, -7.8976154e-06, ...,\n",
" 4.9105823e-02, 0.0000000e+00, 0.0000000e+00],\n",
" [ 1.5608966e-06, 0.0000000e+00, -3.7252903e-06, ...,\n",
" 7.3180795e-03, 0.0000000e+00, 0.0000000e+00],\n",
" ...,\n",
" [ 1.5050173e-06, 0.0000000e+00, -7.4207783e-06, ...,\n",
" -2.6787940e-01, 0.0000000e+00, 0.0000000e+00],\n",
" [ 1.8626451e-06, 0.0000000e+00, -8.1695616e-06, ...,\n",
" 1.6834244e-02, 0.0000000e+00, 0.0000000e+00],\n",
" [ 1.7285347e-06, 0.0000000e+00, -4.6193600e-06, ...,\n",
" -5.3731605e-02, 0.0000000e+00, 0.0000000e+00]], dtype=float32),\n",
" array([[ 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, ...,\n",
" -4.1723251e-07, -1.3783574e-07, 0.0000000e+00],\n",
" [ 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, ...,\n",
" 0.0000000e+00, 0.0000000e+00, 0.0000000e+00],\n",
" [ 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, ...,\n",
" 0.0000000e+00, 0.0000000e+00, 0.0000000e+00],\n",
" ...,\n",
" [-8.3480060e-02, -7.9958737e-03, 0.0000000e+00, ...,\n",
" 1.0635569e-01, -3.4096837e-04, 6.9890119e-02],\n",
" [ 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, ...,\n",
" 0.0000000e+00, 0.0000000e+00, 0.0000000e+00],\n",
" [ 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, ...,\n",
" 0.0000000e+00, 0.0000000e+00, 0.0000000e+00]], dtype=float32),\n",
" array([[-0.04616958, 0.59762466, -0.26143235, ..., -0.06850739,\n",
" -0.4217179 , 0.08980078],\n",
" [-0.17200631, 0.3219571 , 0.27451754, ..., 0.07618259,\n",
" 0.48217025, 0.26553166],\n",
" [ 0. , 0. , 0. , ..., 0. ,\n",
" 0. , 0. ],\n",
" ...,\n",
" [ 0.37040782, 0.77656084, -0.17277646, ..., -0.04284477,\n",
" 0.27722144, -0.29050782],\n",
" [ 0.49921095, -0.3744073 , -0.79761034, ..., 0.06160343,\n",
" 0.26952648, 0.44993538],\n",
" [ 0.12304485, 0.08797711, 0.14218739, ..., 0.40071172,\n",
" 0.43845785, 0.22023249]], dtype=float32),\n",
" array([[ 0.01556432, -0.01553738],\n",
" [-0.80137116, 0.8013473 ],\n",
" [ 0.05863538, -0.05863395],\n",
" [ 0.09222463, -0.09223729],\n",
" [ 0.22395234, -0.22392958],\n",
" [-0.25126195, 0.25127405],\n",
" [ 0.19329323, -0.1932973 ],\n",
" [-0.42185718, 0.4218619 ],\n",
" [-0.09126899, 0.09127331],\n",
" [-0.13490325, 0.13491231],\n",
" [ 0.07229513, -0.07227075],\n",
" [-0.1858536 , 0.18585992],\n",
" [ 0.3444129 , -0.3444115 ],\n",
" [-0.04673225, 0.04675576],\n",
" [-0.744485 , 0.7444947 ],\n",
" [ 0.59019566, -0.5901569 ],\n",
" [ 0.00766785, -0.0076676 ],\n",
" [ 0.1348064 , -0.13479984],\n",
" [ 0.19738197, -0.19736266],\n",
" [ 0.20221901, -0.20221877],\n",
" [ 0.2895143 , -0.28952658],\n",
" [ 0.20282978, -0.20283176],\n",
" [-0.76195276, 0.7619654 ],\n",
" [ 0.26977128, -0.2697831 ],\n",
" [ 0.3250053 , -0.32500228],\n",
" [-0.15308547, 0.15309238],\n",
" [-0.10742784, 0.10744089],\n",
" [-0.15101504, 0.15102363],\n",
" [-0.6646027 , 0.6646162 ],\n",
" [ 0.23995936, -0.23995501],\n",
" [-0.30166405, 0.30165863],\n",
" [ 0.4730208 , -0.47301006],\n",
" [ 0. , 0. ],\n",
" [-0.02773407, 0.02774608],\n",
" [ 0.07481575, -0.07481062],\n",
" [-0.51466 , 0.51466644],\n",
" [-0.02005684, 0.02005637],\n",
" [-0.11586785, 0.11588269],\n",
" [ 0.16156894, -0.16156316],\n",
" [-0.30240393, 0.3024137 ],\n",
" [ 0.8963616 , -0.89635366],\n",
" [-0.06791203, 0.06791377],\n",
" [-0.17452781, 0.17454016],\n",
" [ 0.08860135, -0.08860841],\n",
" [-0.6942698 , 0.69428754],\n",
" [-0.17503893, 0.17506146],\n",
" [ 0.16636667, -0.16637546],\n",
" [-0.25057983, 0.25059932],\n",
" [ 0.2041834 , -0.20417154],\n",
" [-0.01968396, 0.01968992]], dtype=float32)],\n",
" [array([ 3.3285469e-06, 0.0000000e+00, -1.2369826e-05, 0.0000000e+00,\n",
" 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,\n",
" 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 8.8644736e-03,\n",
" 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,\n",
" 0.0000000e+00, 0.0000000e+00, 6.6848241e-02, 0.0000000e+00,\n",
" 0.0000000e+00, -2.9313583e-03, 0.0000000e+00, 0.0000000e+00,\n",
" 0.0000000e+00, 0.0000000e+00, -9.5326707e-02, -3.0721266e-02,\n",
" 8.1290156e-03, -6.8495750e-02, 0.0000000e+00, 0.0000000e+00,\n",
" 1.8126052e-02, 0.0000000e+00, 0.0000000e+00, -6.3957557e-02,\n",
" -4.2761512e-02, -4.0137991e-03, 2.5934458e-02, 0.0000000e+00,\n",
" 0.0000000e+00, 4.7971878e-02, 8.1122592e-02, -6.5605357e-02,\n",
" 0.0000000e+00, 0.0000000e+00, -1.6420111e-03, 5.2435798e-09,\n",
" 0.0000000e+00, 0.0000000e+00, 4.5959245e-02, -1.8143710e-02,\n",
" 9.4908625e-03, 4.1580014e-04, 0.0000000e+00, -6.7653313e-02,\n",
" 0.0000000e+00, 0.0000000e+00, -1.2950024e-01, 0.0000000e+00,\n",
" 0.0000000e+00, 0.0000000e+00, -1.7437223e-02, 0.0000000e+00,\n",
" -2.3985766e-03, -8.6141318e-02, 0.0000000e+00, 1.0811789e-01,\n",
" 0.0000000e+00, 0.0000000e+00], dtype=float32),\n",
" array([-0.00627401, -0.191559 , 0. , 0.00838837, 0.0529352 ,\n",
" 0.12782945, 0. , 0.0156331 , 0. , 0. ,\n",
" -0.11151084, 0.0588697 , -0.02708764, 0.07176588, -0.02906452,\n",
" 0.11350146, 0. , 0.06241233, 0.14389527, 0.15049464,\n",
" 0.1770727 , -0.03121255, 0.02478044, -0.2777393 , -0.09406517,\n",
" 0.12997295, 0.13041441, -0.1484769 , 0.2955907 , 0.00798142,\n",
" 0. , 0. , 0.0003342 , 0.05636339, 0. ,\n",
" 0.06773217, 0.04719278, 0.12417503, -0.039441 , 0.14855272,\n",
" 0.03889409, 0.10604867, -0.0547139 , -0.06550588, 0.14823543,\n",
" 0.01506494, 0.08808593, 0.1861152 , 0.01942448, 0.11284278],\n",
" dtype=float32),\n",
" array([ 2.54564315e-01, 1.08867824e-01, 5.61506003e-02, -1.75794929e-01,\n",
" 2.33219430e-01, -1.58185065e-02, -1.31240949e-01, 8.01615715e-02,\n",
" 2.55392641e-02, 3.14171731e-01, -1.29695922e-01, -5.60564399e-02,\n",
" -3.62583399e-02, 3.38414431e-01, 2.22075596e-01, 1.01215661e-01,\n",
" 2.62457654e-02, -2.69618690e-01, 1.52954599e-02, -1.20112836e-01,\n",
" 2.02051371e-01, -7.11961389e-02, -3.22458982e-01, 2.04761773e-01,\n",
" 9.38781500e-02, 2.39238501e-01, -9.64630842e-02, 2.50120312e-01,\n",
" -3.17509770e-02, 2.22122446e-01, 3.26190665e-02, 4.22861874e-02,\n",
" -2.37673521e-06, 4.52675968e-02, 2.75943756e-01, 1.95241719e-01,\n",
" -2.34854549e-01, -7.68669993e-02, -7.01770782e-02, 2.47255087e-01,\n",
" 2.31929541e-01, 1.45128936e-01, 4.07781303e-01, -2.12165058e-01,\n",
" 3.37380767e-02, -2.01950014e-01, -1.65518045e-01, -4.14324701e-02,\n",
" 4.91949677e-01, 2.43567675e-01], dtype=float32),\n",
" array([-0.139424 , 0.13942933], dtype=float32)])"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_updates(model, X_train, y_train, 32, 2)"
]
},
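  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Worked check of the update convention above (a sketch, not part of the original pipeline):\n",
    "# get_updates returns delta = w_old - w_new and apply_updates computes w <- w - eta * delta,\n",
    "# so with eta = 1 and peers that start from the global weights, the server step\n",
    "#     w_global <- w_global - mean_i(w_global - w_i_new) = mean_i(w_i_new)\n",
    "# is exactly federated averaging of the locally trained weights.\n",
    "w_global = np.array([1.0, 1.0])\n",
    "local_new = [np.array([0.5, 2.0]), np.array([1.5, 0.0])]\n",
    "deltas = [w_global - w for w in local_new]\n",
    "print(w_global - np.mean(deltas, axis=0))   # equals the mean of the local weights: [1.0, 1.0]"
   ]
  },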
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"W = get_parameters(model)[0]\n",
"B = get_parameters(model)[1]"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"# BASELINE SCENARIO\n",
"#buid the model as base line for the shards (sequential)\n",
"# Number of peers\n",
"#accordin to what we need\n",
"ss = int(len(X_train)/n_peers)\n",
"inputs_in = X_train[0*ss:0*ss+ss]\n",
"outputs_in = y_train[0*ss:0*ss+ss]\n",
"def build_model(X_t, y_t):\n",
" model = Sequential()\n",
" model.add(Dense(70, input_dim=Features_number, activation='relu'))\n",
" model.add(Dense(50, activation='relu'))\n",
" model.add(Dense(50, activation='relu'))\n",
" model.add(Dense(2, activation='softmax'))\n",
" model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])\n",
" model.fit(X_t,\n",
" y_t, \n",
" batch_size=32, \n",
" epochs=250, \n",
" verbose=0,\n",
" validation_data=((X_test, y_test)))\n",
" return model\n"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_1\"\n",
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"dense_1 (Dense) (None, 70) 2800 \n",
"_________________________________________________________________\n",
"dense_2 (Dense) (None, 50) 3550 \n",
"_________________________________________________________________\n",
"dense_3 (Dense) (None, 50) 2550 \n",
"_________________________________________________________________\n",
"dense_4 (Dense) (None, 2) 102 \n",
"=================================================================\n",
"Total params: 9,002\n",
"Trainable params: 9,002\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n"
]
},
{
"data": {
"text/plain": [
"None"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(model.summary())"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"# predict probabilities for test set\n",
"yhat_probs = model.predict(X_test, verbose=0)\n",
"# predict crisp classes for test set\n",
"yhat_classes = model.predict_classes(X_test, verbose=0)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy: 0.997338\n",
"Precision: 0.996095\n",
"Recall: 0.999387\n",
"F1 score: 0.997738\n"
]
}
],
"source": [
"# accuracy: (tp + tn) / (p + n)\n",
"accuracy = accuracy_score(np.argmax(y_test, axis=1), np.argmax(model.predict(X_test), axis=1))\n",
"print('Accuracy: %f' % accuracy)\n",
"# precision tp / (tp + fp)\n",
"precision = precision_score(np.argmax(y_test, axis=1), np.argmax(model.predict(X_test), axis=1))\n",
"print('Precision: %f' % precision)\n",
"# recall: tp / (tp + fn)\n",
"recall = recall_score(np.argmax(y_test, axis=1), np.argmax(model.predict(X_test), axis=1))\n",
"print('Recall: %f' % recall)\n",
"# f1: 2 tp / (2 tp + fp + fn)\n",
"f1 = f1_score(np.argmax(y_test, axis=1), np.argmax(model.predict(X_test), axis=1))\n",
"print('F1 score: %f' % f1)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[55804, 313],\n",
" [ 49, 79836]], dtype=int64)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAQsAAADtCAYAAACoP1B5AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAP6ElEQVR4nO3dbaxlVXnA8f8zw8ugdXgRRSK20nRCpSagUqQ1aVQqjLQRP9gENIUYkmkMGkybtNgvtFoT2w+1klobEqdCo1BCS2oMMp1QjWmCCChFYERGrDKFMuLgS0vk5d6nH/a6nePtvXevI+vMmb3P/5es3HPW2XffxQGerLe9nshMJKnPpnk3QNIwGCwkVTFYSKpisJBUxWAhqYrBQlKVI+bdAGkMzn/TC/P7B5aqrr373qd3Zeb2GTepOYOF1MATB5a4Y9cpVdceefK3Tpxxc2bCYCE1kSzl8rwbMVMGC6mBBJYZ925og4XUQJI8m3VzFkNlsJAaGXvPYuGXTiNie0Q8GBF7I+LKebdnbCJiZ0Tsj4j75t2WWUpgiawqQ7XQwSIiNgMfB94KnA5cHBGnz7dVo/MpYHDLhD+LZbKqDNVCBwvgbGBvZj6cmc8ANwAXzrlNo5KZXwIOzLsds5bAUmZVGapFDxYvBx6ZeL+v1ElTW64sQ7XoE5yxRt1wQ7/mJgc+H1Fj0YPFPuAVE+9PAR6dU1s0YJnw7LhjxcIHizuBbRFxKvCfwEXAO+fbJA1TsLRmR3U8FnrOIjOfA94L7AL2ADdm5v3zbdW4RMT1wO3AaRGxLyIum3ebZiGB5awrQ7XoPQsy8xbglnm3Y6wy8+J5t+FQGXvPYuGDhdRCtynLYCGpwnIaLCT1sGchqUoSPJub592MmVro1ZAVEbFj3m0Yu7F/xys9i5oyVAaLzqj/Qz5MjPw7DpZyU1UZKochUgPdSVnDDQQ1ZhIsjjr2mNzysq2zuPVMHH3Si9h62knD2i7zzWfn3YKpbOEFbI0TBvUd/4T/4Zl8unrcMOQhRo2ZBIstL9vKr/7tu2ZxaxWbzn2k/yI9L3fkbdXXZsaghxg1xv1PJx1Cy0RV6RMRp0XEPRPlRxHx/og4ISJ2R8RD5efx5fqIiKvLaW/3RsRrJ+51abn+oYi4dKL+dRHx9fI7V0dEb8MMFlIDSfBMHlFVeu+V+WBmnpmZZwKvA54CbgauBG7LzG3AbeU9dCe9bStlB/AJgIg4AbgKeD3dQU9XrQSYcs2Oid/rPc3MYCE1sDLBWVOmdC7wrcz8Dt0pbteW+muBt5fXFwLXZefLwHERcTJwPrA7Mw9k5pPAbmB7+WxrZt6emQlcN3GvdbkaIjWyVL/d+8SIuGvi/TWZec06114EXF9en5SZjwFk5mMR8dJSv96JbxvV71ujfkMGC6mBJFiq7zU8kZln9V0UEUcBbwM+0Hfpmk2avn5DDkOkRpZzU1WZwluBr2bm4+X942UIQfm5v9Svd+LbRvWnrFG/IYOF1EC33XtTVZnCxRwcggB8FlhZ0bgU+OeJ+kvKqsg5wA/LcGUXcF5EHF8mNs8DdpXPfhwR55RVkEsm7rUuhyFSA60fJIuIFwBvAX5vovojwI3ltLHvAr9T6m8BLgD20q2cvBsgMw9ExIfojo8E+GBmrqRleA9dTpdjgM+XsiGDhdRAJk03ZWXmU8CLV9V9n251ZPW1CVy+zn12AjvXqL8LePU0bTJYSE3UbbgaMoOF1ECXkWzcU4AGC6mRKScvB8dgITWQhGdwSqpjz0JSr0U4g9NgITXQZSSzZyGpgidlSeqVGfYsJNVxn4WkXt3hNw5DJPUa/4G9BgupgQSXTiX1cwenpGpmJJPUqzvPwp6FpAoOQyT16uYsHIZIqjD27d7jDoXSIZIEzy1vrio1IuK4iLgpIr4REXsi4tfMdSqNRKvEyMXHgFsz85eBM4A9mOtUGr6V1ZCa0icitgK/AXyyu3c+k5k/wFyn0jhMMcHZl+v0F4HvAX8XEWcAdwNXYK5Tafim3MHZl+v0COC1wPsy846I+BgHhxxrMdepNCQN5yz2Afsy847y/ia64GGuU2noumP1oqr03ivzv4BHIuK0UnUu8ADmOpVGIKN6WbTS+4BPR8RRwMN0+Us3Ya5TadhaH36TmfcAa81rmOtUGjqfDZHUa2XOYsyqJjgjYntEPFi2hm60hCMtrFYTnIer3p5FRGwGPg68hW7J5c6I+GxmPjDrxklD4UlZnbOBvZn5MEBE3EC3vdRgIa1IeM5H1NfcMvr62TRHGqZFmLOoCRZVW0MjYgfdU2wcfdKLnmezpOEZe7Co6Tett2X0p2TmNZl5VmaeddSxx7RqnzQIK3MWY57grAkWdwLbIuLUspvsIrrtpZImZEZVGareYUhmPhcR76XbZ74Z2JmZ98+8ZdLAmL4QyMxb6PafS1pD5vjnLNzBKTURLC27dCqpwpDnI2oYLKQG3GchqU528xZjZrCQGnE1RFKvxDkLSVWGvTuzhsFCamR5edzBYtwLw9Ihktl2u3dE/EfJRXrPSkIic51KIzGDB8nelJlnTiQkMtepNAaZdeV5mGuuU4OF1MgUw5ATI+KuibJjrdsB/xIRd098/lO5TgFznUpDk0z1+HlfrlOAN2TmoyX58e6I+MYG15rrVBqSrCxV98p8tPzcD9xMN+dgrlNp8BJyOapKn4h4YUS8aOU1XY7S+zDXqTQODXdwngTcXFYzjwA+k5m3RsSdmOtUGr5WD5KVtBtnrFH/fcx1Kg2bz4ZIqpOAwUJSDc+zkFTHYCGpX92y6JAZLKQW0glOSbUchkiqY89CUg17FpKqGCwk9SoPko2ZwUJqxZ6FpCounUqqEfYsJPWa5hisgTJYSE2EwxBJlexZSKqyPO8GzJbBQmphAQ6/8XRvqZHIulJ9v4jNEfG1iPhceX9qRNxR8pb+Q0QcVeqPLu/3ls9fOXGPD5T6ByPi/In67aVub0Rcufpvr8VgIbXSMnFI5wpgz8T7Pwc+WnKdPglcVuovA57MzF8CPlquIyJOBy4CfoUul+nflAC0Gfg4XY7U04GLy7Ubms0w5JvPsuncR/qv089s16P3zLsJo3f2+U/N7W9HxCnAbwEfBn6/5Pd4M/DOcsm1wJ/QJTi+sLwGuAn463L9hcANmfk08O2I2EuXrAhgbzlFnIi4oVz7wEZtsmchNTLFMKQm1+lfAX/IwWnTFwM/yMznyvvJ/KT/l9O0fP7Dcv20OVA35ASn1EqjXKcR8dvA/sy8OyLeuFK91l/s+Wy9+rU6Cb0DJIOF1ELScun0DcDbIuICYAuwla6ncVxEHFF6D5P5SVdymu6LiCOAY4EDrJ/rlA3q1+UwRGqk1WpIZn4gM0/JzFfSTVD+a2a+C/gC8I5y2epcpys5UN9Rrs9Sf1FZLTkV2AZ8hS6d4bayunJU+Ruf7WuXPQupldnv4Pwj4IaI+DPga8AnS/0ngb8vE5gH6P7nJzPvj4gb6SYunwMuz8wlgIh4L13i5M3Azsy8v++PGyykV
mYQLDLzi8AXy+uHObiaMXnNTziYJHn1Zx+mW1FZXX8LXULlagYLqYFpN1wNkcFCamXk270NFlIr9iwk1QifOpXUyzkLSdUMFpKqGCwk1Rj7MMTt3pKq2LOQWhl5z8JgIbWQLp1KqmXPQlKfYPwTnAYLqRWDhaRe7uCUVM1gIamGqyGS6tizkNRr+mxjg+N2b6mRVqd7R8SWiPhKRPx7RNwfEX9a6s11Ko1Cu1ynTwNvzswzgDOB7RFxDnPOdWqwkBppmDckM/O/y9sjS0m6XKc3lfprgbeX1xeW95TPz12d6zQzvw2s5Do9m5LrNDOfAVZynW7IYCG1Ut+z6M11WnoA9wD7gd3AtzDXqTR8U6YC2DDXKUBJBnRmRBwH3Ay8aq3LVv78Op81zXVqz0Jqpd2cxcFbZv6ALsnQOZRcp+WjtXKdUpnrdKMcqOsyWEiNNFwNeUnpURARxwC/CezBXKfSSLTbZ3EycG1ZtdgE3JiZn4uIBzDXqTQCjYJFZt4LvGaNenOdSoPnU6eSqhksJNX
"text/plain": [
"<Figure size 288x288 with 2 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"# confusion matrix\n",
"mat = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(model.predict(X_test), axis=1))\n",
"\n",
"display(mat)\n",
"plt.matshow(mat);\n",
"plt.colorbar()\n",
"plt.show()\n"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"# the dectinary\n",
"FI_dic1= {0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[]}\n",
"ave_FI_dic= {0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[]}\n",
"targeted_Features ={0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[]}\n",
"rounds_attack_detected ={0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[]}\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Initializing network.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|████████████████████████████████████████████████████████████████████████████| 100/100 [00:00<00:00, 100342.20it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Round 1.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 57%|██████████████████████████████████████████████▏ | 57/100 [02:21<01:44, 2.44s/it][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 12.6s finished\n",
"[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.4s finished\n",
"100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [04:35<00:00, 2.75s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Global model loss: 0.4784254086968245; global model accuracy: 0.8537300825119019\n",
"Round 2.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 32%|█████████████████████████▉ | 32/100 [01:19<02:44, 2.42s/it][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.5s finished\n",
"100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [06:19<00:00, 3.79s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Global model loss: 1.913670726969011; global model accuracy: 0.7795841097831726\n",
"Round 3.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 0%| | 0/100 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 58 I started the attack, at round 3\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 9%|███████▍ | 9/100 [00:02<00:25, 3.57it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 51 I started the attack, at round 3\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 23%|██████████████████▋ | 23/100 [00:05<00:19, 4.01it/s][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.4s finished\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"total_Changes in this round: 0.05399442952366329\n",
"average changes: 0.058936568090674844\n",
"0.0707238817088098\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 35%|████████████████████████████▎ | 35/100 [03:17<05:24, 4.99s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 95 I started the attack, at round 3\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [03:20<00:00, 2.00s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Global model loss: 1.9800631387364644; global model accuracy: 0.6950191855430603\n",
"Round 4.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 0%| | 0/100 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 58 I started the attack, at round 4\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 8%|██████▌ | 8/100 [00:02<00:29, 3.17it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 95 I started the attack, at round 4\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 42%|██████████████████████████████████ | 42/100 [00:05<00:14, 4.09it/s][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.4s finished\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"total_Changes in this round: 0.07331074788490739\n",
"average changes: 0.06372796135541901\n",
"0.07647355362650281\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 57%|██████████████████████████████████████████████▏ | 57/100 [04:30<03:55, 5.48s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 51 I started the attack, at round 4\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [04:33<00:00, 2.73s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Global model loss: 0.1299796798054893; global model accuracy: 0.9635814428329468\n",
"Round 5.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 0%| | 0/100 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 51 I started the attack, at round 5\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 24%|███████████████████▍ | 24/100 [00:02<00:08, 9.26it/s][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.4s finished\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"total_Changes in this round: 0.07282977245378465\n",
"average changes: 0.06600341413001043\n",
"0.0792040969560125\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 53%|██████████████████████████████████████████▉ | 53/100 [00:38<00:20, 2.26it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 58 I started the attack, at round 5\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 79%|███████████████████████████████████████████████████████████████▉ | 79/100 [00:40<00:07, 2.92it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 95 I started the attack, at round 5\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:43<00:00, 2.30it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Global model loss: 0.04791858909110321; global model accuracy: 0.9851178526878357\n",
"Round 6.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 0%| | 0/100 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 51 I started the attack, at round 6\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 49%|███████████████████████████████████████▋ | 49/100 [00:02<00:02, 18.14it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 58 I started the attack, at round 6\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 74%|███████████████████████████████████████████████████████████▉ | 74/100 [00:05<00:01, 14.26it/s][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.4s finished\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"total_Changes in this round: 0.047182858753608475\n",
"average changes: 0.06223930305473005\n",
"0.07468716366567606\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 87%|██████████████████████████████████████████████████████████████████████▍ | 87/100 [00:21<00:05, 2.34it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 95 I started the attack, at round 6\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:24<00:00, 4.09it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Global model loss: 0.12014329470220501; global model accuracy: 0.9583682417869568\n",
"Round 7.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 0%| | 0/100 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 95 I started the attack, at round 7\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 4%|███▎ | 4/100 [00:02<01:01, 1.56it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 51 I started the attack, at round 7\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 7%|█████▋ | 7/100 [00:05<01:05, 1.43it/s][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.4s finished\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"total_Changes in this round: 0.04856604792580881\n",
"average changes: 0.059960427199909835\n",
"0.0719525126398918\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r",
" 55%|████████████████████████████████████████████▌ | 55/100 [00:44<00:33, 1.36it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I am peer 58 I started the attack, at round 7\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:47<00:00, 2.12it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Global model loss: 0.06726779270004804; global model accuracy: 0.9827722907066345\n",
"Round 8.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 56%|█████████████████████████████████████████████▎ | 56/100 [02:20<01:47, 2.43s/it][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.5s finished\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"total_Changes in this round: 0.085633505125058\n",
"average changes: 0.06362800976064527\n",
"0.07635361171277431\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [04:24<00:00, 2.65s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Global model loss: 6.405008795226287; global model accuracy: 0.5955941677093506\n",
"Round 9.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 9%|███████▍ | 9/100 [00:22<03:47, 2.50s/it][Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n",
"[Parallel(n_jobs=1)]: Done 1000 out of 1000 | elapsed: 1.4s finished\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"total_Changes in this round: 0.10100569015777777\n",
"average changes: 0.06830021981028685\n",
"0.08196026377234421\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 67%|██████████████████████████████████████████████████████▎ | 67/100 [08:34<01:26, 2.61s/it]"
]
}
],
"source": [
"# select aa random peer to be the scanner peer\n",
"peers_selected = random.sample(range(n_peers), number_attackers+1)\n",
"scaner = peers_selected[0]\n",
"mal = peers_selected[1 :]\n",
"if scaner == 0:\n",
" scaner = random.sample(range(n_peers), 1)\n",
"\n",
"# Percentage and number of peers participating at each global training epoch\n",
"percentage_participants = 1.0\n",
"n_participants = int(n_peers * percentage_participants)\n",
"\n",
"# the feature you want to attack in case of a poisoning attack\n",
"feature_attacked = [3,5,8]\n",
"\n",
"# Number of local training epochs per global training epoch\n",
"n_local_rounds = 5\n",
"\n",
"# Local batch size\n",
"local_batch_size = 32\n",
"\n",
"# Local learning rate\n",
"local_lr = 0.001\n",
"\n",
"# Global learning rate or 'gain'\n",
"model_substitution_rate = 1.0\n",
"\n",
"# Attack detection / prevention mechanism = {None, 'distance', 'median', 'accuracy', 'krum'}\n",
"discard_outliers = None\n",
"\n",
"# Used in 'dist' attack detection, defines how far the outliers are (1.5 is a typical value)\n",
"tau = 1.5\n",
"\n",
"# Used in 'accuracy' attack detection, defines the error margin for the accuracy improvement\n",
"sensitivity = 0.05\n",
"\n",
"# Used in 'krum' attack detection, defines how many byzantine attackers we want to defend against\n",
"tolerance=4\n",
"\n",
"# Prevent suspicious peers from participating again, only valid for 'dist' and 'accuracy'\n",
"ban_malicious = False\n",
"\n",
"# Clear nans and infinites in model updates\n",
"clear_nans = True\n",
"\n",
"number_for_threshold1 = numpy.empty(20, dtype=float)\n",
"number_for_threshold2 = numpy.empty(20, dtype=float)\n",
"for r in range(len(number_for_threshold1)):\n",
" number_for_threshold1[r] = 0\n",
" number_for_threshold2[r] = 0\n",
"\n",
"########################\n",
"# ATTACK CONFIGURATION #\n",
"########################\n",
"\n",
"# Percentage of malicious peers\n",
"r_malicious_peers = 0.0\n",
"\n",
"# Number of malicious peers (absolute or relative to total number of peers)\n",
"n_malicious_peers = int(n_peers * r_malicious_peers)\n",
"#n_malicious_peers = 1\n",
"\n",
"# Malicious peers\n",
"malicious_peer = range(n_malicious_peers)\n",
"\n",
"# Target for coalitions\n",
"common_attack_target = [4,7]\n",
"\n",
"# Target class of the attack, per each malicious peer\n",
"malicious_targets = dict([(p, t) for p,t in zip(malicious_peer, [common_attack_target]*n_malicious_peers)])\n",
"\n",
"# Boosting parameter per each malicious peer\n",
"common_malicious_boost = 12\n",
"malicious_boost = dict([(p, b) for p,b in zip(malicious_peer, [common_malicious_boost]*n_malicious_peers)])\n",
"\n",
"###########\n",
"# METRICS #\n",
"###########\n",
"metrics = {'accuracy': [],\n",
" 'atk_effectivity': [],\n",
" 'update_distances': [],\n",
" 'outliers_detected': [],\n",
"\n",
" 'acc_no_target': []}\n",
"\n",
"####################################\n",
"# MODEL AND NETWORK INITIALIZATION #\n",
"####################################\n",
"inputs = X_train[0*ss:0*ss+ss]\n",
"outputs = y_train[0*ss:0*ss+ss]\n",
"global_model = build_model(inputs,outputs)\n",
"n_layers = len(trainable_layers(global_model))\n",
"\n",
"print('Initializing network.')\n",
"sleep(1)\n",
"network = []\n",
"for i in tqdm(range(n_peers)):\n",
" ss = int(len(X_train)/n_peers)\n",
" inputs = X_train[i*ss:i*ss+ss]\n",
" outputs = y_train[i*ss:i*ss+ss]\n",
"# network.append(build_model(inputs, outputs))\n",
" network.append(global_model)\n",
"\n",
"\n",
"banned_peers = set()\n",
"\n",
"##################\n",
"# BEGIN TRAINING #\n",
"##################\n",
"for t in range(n_rounds):\n",
" print(f'Round {t+1}.')\n",
" sleep(1)\n",
"\n",
" ## SERVER SIDE #################################################################\n",
" # Fetch global model parameters\n",
" global_weights, global_biases = get_parameters(global_model)\n",
"\n",
" if clear_nans:\n",
" global_weights, global_biases = nans_to_zero(global_weights, global_biases)\n",
"\n",
" # Initialize peer update lists\n",
" network_weight_updates = []\n",
" network_bias_updates = []\n",
"\n",
" # Selection of participant peers in this global training epoch\n",
" if ban_malicious:\n",
" good_peers = list([p for i,p in enumerate(network) if i not in banned_peers])\n",
" n_participants = n_participants if n_participants <= len(good_peers) else int(len(good_peers) * percentage_participants)\n",
" participants = random.sample(list(enumerate(good_peers)), n_participants)\n",
" else:\n",
" participants = random.sample(list(enumerate(network)),n_participants)\n",
" ################################################################################\n",
"\n",
"\n",
" ## CLIENT SIDE #################################################################\n",
" for i, local_model in tqdm(participants):\n",
"\n",
" # Update local model with global parameters \n",
" set_parameters(local_model, global_weights, global_biases)\n",
"\n",
" # Initialization of user data\n",
" ss = int(len(X_train)/n_peers)\n",
" inputs = X_train[i*ss:i*ss+ss]\n",
" outputs = y_train[i*ss:i*ss+ss]\n",
"\n",
"# the scanner peer side\n",
" if(i == scaner):\n",
" X_train_local, X_test_local, y_train_local, y_test_local = train_test_split(inputs,outputs, test_size=0.7, random_state=rs)\n",
" inputs = X_train_local\n",
" outputs = y_train_local\n",
" if(t == 0):\n",
" forest = build_forest(X_train_local,y_train_local)\n",
" forest_predictions = forest.predict(X_test_local)\n",
" acc_forest = np.mean([t==p for t,p in zip(y_test_local, forest_predictions)])\n",
" FL_predict1 = global_model.predict(X_test_local)\n",
" imp = scan_wrong(forest_predictions, FL_predict1, forest , y_test_local, X_test_local)\n",
" FI_dic1[t] = imp\n",
" if t > 0:\n",
" ave_FI_dic[t-1] = abs(FI_dic1[t] - FI_dic1[t-1])\n",
" average_overall_changes = 0\n",
" if t > 1:\n",
" for r in range(0,t):\n",
" average_overall_changes = average_overall_changes + ave_FI_dic[r]\n",
" average_overall_changes = average_overall_changes / t\n",
" total_Changes=0\n",
" average_overall_changes_one_val = 0\n",
" for s in range(len(ave_FI_dic[t-1])):\n",
" total_Changes = total_Changes + ave_FI_dic[t-1][s]\n",
" average_overall_changes_one_val = average_overall_changes_one_val + average_overall_changes[s]\n",
" print('total_Changes in this round: ', total_Changes)\n",
" print('average changes: ' ,average_overall_changes_one_val)\n",
" threshold1 = alpha * average_overall_changes_one_val\n",
" print(threshold1)\n",
" if total_Changes >= threshold1:\n",
" for ra in range(len(ave_FI_dic[t-1])):\n",
" rounds_attack_detected[t+1] = 1\n",
" if ave_FI_dic[t-1][ra] > beta * total_Changes:\n",
" print('attack have been detected')\n",
" targeted_Features[t+1] =names[ra]\n",
" print(\"attack detected on feature \", names[ra])\n",
" \n",
" \n",
"\n",
"\n",
"\n",
"# atttacker peer side\n",
"\n",
"\n",
" if(t+1>=start_attack_round and t+1<=end_attack_round): \n",
" if (i in mal):\n",
" print(\"I am peer \",i,\"I started the attack, at round\", t+1)\n",
" #attack\n",
" if attack_type == 'Byzantine':\n",
" inputs = byzantine_attack_data(inputs)\n",
" elif attack_type == 'poisoning':\n",
" inputs = poisoning_attack_data(inputs, feature_attacked)\n",
" elif attack_type == 'label_flipping':\n",
" outputs = label_flipping_attack_data(outputs)\n",
"\n",
" local_weight_updates, local_bias_updates = get_updates(local_model, \n",
" inputs, outputs, \n",
" local_batch_size, n_local_rounds)\n",
" if clear_nans:\n",
" local_weight_updates, local_bias_updates = nans_to_zero(local_weight_updates, local_bias_updates)\n",
"\n",
"\n",
"\n",
" else:\n",
" # Benign peer\n",
" # Train local model \n",
" local_weight_updates, local_bias_updates = get_updates(local_model, \n",
" inputs, outputs, \n",
" local_batch_size, n_local_rounds)\n",
" if clear_nans:\n",
" local_weight_updates, local_bias_updates = nans_to_zero(local_weight_updates, local_bias_updates)\n",
"\n",
" # Send updates to the server\n",
" network_weight_updates.append(local_weight_updates)\n",
" network_bias_updates.append(local_bias_updates)\n",
"\n",
"\n",
" ## END OF CLIENT SIDE ##########################################################\n",
"\n",
" ######################################\n",
" # SERVER SIDE AGGREGATION MECHANISMS #\n",
" ######################################\n",
"\n",
"\n",
" # Aggregate client updates\n",
" aggregated_weights, aggregated_biases = aggregate(n_layers, \n",
" n_participants, \n",
" np.mean, \n",
" network_weight_updates, \n",
" network_bias_updates)\n",
"\n",
" if clear_nans:\n",
" aggregated_weights, aggregated_biases = nans_to_zero(aggregated_weights, aggregated_biases)\n",
"\n",
" # Apply updates to global model\n",
" apply_updates(global_model, model_substitution_rate, aggregated_weights, aggregated_biases)\n",
"\n",
" # Proceed as in first case\n",
" aggregated_weights, aggregated_biases = aggregate(n_layers, \n",
" n_participants, \n",
" np.mean, \n",
" network_weight_updates, \n",
" network_bias_updates)\n",
" if clear_nans:\n",
" aggregated_weights, aggregated_biases = nans_to_zero(aggregated_weights, aggregated_biases)\n",
"\n",
" apply_updates(global_model, model_substitution_rate, aggregated_weights, aggregated_biases)\n",
"\n",
" ###################\n",
" # COMPUTE METRICS #\n",
" ###################\n",
"\n",
" # Global model accuracy\n",
" score = global_model.evaluate(X_test, y_test, verbose=0)\n",
" print(f'Global model loss: {score[0]}; global model accuracy: {score[1]}')\n",
" metrics['accuracy'].append(score[1])\n",
"\n",
"\n",
" # Accuracy without the target\n",
" score = global_model.evaluate(X_test, y_test, verbose=0)\n",
" metrics['acc_no_target'].append(score[1])\n",
"\n",
"\n",
" # Distance of individual updates to the final aggregation\n",
" metrics['update_distances'].append([dist_weights(aggregated_weights, w_i) for w_i in network_weight_updates])\n",
"\n"
]
},
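  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal, self-contained sketch of the detection rule used in the training\n",
    "# loop above, run on made-up feature-importance vectors. The vectors and the\n",
    "# helper name detect_attack are illustrative assumptions, not experiment output.\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "def detect_attack(fi_prev, fi_curr, history_changes, alpha=1.2, beta=1/4):\n",
    "    # Per-feature absolute change between two consecutive rounds\n",
    "    change = np.abs(fi_curr - fi_prev)\n",
    "    # Average per-feature change over the previous rounds\n",
    "    avg_change = np.mean(history_changes, axis=0)\n",
    "    total = change.sum()\n",
    "    threshold = alpha * avg_change.sum()\n",
    "    attacked = total >= threshold\n",
    "    # Features holding more than a beta share of the total change are flagged\n",
    "    flagged = np.where(change > beta * total)[0] if attacked else np.array([], dtype=int)\n",
    "    return attacked, flagged\n",
    "\n",
    "# Two quiet rounds of changes, then a round where feature 2 jumps\n",
    "history_changes = np.array([[0.01, 0.02, 0.01, 0.02],\n",
    "                            [0.02, 0.01, 0.02, 0.01]])\n",
    "fi_prev = np.array([0.25, 0.25, 0.25, 0.25])\n",
    "fi_curr = np.array([0.24, 0.26, 0.45, 0.24])\n",
    "print(detect_attack(fi_prev, fi_curr, history_changes))\n",
    "# expected: (True, array([2])) -- feature 2 dominates an unusually large change"
   ]
  },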
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sort the feature according to the last epoch and print it with importances\n",
"\n",
"sort_index = np.argsort(FI_dic1[9])\n",
"for x in sort_index:\n",
" print(names[x], ', ', FI_dic1[9][x])"
]
}
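,
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the global model accuracy per round (a sketch; assumes the training\n",
    "# cell above has been run so that metrics['accuracy'] is populated).\n",
    "# Accuracy may dip inside the attack window and recover once it ends.\n",
    "\n",
    "rounds = range(1, len(metrics['accuracy']) + 1)\n",
    "plt.figure(figsize=(6, 3))\n",
    "plt.plot(rounds, metrics['accuracy'], marker='o')\n",
    "plt.axvspan(start_attack_round, end_attack_round, alpha=0.2, label='attack window')\n",
    "plt.xlabel('round')\n",
    "plt.ylabel('global accuracy')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  }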
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}