From 34d49d6c7e094458478051c59eb4cc6e0b1be760 Mon Sep 17 00:00:00 2001 From: Hiroshi Date: Wed, 3 Apr 2019 15:48:38 -0300 Subject: [PATCH 1/3] Add new ann Testing rnn --- lstm.py | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 lstm.py diff --git a/lstm.py b/lstm.py new file mode 100644 index 0000000..220fd47 --- /dev/null +++ b/lstm.py @@ -0,0 +1,83 @@ +# Artificial Neural Network + +# Installing Theano +# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git + +# Installing Tensorflow +# pip install tensorflow + +# Installing Keras +# pip install --upgrade keras + +# Part 1 - Data Preprocessing + +# Importing the libraries +import numpy as np +import matplotlib.pyplot as plt +import pandas as pd + +# Importing the dataset +dataset = pd.read_csv('Churn_Modelling.csv') +X = dataset.iloc[:, 3:13].values +y = dataset.iloc[:, 13].values + +# Encoding categorical data +from sklearn.preprocessing import LabelEncoder, OneHotEncoder +labelencoder_X_1 = LabelEncoder() +X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1]) +labelencoder_X_2 = LabelEncoder() +X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2]) +onehotencoder = OneHotEncoder(categorical_features = [1]) +X = onehotencoder.fit_transform(X).toarray() +X = X[:, 1:] + +# Splitting the dataset into the Training set and Test set +from sklearn.model_selection import train_test_split +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0) + +# Feature Scaling +from sklearn.preprocessing import StandardScaler +sc = StandardScaler() +X_train = sc.fit_transform(X_train) +X_test = sc.transform(X_test) + +# Convert to 3d (not recommended) +X_test = np.reshape(X_test, X_test.shape + (1,)) +X_train = np.reshape(X_train, X_train.shape + (1,)) + +# Part 2 - Now let's make the ANN! 
+ +# Importing the Keras libraries and packages +from keras.models import Sequential +from keras.layers import Dense, Dropout, Conv1D, Masking, Embedding, LSTM +from keras.initializers import RandomNormal + +# Initialising the ANN + +model = Sequential() + +model.add(LSTM(128)) + +model.add(Dropout(0.5)) + +model.add(Dense(1, activation='sigmoid')) + +# Compiling the ANN +model.compile(loss='binary_crossentropy', + optimizer='rmsprop', + metrics=['accuracy']) + +# Fitting the ANN to the Training set +model.fit(X_train, y_train, batch_size=16, epochs=10) +print(model.summary()) + + +# Part 3 - Making predictions and evaluating the model + +# Predicting the Test set results +y_pred = model.predict(X_test) +y_pred = (y_pred > 0.5) + +# Making the Confusion Matrix +from sklearn.metrics import confusion_matrix +cm = confusion_matrix(y_test, y_pred) \ No newline at end of file From b57270c4936d9924d09b5687006a8acf119f0b01 Mon Sep 17 00:00:00 2001 From: Hiroshi Date: Wed, 3 Apr 2019 15:49:34 -0300 Subject: [PATCH 2/3] add drop and change param in layers --- ann.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/ann.py b/ann.py index a8a3d54..a416cb1 100644 --- a/ann.py +++ b/ann.py @@ -44,24 +44,27 @@ # Part 2 - Now let's make the ANN! 
# Importing the Keras libraries and packages -import keras from keras.models import Sequential -from keras.layers import Dense +from keras.layers import Dense, Dropout, Conv1D, Masking, Embedding, LSTM +from keras.initializers import RandomNormal # Initialising the ANN classifier = Sequential() -# Adding the input layer and the first hidden layer -classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11)) +classifier.add(Dense(256, input_dim=11, activation='relu')) -# Adding the second hidden layer -classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu')) +classifier.add(Dropout(0.5)) -# Adding the output layer -classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid')) +classifier.add(Dense(256, activation='relu')) + +classifier.add(Dropout(0.5)) + +classifier.add(Dense(1, activation='sigmoid')) # Compiling the ANN -classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) +classifier.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy', metrics = ['accuracy']) + +print(classifier.summary()) # Fitting the ANN to the Training set classifier.fit(X_train, y_train, batch_size = 10, epochs = 100) From fa3eec0f5ecfd1fbf1a92ad1507fe90bc06b8fa0 Mon Sep 17 00:00:00 2001 From: Hiroshi Date: Fri, 26 Apr 2019 17:30:30 -0300 Subject: [PATCH 3/3] Add new evaluation --- evaluating_improving_tuning.py => eitlstm.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) rename evaluating_improving_tuning.py => eitlstm.py (96%) diff --git a/evaluating_improving_tuning.py b/eitlstm.py similarity index 96% rename from evaluating_improving_tuning.py rename to eitlstm.py index 4e30d91..1e47357 100644 --- a/evaluating_improving_tuning.py +++ b/eitlstm.py @@ -137,8 +137,8 @@ def build_classifier(optimizer): classifier = KerasClassifier(build_fn = build_classifier) -parameters = {'batch_size': [10, 12], - 'epochs': [5, 10], +parameters = 
{'batch_size': [20,32], + 'epochs': [20, 32], 'optimizer': ['adam', 'rmsprop']} grid_search = GridSearchCV(estimator = classifier, @@ -151,6 +151,5 @@ def build_classifier(optimizer): best_parameters = grid_search.best_params_ best_accuracy = grid_search.best_score_ - -# Before: loss: 0.3864 - acc: 0.8512 -# After: loss: 0.3547 - acc: 0.8561 \ No newline at end of file +# Best Accuracy: 0.861375 +# Best parameter: {'batch_size': 30, 'epochs': 32, 'optimizer': 'adam'}  # NOTE(review): 30 is not in the searched grid [20, 32] -- likely a typo for 20 or 32; re-run to confirm