"""
Created on Fri Mar 15 14:57:46 2019

@author: atavci
"""
import pandas as pd
import numpy as np

import seaborn as sns
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import roc_curve, auc

import xgboost as xgb
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
sns.set()

data = pd.read_csv("Data/synthetic-data-from-a-financial-payment-system/bs140513_032310.csv")

print(data.head(5))

# Split transactions by label for exploratory analysis
df_fraud = data.loc[data.fraud == 1]
df_non_fraud = data.loc[data.fraud == 0]
sns.countplot(x="fraud", data=data)
plt.title("Count of Fraudulent Payments")
plt.show()
print("Number of normal examples: ", df_non_fraud.fraud.count())
print("Number of fraudulent examples: ", df_fraud.fraud.count())
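# The two classes are heavily imbalanced. A quick look at the fraud ratio
# (an illustrative addition to the original script):
print("Fraud ratio: {:.4f}".format(df_fraud.fraud.count() / data.fraud.count()))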
| | print("Mean feature values per category",data.groupby('category')['amount','fraud'].mean()) |
| |
|
| | print("Columns: ", data.columns) |
| |
|
| |
|
| |
|
| | |
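# A sorted view of the per-category fraud rate is easier to scan than the raw
# groupby table above (illustrative addition):
print(data.groupby('category')['fraud'].mean().sort_values(ascending=False))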
plt.hist(df_fraud.amount, alpha=0.5, label='fraud', bins=100)
plt.hist(df_non_fraud.amount, alpha=0.5, label='nonfraud', bins=100)
plt.title("Histogram for fraud and nonfraud payments")
plt.ylim(0, 10000)
plt.xlim(0, 1000)
plt.legend()
plt.show()
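# Numeric complement to the histogram (illustrative addition): average amounts
# for fraudulent vs. non-fraudulent payments.
print("Mean amount (fraud): ", df_fraud.amount.mean())
print("Mean amount (non-fraud): ", df_non_fraud.amount.mean())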
print("Unique zipcodeOri values: ", data.zipcodeOri.nunique())
print("Unique zipMerchant values: ", data.zipMerchant.nunique())
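# Both zip code columns report a single unique value, so they carry no
# predictive information; they are dropped below.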
data_reduced = data.drop(['zipcodeOri', 'zipMerchant'], axis=1)

print(data_reduced.columns)
col_categorical = data_reduced.select_dtypes(include=['object']).columns
for col in col_categorical:
    data_reduced[col] = data_reduced[col].astype('category')
data_reduced[col_categorical] = data_reduced[col_categorical].apply(lambda x: x.cat.codes)
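# Label encoding via cat.codes keeps the feature count small, which suits the
# tree-based models below. A one-hot alternative would look like this
# (sketch, not used here):
# data_onehot = pd.get_dummies(data_reduced, columns=col_categorical.tolist())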
X = data_reduced.drop(['fraud'], axis=1)
y = data['fraud']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=42, shuffle=True, stratify=y)
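# stratify=y keeps the fraud/non-fraud ratio the same in the train and test
# splits, which matters with a target this imbalanced.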
def plot_roc_auc(y_test, preds):
    '''
    Takes actual labels and predicted probabilities as input and plots the
    Receiver Operating Characteristic (ROC) curve.
    '''
    fpr, tpr, threshold = roc_curve(y_test, preds)
    roc_auc = auc(fpr, tpr)
    plt.title('Receiver Operating Characteristic')
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
| | print("Base score we must beat is: ", |
| | df_non_fraud.fraud.count()/ np.add(df_non_fraud.fraud.count(),df_fraud.fraud.count()) * 100) |
| |
|
| |
|
| | |
| |
|
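# Accuracy alone is therefore misleading: always predicting "non-fraud" already
# reaches the base score printed above, so the fraud-class precision and recall
# in the reports below are the numbers to watch.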
knn = KNeighborsClassifier(n_neighbors=5, p=1)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
| | print("Classification Report for K-Nearest Neighbours: \n", classification_report(y_test, y_pred)) |
| | print("Confusion Matrix of K-Nearest Neigbours: \n", confusion_matrix(y_test,y_pred)) |
| | plot_roc_auc(y_test, knn.predict_proba(X_test)[:,1]) |
| |
|
rf_clf = RandomForestClassifier(n_estimators=100, max_depth=8, random_state=42,
                                verbose=1, class_weight="balanced")
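# class_weight="balanced" (above) re-weights samples inversely to class
# frequency, so the forest is not dominated by the majority non-fraud class.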
rf_clf.fit(X_train, y_train)
y_pred = rf_clf.predict(X_test)
| | print("Classification Report for Random Forest Classifier: \n", classification_report(y_test, y_pred)) |
| | print("Confusion Matrix of Random Forest Classifier: \n", confusion_matrix(y_test,y_pred)) |
| | plot_roc_auc(y_test, rf_clf.predict_proba(X_test)[:,1]) |
| |
|
| | |
# binary:logistic yields class probabilities, which the ROC plot and the
# soft-voting ensemble below rely on
XGBoost_CLF = xgb.XGBClassifier(max_depth=6, learning_rate=0.05, n_estimators=400,
                                objective="binary:logistic", booster='gbtree',
                                n_jobs=-1, gamma=0, min_child_weight=1, max_delta_step=0,
                                subsample=1, colsample_bytree=1, colsample_bylevel=1,
                                reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
                                base_score=0.5, random_state=42, verbosity=1)
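# A common tweak for imbalanced data (sketch, not applied here): set
# scale_pos_weight to the non-fraud/fraud ratio instead of the default 1.
# XGBoost_CLF.set_params(scale_pos_weight=df_non_fraud.fraud.count() / df_fraud.fraud.count())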
XGBoost_CLF.fit(X_train, y_train)

y_pred = XGBoost_CLF.predict(X_test)
print("Classification Report for XGBoost: \n", classification_report(y_test, y_pred))
print("Confusion Matrix of XGBoost: \n", confusion_matrix(y_test, y_pred))
plot_roc_auc(y_test, XGBoost_CLF.predict_proba(X_test)[:, 1])
estimators = [("KNN", knn), ("rf", rf_clf), ("xgb", XGBoost_CLF)]
ens = VotingClassifier(estimators=estimators, voting="soft", weights=[1, 4, 1])
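# Soft voting averages the three models' predicted probabilities, with the
# random forest given 4x the weight of KNN and XGBoost.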
ens.fit(X_train, y_train)
y_pred = ens.predict(X_test)
| | print("Classification Report for Ensembled Models: \n", classification_report(y_test, y_pred)) |
| | print("Confusion Matrix of Ensembled Models: \n", confusion_matrix(y_test,y_pred)) |
| | plot_roc_auc(y_test, ens.predict_proba(X_test)[:,1]) |
| |
|
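# Optional extra (sketch): visualize the ensemble's confusion matrix as a
# seaborn heatmap, reusing the imports above.
conf_mat = confusion_matrix(y_test, y_pred)
sns.heatmap(conf_mat, annot=True, fmt="d", cmap="Blues")
plt.title("Confusion Matrix - Ensemble")
plt.xlabel("Predicted label")
plt.ylabel("True label")
plt.show()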