"""old_models.ipynb

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1Oc7A5TaGLg1qkYXzf0qLGIe0_ZxyAnXE

This notebook contains feature selection with a Chi-Square test, Logistic Regression with TF-IDF, and a Bidirectional LSTM with gensim word embeddings to classify a given tweet as depressive or non-depressive.
"""

from google.colab import drive
drive.mount('/content/drive')
import warnings
warnings.filterwarnings("ignore")

import numpy as np
import pandas as pd

import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.feature_extraction.text import TfidfVectorizer

from yellowbrick.text import TSNEVisualizer
from sklearn import manifold

from sklearn.model_selection import train_test_split

from sklearn import feature_selection

from sklearn.pipeline import Pipeline
import sklearn.metrics as skm
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier

import pickle

from nltk.tokenize.treebank import TreebankWordDetokenizer

import gensim
import gensim.downloader as gensim_api
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

from keras.models import load_model
from keras.models import Model, Sequential
from keras.models import model_from_json
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Conv1D, Dense, Input, LSTM, Embedding, Dropout, Activation, MaxPooling1D
from keras.layers import Lambda
from tensorflow.keras import models, layers, preprocessing as kprocessing
from tensorflow.keras import backend as K
import tensorflow as tf
import keras
|
"""## Loading the dataset:"""

df_all = pd.read_csv("/content/drive/MyDrive/NLP/Depression_Detection/data_cleaning/processed_data/processed_data.csv",
                     sep='\t', encoding='utf-8')

df_all
|
| | """## Feature selection |
| | |
| | In order to drop some columns and reduce the matrix dimensionality, we can carry out some Feature Selection, the process of selecting a subset of relevant variables. I will proceed as follows: |
| | |
| | |
| | |
| | 1. treat each category as binary (for example, the “depressive” category is 1 for the depressive tweets and 0 for non_depressive); |
| | 2. perform a Chi-Square test to determine whether a feature and the (binary) target are independent; |
| | 3. keep only the features with a certain p-value from the Chi-Square test. |
| | |
| | This snippet of code is derived from https://towardsdatascience.com/text-classification-with-nlp-tf-idf-vs-word2vec-vs-bert-41ff868d1794 |
| | """ |
| |
|
# NOTE: this cell uses cv, X_train_tfidf and y_train from the TF-IDF
# cells below, so run those first.
y = y_train
X_names = cv.get_feature_names()  # get_feature_names_out() on scikit-learn >= 1.0
p_value_limit = 0.95
df_features = pd.DataFrame()
for cat in np.unique(y):
    chi2, p = feature_selection.chi2(X_train_tfidf, y == cat)
    df_features = pd.concat([df_features, pd.DataFrame(
        {"feature": X_names, "score": 1 - p, "y": cat})])
df_features = df_features.sort_values(["y", "score"],
                                      ascending=[True, False])
df_features = df_features[df_features["score"] > p_value_limit]
X_names = df_features["feature"].unique().tolist()

print(len(X_names))
|
| | """I reduced the number of features from 20018 to 688 by keeping the most statistically relevant ones. Let’s print some:""" |
| |
|
| | for cat in np.unique(y): |
| | print("# {}:".format(cat)) |
| | print(" . selected features:", |
| | len(df_features[df_features["y"]==cat])) |
| | print(" . top features:", ",".join(df_features[df_features["y"]==cat]["feature"].values[:10])) |
| | print(" ") |
| |
|
| | """## Logistic Regression with TFIDF: |
| | |
| | ### Spliting data to train and test datasets: |
| | """ |
| |
|
| | |
| | X_train, X_test, y_train, y_test = train_test_split(df_all['clean_text'], df_all['label'], test_size=0.3, random_state= 42) |
| |
|
| | X_train.shape, X_test.shape, y_train.shape, y_test.shape |
| |
|
| | """### TF-IDF |
| | |
| | TF-IDF (term frequency and inverse document frequency): |
| | """ |
| |
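
"""With scikit-learn's defaults (`smooth_idf=True`, `norm='l2'`), the weight of term $t$ in document $d$, before each row is L2-normalized, is

$$\text{tf-idf}(t, d) = \text{tf}(t, d) \cdot \left( \ln\frac{1 + n}{1 + \text{df}(t)} + 1 \right)$$

where $n$ is the number of documents and $\text{df}(t)$ is the number of documents containing $t$."""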
|
cv = TfidfVectorizer()
cv.fit(X_train.to_list())
dic_vocabulary = cv.vocabulary_

X_train_tfidf = cv.transform(X_train.to_list())

X_test_tfidf = cv.transform(X_test.to_list())

cv.inverse_transform(X_test_tfidf[0])

X_train_tfidf.shape
|
|
| | """The feature matrix X_train_tfidf has a shape of 16,464 (Number of documents in training) x 20018 (Length of vocabulary) and it’s pretty sparse:""" |
| |
|
| | sns.heatmap(X_train_tfidf.todense()[:,np.random.randint(0,X_train_tfidf.shape[1],100)]==0, vmin=0, vmax=1, cbar=False).set_title('Sparse Matrix Sample') |
| |
|
| | """In order to know the position of a certain word, we can look it up in the vocabulary:""" |
| |
|
| | word = "mental" |
| | dic_vocabulary[word] |
| |
|
| | """Build a scikit-learn pipeline: a sequential application of a list of transformations and a final estimator. Putting the Tf-Idf vectorizer and Logistic Regression classifier in a pipeline allows us to transform and predict test data in just one step.""" |
| |
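
"""The cells below fit the vectorizer and the classifier as separate steps; for reference, a minimal sketch of the equivalent one-step pipeline (same hyperparameters as the cells below):"""

pipe = Pipeline([
    ("tfidf", TfidfVectorizer()),
    ("clf", LogisticRegression(solver='liblinear', penalty='l1')),
])
pipe.fit(X_train.to_list(), y_train.to_list())
y_pred_pipe = pipe.predict(X_test.to_list())  # raw text in, labels out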
|
|
lr = LogisticRegression(solver='liblinear', penalty='l1')

print(lr.fit(X_train_tfidf, y_train.to_list()))

LogisticReg = "/content/drive/MyDrive/NLP/Depression_Detection/modeling/model_LogReg.pkl"

with open(LogisticReg, 'wb') as file:
    pickle.dump(lr, file)

with open(LogisticReg, 'rb') as file:
    lr = pickle.load(file)

lr

y_pred_lr = lr.predict(X_test_tfidf)
probs = lr.predict_proba(X_test_tfidf)
classes = np.unique(y_test.to_list())
y_test_array = pd.get_dummies(y_test, drop_first=False).values
|
| | """## Evaluate the performance: |
| | |
| | * **Accuracy:** the fraction of predictions the model got right. |
| | * **Confusion Matrix:** a summary table that breaks down the number of correct and incorrect predictions by each class. |
| | * **ROC:** a plot that illustrates the true positive rate against the false positive rate at various threshold settings. The area under the curve (AUC) indicates the probability that the classifier will rank a randomly chosen positive observation higher than a randomly chosen negative one. |
| | * **Precision:** the fraction of relevant instances among the retrieved instances. |
| | * **Recall:** the fraction of the total amount of relevant instances that were actually retrieved. |
| | """ |
| |
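
"""A toy example (hypothetical labels, for illustration only) makes these definitions concrete:"""

y_true_demo = [1, 0, 1, 1, 0, 1]
y_pred_demo = [1, 0, 0, 1, 0, 1]
print(confusion_matrix(y_true_demo, y_pred_demo))     # [[2 0] [1 3]]
print(accuracy_score(y_true_demo, y_pred_demo))       # 5 of 6 predictions correct
print(skm.precision_score(y_true_demo, y_pred_demo))  # 3 of 3 predicted positives correct
print(skm.recall_score(y_true_demo, y_pred_demo))     # 3 of 4 actual positives retrieved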
|
def conf_matrix_acc(y_true, y_pred):
    # plot the confusion matrix, then print accuracy and the full report
    cm = confusion_matrix(y_true, y_pred)
    fig, ax = plt.subplots()
    sns.heatmap(cm, annot=True, fmt='d', ax=ax, cmap=plt.cm.Blues,
                cbar=False)
    ax.set(xlabel="Pred", ylabel="True", xticklabels=classes,
           yticklabels=classes, title="Confusion matrix")
    plt.yticks(rotation=0)
    print("=========================================")
    print(f'Accuracy score is : {accuracy_score(y_true, y_pred)}')
    print("=========================================")
    print("Detail:")
    print(skm.classification_report(y_true, y_pred))
|
def roc_precision_auc():
    fig, ax = plt.subplots(nrows=1, ncols=2)

    # ROC curve for each class
    for i in range(len(classes)):
        fpr, tpr, thresholds = skm.roc_curve(y_test_array[:, i],
                                             probs[:, i])
        ax[0].plot(fpr, tpr, lw=3,
                   label='{0} (area={1:0.2f})'.format(classes[i],
                                                      skm.auc(fpr, tpr))
                   )
    ax[0].plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')
    ax[0].set(xlim=[-0.05, 1.0], ylim=[0.0, 1.05],
              xlabel='False Positive Rate',
              ylabel="True Positive Rate (Recall)",
              title="Receiver operating characteristic")
    ax[0].legend(loc="lower right")
    ax[0].grid(True)

    # Precision-Recall curve for each class
    for i in range(len(classes)):
        precision, recall, thresholds = skm.precision_recall_curve(
            y_test_array[:, i], probs[:, i])
        ax[1].plot(recall, precision, lw=3,
                   label='{0} (area={1:0.2f})'.format(classes[i],
                                                      skm.auc(recall, precision))
                   )
    ax[1].set(xlim=[0.0, 1.05], ylim=[0.0, 1.05], xlabel='Recall',
              ylabel="Precision", title="Precision-Recall curve")
    ax[1].legend(loc="best")
    ax[1].grid(True)
    # save before plt.show(), which clears the current figure
    plt.savefig('/content/drive/MyDrive/NLP/Depression_Detection/modeling/ROC_Precision_SVM.png')
    plt.show()

    print(f'AUC score is : {skm.roc_auc_score(y_test, probs[:,1])}')

conf_matrix_acc(y_test.to_list(), y_pred_lr)

roc_precision_auc()
|
|
| | """## Bidirectional LSTM: |
| | |
| | In Python, you can load a pre-trained Word Embedding model from genism-data like this: |
| | """ |
| |
|
| | nlp_pre = gensim_api.load("word2vec-google-news-300") |
| |
|
| | word = "anxiety" |
| | fig = plt.figure() |
| | |
| | tot_words = [word] + [tupla[0] for tupla in |
| | nlp_pre.most_similar(word, topn=20)] |
| | X = nlp_pre[tot_words] |
| | |
| | pca = manifold.TSNE(perplexity=40, n_components=3, init='pca') |
| | X = pca.fit_transform(X) |
| | |
| | dtf_ = pd.DataFrame(X, index=tot_words, columns=["x","y","z"]) |
| | dtf_["input"] = 0 |
| | dtf_["input"].iloc[0:1] = 1 |
| | |
| | from mpl_toolkits.mplot3d import Axes3D |
| | ax = fig.add_subplot(111, projection='3d') |
| | ax.scatter(dtf_[dtf_["input"]==0]['x'], |
| | dtf_[dtf_["input"]==0]['y'], |
| | dtf_[dtf_["input"]==0]['z'], c="black") |
| | ax.scatter(dtf_[dtf_["input"]==1]['x'], |
| | dtf_[dtf_["input"]==1]['y'], |
| | dtf_[dtf_["input"]==1]['z'], c="red") |
| | ax.set(xlabel=None, ylabel=None, zlabel=None, xticklabels=[], |
| | yticklabels=[], zticklabels=[]) |
| | for label, row in dtf_[["x","y","z"]].iterrows(): |
| | x, y, z = row |
| | ax.text(x, y, z, s=label) |
| |
|
| | """Instead of using a pre-trained model, I am going to fit my own Word2Vec on the training data corpus with gensim. Before fitting the model, the corpus needs to be transformed into a list of lists of n-grams. In this particular case, I’ll try to capture unigrams (“york”), bigrams (“new york”), and trigrams (“new york city”).""" |
| |
|
| | |
| | dtf_train, dtf_test = train_test_split(df_all, test_size=0.3) |
| | |
| | y_train = dtf_train["label"].values |
| | y_test = dtf_test["label"].values |
| |
|
corpus = [x for x in dtf_train['clean_text']]

# split each text into unigrams; the Phrases detectors below merge
# frequent pairs and triples into bigrams and trigrams
lst_corpus = []
for string in corpus:
    lst_words = str(string).split()
    lst_grams = [" ".join(lst_words[i:i+1])
                 for i in range(0, len(lst_words), 1)]
    lst_corpus.append(lst_grams)

# gensim < 4.0 API (bytes delimiter)
bigrams_detector = gensim.models.phrases.Phrases(lst_corpus,
                                                 delimiter=" ".encode(), min_count=5, threshold=10)
bigrams_detector = gensim.models.phrases.Phraser(bigrams_detector)
trigrams_detector = gensim.models.phrases.Phrases(bigrams_detector[lst_corpus],
                                                  delimiter=" ".encode(), min_count=5, threshold=10)
trigrams_detector = gensim.models.phrases.Phraser(trigrams_detector)

# apply the detectors to the training corpus as well (the test corpus gets
# the same transformation below), so train and test share n-gram tokens
lst_corpus = list(bigrams_detector[lst_corpus])
lst_corpus = list(trigrams_detector[lst_corpus])
|
| | """When fitting the Word2Vec, you need to specify: |
| | |
| | * the target size of the word vectors, I’ll use 300; |
| | * the window, or the maximum distance between the current and predicted word within a sentence, I’ll use the mean length of text in the corpus; |
| | * the training algorithm, I’ll use skip-grams (sg=1) as in general it has better results. |
| | """ |
| |
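
"""A quick sanity check for the window choice (the next cell hardcodes window=8 on this basis):"""

print("mean text length:", np.mean([len(tokens) for tokens in lst_corpus]))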
|
# gensim < 4.0 API: on gensim >= 4.0 these become vector_size= and epochs=
nlp = gensim.models.word2vec.Word2Vec(lst_corpus, size=300,
                                      window=8, min_count=1, sg=1, iter=30)

"""We have our embedding model, so we can select any word from the corpus and transform it into a vector."""

word = "anxiety"
nlp.wv[word].shape
|
| | """We can even use it to visualize a word and its context into a smaller dimensional space (2D or 3D) by applying any dimensionality reduction algorithm (i.e. TSNE).""" |
| |
|
| | word = "anxiety" |
| | fig = plt.figure() |
| | |
| | tot_words = [word] + [tupla[0] for tupla in |
| | nlp.most_similar(word, topn=20)] |
| | X = nlp[tot_words] |
| | |
| | pca = manifold.TSNE(perplexity=40, n_components=3, init='pca') |
| | X = pca.fit_transform(X) |
| | |
| | dtf_ = pd.DataFrame(X, index=tot_words, columns=["x","y","z"]) |
| | dtf_["input"] = 0 |
| | dtf_["input"].iloc[0:1] = 1 |
| | |
| | from mpl_toolkits.mplot3d import Axes3D |
| | ax = fig.add_subplot(111, projection='3d') |
| | ax.scatter(dtf_[dtf_["input"]==0]['x'], |
| | dtf_[dtf_["input"]==0]['y'], |
| | dtf_[dtf_["input"]==0]['z'], c="black") |
| | ax.scatter(dtf_[dtf_["input"]==1]['x'], |
| | dtf_[dtf_["input"]==1]['y'], |
| | dtf_[dtf_["input"]==1]['z'], c="red") |
| | ax.set(xlabel=None, ylabel=None, zlabel=None, xticklabels=[], |
| | yticklabels=[], zticklabels=[]) |
| | for label, row in dtf_[["x","y","z"]].iterrows(): |
| | x, y, z = row |
| | ax.text(x, y, z, s=label) |
| |
|
| | """The word vectors can be used in a neural network as weights in the follwing procedure: |
| | 1. Transform the corpus into padded sequences of word ids to get a feature matrix. |
| | 2. Create an embedding matrix so that the vector of the word with id N is located at the Nth row. |
| | 3. Build a neural network with an embedding layer that weighs every word in the sequences with the corresponding vector. |
| | |
| | **Feature Engineering:** by transforming the same preprocessed corpus (list of lists of n-grams) given to the Word2Vec into a list of sequences using tensorflow/keras: |
| | """ |
| |
|
tokenizer = kprocessing.text.Tokenizer(lower=True, split=' ',
                                       oov_token="NaN",
                                       filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
tokenizer.fit_on_texts(lst_corpus)
dic_vocabulary = tokenizer.word_index

lst_text2seq = tokenizer.texts_to_sequences(lst_corpus)

X_train = kprocessing.sequence.pad_sequences(lst_text2seq,
                                             maxlen=35, padding="post", truncating="post")

X_train.shape

"""The feature matrix X_train has a shape of 16559 x 35 (number of sequences x sequence max length). Let’s visualize it:"""

sns.heatmap(X_train == 0, vmin=0, vmax=1, cbar=False)
plt.show()
|
| | """Every text in the corpus is now an id sequence with length 35. For instance, if a text had 20 tokens in it, then the sequence is composed of 20 ids + 15 0s, which is the padding element (while the id for word not in the vocabulary is 1) |
| | |
| | Let’s print how a text from the train set has been transformed into a sequence with the padding and the vocabulary. |
| | """ |
| |
|
i = 8

len_txt = len(dtf_train["clean_text"].iloc[i].split())
print("from: ", dtf_train["clean_text"].iloc[i], "| len:", len_txt)

len_tokens = len(X_train[i])
print("to: ", X_train[i], "| len:", len_tokens)

print("check: ", dtf_train["clean_text"].iloc[i].split()[0],
      " -- idx in vocabulary -->",
      dic_vocabulary[dtf_train["clean_text"].iloc[i].split()[0]])

print("vocabulary: ", dict(list(dic_vocabulary.items())[0:5]), "... (padding element, 0)")
|
| | corpus = dtf_test["clean_text"] |
| |
|
| | |
| | lst_corpus = [] |
| | for string in corpus: |
| | lst_words = str(string).split() |
| | lst_grams = [" ".join(lst_words[i:i+1]) for i in range(0, |
| | len(lst_words), 1)] |
| | lst_corpus.append(lst_grams) |
| | |
| | |
| | lst_corpus = list(bigrams_detector[lst_corpus]) |
| | lst_corpus = list(trigrams_detector[lst_corpus]) |
| | |
| | lst_text2seq = tokenizer.texts_to_sequences(lst_corpus) |
| |
|
| | |
| | X_test = kprocessing.sequence.pad_sequences(lst_text2seq, maxlen=35, |
| | padding="post", truncating="post") |
| |
|
| | X_test.shape |
| |
|
| | sns.heatmap(X_test==0, vmin=0, vmax=1, cbar=False) |
| | plt.show() |
| |
|
| | """We’ve got our X_train and X_test, now we need to create the embedding matrix that will be used as a weight matrix in the neural network.""" |
| |
|
| | |
| | embeddings = np.zeros((len(dic_vocabulary)+1, 300)) |
| | for word,idx in dic_vocabulary.items(): |
| | |
| | try: |
| | embeddings[idx] = nlp[word] |
| | |
| | except: |
| | pass |
| |
|
| | embeddings.shape |
| |
|
| | """That code generates a matrix of shape 20,050 x 300 (Length of vocabulary extracted from the corpus x Vector size). It can be navigated by word id, which can be obtained from the vocabulary.""" |
| |
|
| | word = "anxiety" |
| | print("dic[word]:", dic_vocabulary[word], "|idx") |
| | print("embeddings[idx]:", embeddings[dic_vocabulary[word]].shape, |
| | "|vector") |
| |
|
| | """### Deep Learning: |
| | |
| | It’s finally time to build a deep learning model. I’m going to use the embedding matrix in the first Embedding layer of the neural network that I will build and train to classify the news. Each id in the input sequence will be used as the index to access the embedding matrix. The output of this Embedding layer will be a 2D matrix with a word vector for each word id in the input sequence (Sequence length x Vector size). Let’s use the sentence “I like this article” as an example: |
| | |
| | My neural network shall be structured as follows: |
| | |
| | * An Embedding layer that takes the sequences as input and the word vectors as weights, just as described before. |
| | |
| | * A simple Attention layer that won’t affect the predictions but it’s going to capture the weights of each instance and allow us to build a nice explainer (it isn't necessary for the predictions, just for the explainability, so you can skip it). |
| | |
| | * Two layers of Bidirectional LSTM to model the order of words in a sequence in both directions. |
| | |
| | * Two final dense layers that will predict the probability of each category. |
| | """ |
| |
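
"""A minimal sketch (toy sentence, shape check only) of what that Embedding layer returns: one 300-d vector per id, so the padded sequence of length 35 becomes a 35 x 300 matrix."""

toy_seq = kprocessing.sequence.pad_sequences(
    tokenizer.texts_to_sequences([["i", "like", "this", "article"]]),
    maxlen=35, padding="post", truncating="post")
toy_embedding = layers.Embedding(input_dim=embeddings.shape[0],
                                 output_dim=embeddings.shape[1],
                                 weights=[embeddings], trainable=False)
print(toy_embedding(tf.constant(toy_seq)).shape)  # (1, 35, 300)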
|
def attention_layer(inputs, neurons):
    # softmax scores over the sequence axis, then rescale the inputs
    x = layers.Permute((2, 1))(inputs)
    x = layers.Dense(neurons, activation="softmax")(x)
    x = layers.Permute((2, 1), name="attention")(x)
    x = layers.multiply([inputs, x])
    return x
|
x_in = layers.Input(shape=(35,))

# embedding layer initialized with the Word2Vec matrix and frozen
x = layers.Embedding(input_dim=embeddings.shape[0],
                     output_dim=embeddings.shape[1],
                     weights=[embeddings],
                     input_length=35, trainable=False)(x_in)

x = attention_layer(x, neurons=35)

x = layers.Bidirectional(layers.LSTM(units=35, dropout=0.2,
                                     return_sequences=True))(x)
x = layers.Bidirectional(layers.LSTM(units=35, dropout=0.2))(x)

x = layers.Dense(64, activation='relu')(x)
y_out = layers.Dense(1, activation='sigmoid')(x)

model = models.Model(x_in, y_out)
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])

model.summary()
|
# map the (possibly string) labels to integer ids for training
dic_y_mapping = {n: label for n, label in
                 enumerate(np.unique(y_train))}
inverse_dic = {v: k for k, v in dic_y_mapping.items()}
y_train = np.array([inverse_dic[y] for y in y_train])

training = model.fit(x=X_train, y=y_train, batch_size=256,
                     epochs=30, shuffle=True, verbose=0,
                     validation_split=0.3)
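
"""The EarlyStopping callback imported at the top is never wired in; a minimal sketch of how it could be added to the fit call above (monitoring val_loss is my assumption):"""

es = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
# training = model.fit(x=X_train, y=y_train, batch_size=256, epochs=30,
#                      shuffle=True, verbose=0, validation_split=0.3,
#                      callbacks=[es])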
|
metrics = [k for k in training.history.keys() if ("loss" not in k) and ("val" not in k)]
fig, ax = plt.subplots(nrows=1, ncols=2, sharey=True)

ax[0].set(title="Training")
ax11 = ax[0].twinx()
ax[0].plot(training.history['loss'], color='black')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('Loss', color='black')
for metric in metrics:
    ax11.plot(training.history[metric], label=metric)
ax11.set_ylabel("Score", color='steelblue')
ax11.legend()

ax[1].set(title="Validation")
ax22 = ax[1].twinx()
ax[1].plot(training.history['val_loss'], color='black')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Loss', color='black')
for metric in metrics:
    ax22.plot(training.history['val_' + metric], label=metric)
ax22.set_ylabel("Score", color="steelblue")

plt.savefig('/content/drive/MyDrive/NLP/Depression_Detection/modeling/loss_accuracy_LSTM_3.png')
plt.show()
|
# serialize the architecture to JSON and the weights to HDF5
model_json = model.to_json()
with open("/content/drive/MyDrive/NLP/Depression_Detection/modeling/model.json", "w") as json_file:
    json_file.write(model_json)

model.save_weights("/content/drive/MyDrive/NLP/Depression_Detection/modeling/model.h5")
print("Saved model to disk")
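
"""The fitted Tokenizer is also needed at inference time to turn new text into sequences; a minimal sketch of persisting it alongside the model (the tokenizer.pkl filename is an assumption):"""

# hypothetical path; pairs the tokenizer with the saved model above
with open("/content/drive/MyDrive/NLP/Depression_Detection/modeling/tokenizer.pkl", "wb") as f:
    pickle.dump(tokenizer, f)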
|
with open("/content/drive/MyDrive/NLP/Depression_Detection/modeling/model.json", "r") as json_file:
    loaded_model = model_from_json(json_file.read(),
                                   custom_objects={'tf': tf})

loaded_model.load_weights("/content/drive/MyDrive/NLP/Depression_Detection/modeling/model.h5")
print("Loaded model from disk")
|
labels_pred = model.predict(X_test)
labels_pred = np.round(labels_pred.flatten())  # threshold the sigmoid outputs at 0.5
accuracy = accuracy_score(y_test, labels_pred)
classes = np.unique(y_test)
print("Accuracy: %.2f%%" % (accuracy*100))
|
def conf_matrix_acc2(y_true, y_pred):
    cm = confusion_matrix(y_true, y_pred)
    fig, ax = plt.subplots()
    sns.heatmap(cm, annot=True, fmt='d', ax=ax, cmap=plt.cm.Blues,
                cbar=False)
    ax.set(xlabel="Pred", ylabel="True", xticklabels=classes,
           yticklabels=classes, title="Confusion matrix")
    plt.yticks(rotation=0)
    print("=========================================")
    print(f'Accuracy score is : {accuracy_score(y_true, y_pred)}')
    print("=========================================")
    print("Detail:")
    print(skm.classification_report(y_true, y_pred))
|
conf_matrix_acc2(y_test, labels_pred)
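
"""For comparison with the Logistic Regression section, AUC can also be computed from the unrounded sigmoid outputs (this assumes the labels are already encoded as 0/1, as the accuracy computation above implies):"""

probs_lstm = model.predict(X_test).flatten()  # raw sigmoid scores
print(f'AUC score is : {skm.roc_auc_score(y_test, probs_lstm)}')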
|