import numpy as np
import pandas as pd
y = np.loadtxt('temp_3.txt')       # true labels (binary: 0 or 1)
y_pred = np.loadtxt('temp_4.txt')  # predicted labels
data = np.hstack((y.reshape(-1, 1), y_pred.reshape(-1, 1)))  # column 0: true, column 1: predicted
from sklearn.metrics import confusion_matrix
cf_matrix = confusion_matrix(data[:, 0], data[:, 1]) # arguments: (y_true, y_pred)
# each row corresponds to a true class
# each column corresponds to a predicted class
pd.DataFrame(cf_matrix, index=['False', 'True'], columns=['False', 'True'])
# count the samples with true label 0 and predicted label 0 (the true negatives)
data[np.logical_and(data[:, 0] == 0, data[:, 1] == 0)].shape
# from the matrix: TP = 243, FP = 93, FN = 237, TN = 619
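# Rather than counting the four cells by hand, the binary confusion matrix can be
# unpacked directly; with labels 0 and 1, ravel() returns the cells in the order
# (tn, fp, fn, tp). A minimal sketch:
tn, fp, fn, tp = cf_matrix.ravel()
print(tn, fp, fn, tp)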
from sklearn.metrics import accuracy_score
print(accuracy_score(data[:, 0], data[:, 1])) # fraction of correct predictions
print(accuracy_score(data[:, 0], data[:, 1], normalize=False)) # number of correct predictions
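# Sanity check (a sketch using the counts unpacked from cf_matrix above):
# accuracy = (TP + TN) / (TP + TN + FP + FN), which should match accuracy_score.
print((tp + tn) / (tp + tn + fp + fn))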
from sklearn.metrics import precision_score
# average=None returns the precision of each class separately
print(precision_score(data[:, 0], data[:, 1], average=None))
# average='binary' reports the result only for the class given by pos_label (default pos_label=1)
print(precision_score(data[:, 0], data[:, 1], average='binary'))
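# Manual check for the positive class (pos_label=1), using the counts unpacked
# from cf_matrix above: precision = TP / (TP + FP).
print(tp / (tp + fp))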
from sklearn.metrics import recall_score
# average=None returns the recall of each class separately
print(recall_score(data[:, 0], data[:, 1], average=None))
# average='binary' reports the result only for the class given by pos_label (default pos_label=1)
print(recall_score(data[:, 0], data[:, 1], average='binary'))
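# Manual check for the positive class: recall = TP / (TP + FN).
print(tp / (tp + fn))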
from sklearn.metrics import f1_score
# average=None returns the F1 score of each class separately
print(f1_score(data[:, 0], data[:, 1], average=None))
# average='binary' reports the result only for the class given by pos_label (default pos_label=1)
print(f1_score(data[:, 0], data[:, 1], average='binary'))
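# Manual check: F1 is the harmonic mean of precision and recall,
# F1 = 2 * precision * recall / (precision + recall).
p = tp / (tp + fp)
r = tp / (tp + fn)
print(2 * p * r / (p + r))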
y = np.loadtxt('temp_1.txt')       # true labels (three classes: 0, 1, 2)
y_pred = np.loadtxt('temp_2.txt')  # predicted labels
data = np.hstack((y.reshape(-1, 1), y_pred.reshape(-1, 1)))
data # column 0: true labels; column 1: predicted labels
from sklearn.metrics import confusion_matrix
cf_matrix = confusion_matrix(data[:, 0], data[:, 1]) # arguments: (y_true, y_pred)
# each row corresponds to a true class
# each column corresponds to a predicted class
pd.DataFrame(cf_matrix, index=['0', '1', '2'], columns=['0', '1', '2'])
# count the samples with true label 2 and predicted label 1
data[np.logical_and(data[:, 0] == 2, data[:, 1] == 1)].shape
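# The same count can be read off the matrix directly: cf_matrix[i, j] is the number
# of samples with true class i predicted as class j, so cf_matrix[2, 1] reproduces
# the count above. A sketch of the per-class counts derived from the matrix:
tp_per_class = np.diag(cf_matrix)                    # correctly predicted per class
fp_per_class = cf_matrix.sum(axis=0) - tp_per_class  # predicted as the class, but wrong
fn_per_class = cf_matrix.sum(axis=1) - tp_per_class  # belong to the class, but missed
support = cf_matrix.sum(axis=1)                      # number of true samples per class
print(cf_matrix[2, 1], tp_per_class, fp_per_class, fn_per_class, support)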
from sklearn.metrics import classification_report
print(classification_report(data[:, 0], data[:, 1], target_names=['0', '1', '2']))
from sklearn.metrics import accuracy_score
print(accuracy_score(data[:, 0], data[:, 1])) # fraction of correct predictions
print(accuracy_score(data[:, 0], data[:, 1], normalize=False)) # number of correct predictions
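# Sanity check: multi-class accuracy is the sum of the diagonal of the confusion
# matrix (correct predictions) divided by the total number of samples.
print(np.trace(cf_matrix) / cf_matrix.sum())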
from sklearn.metrics import precision_score
# average=None returns the precision of each class separately
print(precision_score(data[:, 0], data[:, 1], average=None))
# 'macro': unweighted mean of the per-class scores, giving equal weight to each class
print(precision_score(data[:, 0], data[:, 1], average='macro')) # (0.64 + 0.34 + 0.55) / 3
# 'micro': sums the numerators and denominators of the per-class metrics into one overall quotient
print(precision_score(data[:, 0], data[:, 1], average='micro')) # (227+22+384)/(227+22+384+159+42+308)
# 'weighted': each class's score is weighted by its support (its share of the true labels)
print(precision_score(data[:, 0], data[:, 1], average='weighted')) # 0.64*0.38+0.34*0.15+0.55*0.47
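# A sketch reproducing the three averages from the per-class counts computed above:
per_class_precision = tp_per_class / (tp_per_class + fp_per_class)
print(per_class_precision.mean())                                 # macro
print(tp_per_class.sum() / (tp_per_class + fp_per_class).sum())   # micro
print(np.average(per_class_precision, weights=support))           # weighted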
from sklearn.metrics import recall_score
# average=None returns the recall of each class separately
print(recall_score(data[:, 0], data[:, 1], average=None))
# 'macro': unweighted mean of the per-class recalls, giving equal weight to each class
print(recall_score(data[:, 0], data[:, 1], average='macro'))
# 'micro': sums the numerators and denominators of the per-class recalls into one overall quotient
print(recall_score(data[:, 0], data[:, 1], average='micro'))
# 'weighted': each class's recall is weighted by its support (its share of the true labels)
print(recall_score(data[:, 0], data[:, 1], average='weighted'))
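# Same sketch for recall (per-class recall = TP / (TP + FN)); for single-label data
# the micro average reduces to the overall accuracy.
per_class_recall = tp_per_class / (tp_per_class + fn_per_class)
print(per_class_recall.mean())                                    # macro
print(tp_per_class.sum() / (tp_per_class + fn_per_class).sum())   # micro
print(np.average(per_class_recall, weights=support))              # weighted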
from sklearn.metrics import f1_score
# average=None returns the F1 score of each class separately
print(f1_score(data[:, 0], data[:, 1], average=None))
# 'macro': unweighted mean of the per-class F1 scores, giving equal weight to each class
print(f1_score(data[:, 0], data[:, 1], average='macro'))
# 'micro': computed from the summed per-class counts; for single-label data this equals the accuracy
print(f1_score(data[:, 0], data[:, 1], average='micro'))
# 'weighted': each class's F1 score is weighted by its support (its share of the true labels)
print(f1_score(data[:, 0], data[:, 1], average='weighted'))
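# Same sketch for F1: the per-class F1 is the harmonic mean of the per-class
# precision and recall computed above; micro-averaged F1 again equals the accuracy
# for single-label data.
per_class_f1 = 2 * per_class_precision * per_class_recall / (per_class_precision + per_class_recall)
print(per_class_f1.mean())                               # macro
print(np.trace(cf_matrix) / cf_matrix.sum())             # micro (equals accuracy)
print(np.average(per_class_f1, weights=support))         # weighted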