scorer.py
"""
Created by Nidhi Mundra on 26/04/17.
"""
import numpy as np
class Scorer:
def __init__(self):
"""
Initializes data structures needed to compute score
"""
# List of all the class labels
self.labels = [0, 1, 2, 3]
# Dictionary to store count of each label in predicted labels list
self.total_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}
# Dictionary to store count of each label in actual labels list
self.total_actual_count = {0: 0, 1: 0, 2: 0, 3: 0}
# Dictionary to store count of correctly predicted labels
self.total_correct_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}
    def __get_f1_scores__(self):
        """
        Compute the F1 score of each label using the formula given at https://www.physionet.org/challenge/2017/,
        i.e. F1 = 2 * (correct predictions) / (actual count + predicted count) for each label
        :return: Numpy array of F1 scores, one per label
        """

        # Initialize an empty output array
        output = np.array([])

        for label in self.labels:
            # Compute the F1 value for the current label
            if self.total_correct_prediction_count[label] != 0:
                f1 = float(2 * self.total_correct_prediction_count[label]) / \
                     float(self.total_actual_count[label] + self.total_prediction_count[label])
            else:
                f1 = 0.0

            # Append the computed F1 score to the output array
            output = np.append(output, f1)

        return output
    def score(self, predicted_y, actual_y):
        """
        Compute the classification score from the predicted and actual labels,
        using the formula given at https://www.physionet.org/challenge/2017/
        :param predicted_y: Array containing all the predicted labels
        :param actual_y: Array containing all the actual labels
        :return: Prediction score (mean of the per-label F1 scores)
        """

        # Reset all counts so the scorer can be reused across calls
        self.labels = [0, 1, 2, 3]

        # Dictionary to store the count of each label in the predicted labels list
        self.total_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}

        # Dictionary to store the count of each label in the actual labels list
        self.total_actual_count = {0: 0, 1: 0, 2: 0, 3: 0}

        # Dictionary to store the count of correctly predicted labels
        self.total_correct_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}

        for i in range(len(predicted_y)):
            # Extract the predicted and actual labels of the ith record
            predicted_label = predicted_y[i]
            actual_label = actual_y[i]

            # Increment the count of correctly predicted labels if the predicted and actual labels match
            if predicted_label == actual_label:
                self.total_correct_prediction_count[actual_label] += 1

            # Increment the total counts
            self.total_actual_count[actual_label] += 1
            self.total_prediction_count[predicted_label] += 1

        # Compute the F1 score of each label and return their mean
        return np.mean(self.__get_f1_scores__())
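

# A minimal usage sketch (not part of the original module): the label arrays
# below are illustrative assumptions, chosen only to show how Scorer.score()
# is expected to be called with labels drawn from the four classes {0, 1, 2, 3}.
if __name__ == "__main__":
    scorer = Scorer()

    # Hypothetical predicted and ground-truth labels for eight records
    predicted = [0, 1, 2, 3, 0, 1, 2, 2]
    actual = [0, 1, 2, 3, 1, 1, 2, 3]

    # score() returns the mean of the per-label F1 scores
    print("Challenge score:", scorer.score(predicted, actual))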