#!/usr/bin/env python
# coding: utf-8
# In[1]:

# ! pip3 install pydot
# ! pip3 install graphviz

# Attention: to plot the graph you also need Graphviz installed on your system

# In[2]:
import pandas as pd
import numpy as np
import copy
import math
from functools import reduce
from decimal import Decimal
import itertools
from graphviz import Digraph
import pydot
# In[3]:
def graph_from_dict(dictionary):
    '''
    Build a graphviz Digraph from a dict that maps each node name to the list of its parents.
    '''
    edge_style = ""
    g = Digraph()
    for k in dictionary.keys():
        # only draw nodes that have parents or that appear as a parent of another node
        if any(k in sub for sub in dictionary.values()) or dictionary[k]:
            g.node(str(k), k, shape='oval', fontsize='10', width='0', style='filled', fillcolor='#c9c9c9', color="gray")
    for k, i in dictionary.items():
        for it in i:
            g.edge(str(it), str(k), label='', style=edge_style, color='black')
    return g

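
# Example usage (a minimal sketch; the node names below are hypothetical). The dict maps
# every node to the list of its parents, so {'C': ['A', 'B']} draws the edges A -> C and B -> C:
#
# structure = {'A': [], 'B': ['A'], 'C': ['A', 'B']}
# g = graph_from_dict(structure)
# g.render('k2_network', format='png', cleanup=True)  # rendering requires Graphviz on the system
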
# In[5]:
def alpha(df, i, parents):
    '''
    Count N_ijk: for variable i of df, the number of cases in which the variable takes its
    k-th value while its parents take their j-th configuration. Variables are assumed binary
    (states are enumerated over {0, 1}); configurations never seen in the data get a count of zero.
    '''
    parents = np.sort(parents)
    # enumerate every (parent configuration, variable value) pair
    states = list(map(list, itertools.product([0, 1], repeat=len(parents) + 1)))
    states_mod = [["".join(map(str, sublist[:-1]))] + [str(sublist[-1])] for sublist in states]
    gpd_values = pd.DataFrame()
    if len(parents):
        # encode each parent configuration as a concatenated string, e.g. '01'
        label_parents = ''.join(parents)
        df_to_group = pd.DataFrame(
            columns=[label_parents, df.columns[i]],
            data=np.transpose(
                [df.astype(str)[parents].apply(lambda x: "".join(x), axis=1).values,
                 [str(item) for item in df[df.columns[i]]]]))
        gpd_values = df_to_group.groupby(by=[df_to_group[label_parents], df.columns[i]]).size()
        gpd_values = gpd_values.reset_index(name='size')
        # add the configurations that do not appear in the data with a count of zero
        for state in states_mod:
            if state not in gpd_values[[label_parents, df.columns[i]]].values.tolist():
                gpd_values.loc[len(gpd_values)] = state + [0]
        gpd_values.sort_values(by=[label_parents, df.columns[i]], inplace=True)
        gpd_values.reset_index(inplace=True)
        gpd_values = gpd_values['size']
    else:
        # no parents: simply count the occurrences of each value of the variable
        gpd_values = df.groupby(df.columns[i]).size().values
    return gpd_values

# In[6]:
def get_N(df, i, parents):
    '''
    Count N_ij: the number of cases in which the parents of variable i take their j-th
    configuration (variables assumed binary). With no parents it returns the total number
    of cases in df.
    '''
    parents = np.sort(parents)
    states = list(map(list, itertools.product([0, 1], repeat=len(parents))))
    N = []
    if len(parents):
        cols_to_group = [index for index in parents]
        cols_to_group.insert(0, df.columns[i])
        N = df[cols_to_group].groupby(cols_to_group[1:]).size()
        N = N.reset_index(name='size')
        # add parent configurations that do not appear in the data with a count of zero
        for state in states:
            if state not in N[cols_to_group[1:]].values.tolist():
                N.loc[len(N)] = state + [0]
        N.sort_values(by=cols_to_group[1:], inplace=True)
        N.reset_index(inplace=True)
        N = N['size']
    else:
        N = df.groupby(by=df.columns[i]).size().values.sum()
    return N

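
# A small sketch of how the two counting helpers are used (the DataFrame below is a
# hypothetical binary data set; column names and values are illustrative only):
#
# data = pd.DataFrame({'A': [0, 0, 1, 1, 1], 'B': [0, 1, 0, 1, 1], 'C': [0, 1, 1, 1, 0]})
# Nijk = alpha(data, 2, ['A', 'B'])    # counts of C = 0/1 under each configuration of (A, B)
# Nij = get_N(data, 2, ['A', 'B'])     # counts of each configuration of (A, B)
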
# In[7]:
def f_mdl(df, x_i, pi, c):
    '''
    Minimum Description Length (MDL) metric score implementation: the log-likelihood term
    for x_i given its parent set pi, minus a complexity penalty of (c/2)*log(N) per free
    parameter of the local distribution.
    '''
    N = len(df)
    r_i = len(df[df.columns[x_i]].unique())          # number of states of x_i
    q_i = reduce(lambda x, y: x*y, [len(pd.unique(df[pai].values)) for pai in pi]) if pi else 0
    Nij = get_N(df, x_i, pi)                         # parent-configuration counts
    Nijk = alpha(df, x_i, pi)                        # joint (configuration, value) counts
    pbs = 0
    if pi:
        for j in np.arange(0, q_i):
            for i in np.arange(0, r_i):
                # index 2*j + i addresses value i under configuration j (binary variables)
                if len(Nijk) > (2*j + i) and len(Nij) > j:
                    if Nijk[2*j + i] and Nij[j]:
                        pbs += Nijk[2*j + i]*(math.log(Nijk[2*j + i]) - math.log(Nij[j]))
                    elif Nij[j]:
                        pbs += -math.log(Nij[j])
        pbs += -(c/2)*math.log(N)*q_i*(r_i - 1)
    else:
        for i in np.arange(0, r_i):
            pbs += Nijk[i]*(math.log(Nijk[i]) - math.log(Nij))
        pbs += -(c/2)*math.log(N)*(r_i - 1)
    return pbs

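
# Example usage (a minimal sketch; 'data' is the hypothetical binary DataFrame sketched above,
# x_i is the positional index of the scored column and c weights the complexity penalty):
#
# score_no_parents = f_mdl(data, 2, [], 1)             # score of C with an empty parent set
# score_with_parents = f_mdl(data, 2, ['A', 'B'], 1)   # score of C with parents {A, B}
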
# In[8]:
def f_ch(df, x_i, pi):
    '''
    Cooper-Herskovits metric score (the score used by the K2 algorithm).
    You can substitute the factorial evaluations with log-sum evaluations when working
    with large data sets (a log-space sketch follows below).
    '''
    prod = 1
    # prod = 0
    r_i = len(df[df.columns[x_i]].unique())
    alfa = alpha(df, x_i, pi)
    q_i = reduce(lambda x, y: x*y, [len(pd.unique(df[pai].values)) for pai in pi]) if pi else 0
    Nij = get_N(df, x_i, pi)
    if pi:
        for j in np.arange(0, q_i):
            prod *= math.factorial(r_i - 1)/math.factorial(Nij[j] + r_i - 1)
            # prod += math.log(math.factorial(r_i - 1)) - math.log(math.factorial(Nij[j] + r_i - 1))
            for i in np.arange(0, r_i):
                prod *= math.factorial(alfa[2*j + i])
                # prod += math.log(math.factorial(alfa[2*j + i]))
    else:
        prod *= math.factorial(r_i - 1)/math.factorial(Nij + r_i - 1)
        # prod += math.log(math.factorial(r_i - 1)) - math.log(math.factorial(Nij + r_i - 1))
        for i in np.arange(0, r_i):
            # prod += math.log(math.factorial(alfa[i]))
            prod *= math.factorial(alfa[i])
    return prod

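
# The commented lines above show the log-sum alternative; a minimal self-contained sketch of it
# follows. f_ch_log is a hypothetical helper (not used elsewhere in this module) that returns the
# logarithm of the Cooper-Herskovits score using math.lgamma, where lgamma(n + 1) == log(n!):
def f_ch_log(df, x_i, pi):
    r_i = len(df[df.columns[x_i]].unique())
    alfa = alpha(df, x_i, pi)
    q_i = reduce(lambda x, y: x*y, [len(pd.unique(df[pai].values)) for pai in pi]) if pi else 0
    Nij = get_N(df, x_i, pi)
    log_prod = 0.0
    if pi:
        for j in np.arange(0, q_i):
            # log((r_i - 1)!) - log((Nij + r_i - 1)!)
            log_prod += math.lgamma(r_i) - math.lgamma(Nij[j] + r_i)
            for i in np.arange(0, r_i):
                log_prod += math.lgamma(alfa[2*j + i] + 1)   # log(Nijk!)
    else:
        log_prod += math.lgamma(r_i) - math.lgamma(Nij + r_i)
        for i in np.arange(0, r_i):
            log_prod += math.lgamma(alfa[i] + 1)
    return log_prod
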
# In[ ]:

# The K2 implementation was based on the literature provided in:
#
# Gregory F. Cooper and Edward Herskovits. A Bayesian method for the induction of probabilistic
# networks from data. Technical Report KSL-91-02, Knowledge Systems Laboratory, Medical Computer
# Science, Stanford University School of Medicine, Stanford, CA 94305-5479, updated Nov. 1993.
# Available at: http://smiweb.stanford.edu/pubs/SMI Abstracts/SMI-91-0355.html
#
# The MDL metric score was implemented based on the equations provided in:
#
# Bouckaert, R. R. Probabilistic network construction using the minimum description length
# principle. European Conference on Symbolic and Quantitative Approaches to Reasoning and
# Uncertainty. Springer, 1993, pp. 41-48.
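
# A minimal sketch of how the greedy parent-selection step described by Cooper and Herskovits
# can be driven by the scores above. k2_parents is a hypothetical helper (not part of this
# module): for one variable it repeatedly adds the predecessor that most increases f_ch and
# stops when no single addition improves the score or max_parents is reached.
#
# def k2_parents(df, x_i, predecessors, max_parents):
#     pi = []
#     p_old = f_ch(df, x_i, pi)
#     while len(pi) < max_parents:
#         candidates = [z for z in predecessors if z not in pi]
#         if not candidates:
#             break
#         p_new, best = max((f_ch(df, x_i, sorted(pi + [z])), z) for z in candidates)
#         if p_new > p_old:
#             pi.append(best)
#             p_old = p_new
#         else:
#             break
#     return pi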