acquisition_functions.py
# -*- coding: utf-8 -*-
# @Author: Andre Goncalves
# @Date: 2019-08-07 09:46:39
# @Last Modified by: Andre Goncalves
# @Last Modified time: 2019-10-29 15:32:03
import numpy as np
import scipy.stats as stats


def random(pk):
    """Random acquisition function. Generates a random
    vector of scores, so the corresponding samples are
    ranked in random order.

    Args:
        pk (np.array): array of class probability predictions,
                       one row per sample.

    Returns:
        np.array: array of random values, one per sample.
    """
    return np.random.rand(pk.shape[0])


def entropy(pk):
    """Compute entropy directly from the probability vector.

    Args:
        pk (np.array): array of probabilities. Each row is the
                       vector of class probabilities for one sample.

    Returns:
        np.array: array of entropies for all samples.
    """
    h = np.zeros((pk.shape[0], ))
    for i in range(pk.shape[0]):
        h[i] = stats.entropy(pk[i, :])
    return h
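

# A vectorized alternative to the loop above (an illustrative sketch, not
# part of the original module). stats.entropy uses the natural log by
# default, so the same values can be computed with plain NumPy; the
# clipping and row renormalization are assumptions made here to guard
# against log(0). Recent SciPy releases also expose an `axis` argument
# on stats.entropy for the same purpose.
def entropy_vectorized(pk):
    safe_pk = np.clip(pk, 1e-12, None)                       # avoid log(0)
    safe_pk = safe_pk / safe_pk.sum(axis=1, keepdims=True)   # renormalize rows
    return -np.sum(safe_pk * np.log(safe_pk), axis=1)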


def margin_sampling(pk):
    """Compute margin sampling from the probability vector.

    Args:
        pk (np.array): array of probabilities. Each row is the
                       vector of class probabilities for one sample.

    Returns:
        np.array: array of (negative) margins for all samples;
                  higher values correspond to more uncertain samples.
    """
    # sort each row in descending order so the two largest
    # probabilities end up in the first two columns
    rev = np.sort(pk, axis=-1)[:, ::-1]
    h = rev[:, 0] - rev[:, 1]
    # negate so that a small margin (high uncertainty) yields a high score
    return -h


def least_confidence(pk):
    """Compute least-confidence from the probability vector.

    Args:
        pk (np.array): array of probabilities. Each row is the
                       vector of class probabilities for one sample.

    Returns:
        np.array: array of least-confidence values for all samples;
                  higher values correspond to more uncertain samples.
    """
    # sort each row in descending order so the largest probability
    # (the predicted class) is in the first column
    rev = np.sort(pk, axis=-1)[:, ::-1]
    # least confidence: 1 minus the probability of the predicted class
    h = 1.0 - rev[:, 0]
    return h
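

# Illustrative check (not part of the original module): for a toy matrix
#     pk = np.array([[0.70, 0.20, 0.10],
#                    [0.40, 0.35, 0.25]])
# margin_sampling(pk) gives [-0.50, -0.05] and least_confidence(pk) gives
# [0.30, 0.60], so the second (more ambiguous) sample receives the higher
# acquisition score under both criteria.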


def abstention(pk):
    """Extract the abstention probability from the probability vector.

    Args:
        pk (np.array): array of probabilities. Each row is the
                       vector of class probabilities for one sample,
                       with the abstention class in the last column.

    Returns:
        np.array: array of abstention probabilities.
    """
    return pk[:, -1].ravel()


def abstention_entropy_amean(pk):
    """Compute the arithmetic mean of abstention probability and entropy.

    Args:
        pk (np.array): array of probabilities. Each row is the
                       vector of class probabilities for one sample,
                       with the abstention class in the last column.

    Returns:
        np.array: arithmetic mean of entropy and abstention
                  probability for each sample.
    """
    h = 0.5 * entropy(pk) + 0.5 * pk[:, -1].ravel()
    return h


def abstention_entropy_hmean(pk):
    """Compute the harmonic mean of abstention probability and entropy.

    Args:
        pk (np.array): array of probabilities. Each row is the
                       vector of class probabilities for one sample,
                       with the abstention class in the last column.

    Returns:
        np.array: harmonic mean of entropy and abstention
                  probability for each sample.
    """
    h0 = entropy(pk)
    h1 = pk[:, -1].ravel()
    # small constant in the denominator avoids division by zero when
    # both entropy and abstention probability are zero
    h = (2 * h0 * h1) / (h0 + h1 + 1e-12)
    return h
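

# Minimal usage sketch (an illustration, not part of the original module):
# rank a batch of predictions with each acquisition function. It assumes a
# classifier that outputs one probability row per sample, with the
# abstention class as the last column for the abstention-based scores.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # fake predictions: 5 samples, 3 regular classes + 1 abstention class
    raw = rng.rand(5, 4)
    probs = raw / raw.sum(axis=1, keepdims=True)

    acquisition_fns = {
        "random": random,
        "entropy": entropy,
        "margin_sampling": margin_sampling,
        "least_confidence": least_confidence,
        "abstention": abstention,
        "abstention_entropy_amean": abstention_entropy_amean,
        "abstention_entropy_hmean": abstention_entropy_hmean,
    }
    for name, fn in acquisition_fns.items():
        scores = fn(probs)
        # higher score = higher labeling priority; report the top sample
        print(name, "-> most informative sample:", int(np.argmax(scores)))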