-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmodel20-equal-tracks.py
226 lines (157 loc) · 7.51 KB
/
model20-equal-tracks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
#!/usr/bin/env python
# model using tracks only in a way that individual
# track information is used and no hierarchy or
# ordering is imposed on the tracks
#----------------------------------------------------------------------
# (default) model parameters
#----------------------------------------------------------------------
# size and number of hidden layers on input side
#----------------------------------------------------------------------
# function to prepare input data samples
#----------------------------------------------------------------------
def makeInput(dataset, rowIndices, inputDataIsSparse):
    """Prepare per-photon track input samples.

    For each photon row in rowIndices, collects the variables of all tracks
    belonging to that row into one float32 2D array of shape
    (numTracks, 4) with columns (pt, deta, dphi, charge), where deta/dphi
    are taken with respect to the photon's maximum-rechit position.

    :param dataset: dict-like with numpy arrays under 'tracks' (keys
        'etaAtVertex', 'phiAtVertex', 'firstIndex', 'numTracks', 'pt',
        'charge') and under 'phoVars/maxRecHitEta', 'phoVars/maxRecHitPhi'
    :param rowIndices: iterable of photon (row) indices to process
    :param inputDataIsSparse: must be True; dense input is not supported
    :return: nested list indexed as [vertexType][photonIndex] -> 2D numpy
        array of tracks (size varies from event to event); currently
        exactly one vertex type is produced
    """
    assert inputDataIsSparse, "non-sparse input data is not supported"

    etaAtVertex = dataset['tracks']['etaAtVertex']
    phiAtVertex = dataset['tracks']['phiAtVertex']

    maxRecHitEta = dataset['phoVars/maxRecHitEta']
    maxRecHitPhi = dataset['phoVars/maxRecHitPhi']

    trackFirstIndex = dataset['tracks']['firstIndex']
    numTracks = dataset['tracks']['numTracks']
    trackPt = dataset['tracks']['pt']
    charge = dataset['tracks']['charge']

    # columns of the per-track feature array: pt, deta, dphi, charge
    numVarsPerTrack = 4

    # NOTE(review): the original code foresaw several per-vertex track
    # selections (same vertex, worst-iso vertex, second-worst-iso vertex,
    # other vertices) but the selection lambdas were never applied: all
    # tracks of a row are used unconditionally.  Only one vertex type was
    # ever produced, hence the single-element outer list returned below.
    thisVertexValues = []

    for photonIndex in rowIndices:
        nTracks = numTracks[photonIndex]
        thisVal = np.zeros((nTracks, numVarsPerTrack), dtype='float32')

        offset = trackFirstIndex[photonIndex]
        trkInd = slice(offset, offset + nTracks)

        # assign variables vectorized over the row's tracks
        thisVal[:, 0] = trackPt[trkInd]
        thisVal[:, 1] = etaAtVertex[trkInd] - maxRecHitEta[photonIndex]
        thisVal[:, 2] = phiAtVertex[trkInd] - maxRecHitPhi[photonIndex]
        thisVal[:, 3] = charge[trkInd]

        thisVertexValues.append(thisVal)

    # first index is vertex type, second index is photon index,
    # value is a 2D array of tracks
    return [thisVertexValues]
#----------------------------------------------------------------------
import operator

import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Net(nn.Module):
    """Deep-set style network over per-photon track lists.

    Each track (a 4-vector of features) is passed through a shared
    'input side' MLP; the per-track outputs are averaged (a
    permutation-invariant aggregation, so no ordering is imposed on the
    tracks) and the average is fed through a 'common side' MLP ending in
    a single sigmoid output.
    """

    def __init__(self,
                 numLayersInputSide = 5,
                 widthInputSide = 50,
                 numLayersCommonSide = 5,
                 widthCommonSide = 50,
                 ):
        """
        :param numLayersInputSide: number of layers in the shared per-track MLP
        :param widthInputSide: width of each input-side layer
        :param numLayersCommonSide: number of layers in the MLP applied to the
            averaged track representation (the last layer has one output)
        :param widthCommonSide: width of the hidden common-side layers
        """
        super(Net, self).__init__()

        #----------
        # input side layers (shared across tracks)
        #----------
        numInputs = 4  # per-track features: pt, deta, dphi, charge
        self.inputSideLayers = []
        for i in range(numLayersInputSide):
            layer = nn.Linear(numInputs, widthInputSide)
            self.inputSideLayers.append(layer)
            # register so the parameters are visible to the optimizer
            self.add_module("iLayer%d" % i, layer)
            numInputs = widthInputSide

        #----------
        # output (common) side layers
        #----------
        numInputs = widthInputSide
        self.commonSideLayers = []
        for i in range(numLayersCommonSide):
            # last layer produces the single network output
            numOutputs = 1 if i == numLayersCommonSide - 1 else widthCommonSide
            layer = nn.Linear(numInputs, numOutputs)
            self.commonSideLayers.append(layer)
            self.add_module("oLayer%d" % i, layer)
            numInputs = numOutputs

        # neutral element fed to the common-side network for rows with no
        # tracks.  BUGFIX: this must match the input dimension of the first
        # common-side layer, which is widthInputSide; the original used
        # widthCommonSide, which crashes whenever the two widths differ
        # (it only worked because both defaults are 50).
        self.noTracksIntermediateOutput = Variable(torch.zeros(1, widthInputSide))
        # NOTE(review): 'cuda' is expected to be injected as a module global
        # by the surrounding training framework -- confirm against caller
        if cuda:
            self.noTracksIntermediateOutput = self.noTracksIntermediateOutput.cuda()

    #----------------------------------------

    def forward(self, dataset, indices):
        """Run the network on the given minibatch rows.

        :param dataset: output of makeInput(): a list with one entry (one
            vertex type); that entry is a list of per-photon 2D float32
            arrays of shape (numTracks, numVars)
        :param indices: minibatch row indices into dataset[0]
        :return: 1D tensor of sigmoid outputs, one per entry of indices
        """
        # we only have one vertex type
        thisVtxData = dataset[0]

        # overall output for the entire minibatch
        outputs = []

        # loop over minibatch entries
        for index in indices:
            # 2D input: first index is the track within the row,
            # second index is the variable index
            numPoints = thisVtxData[index].shape[0]

            if numPoints > 0:
                h = Variable(torch.from_numpy(thisVtxData[index]))
                if cuda:
                    h = h.cuda()

                # forward all tracks through the shared input-side network
                for layer in self.inputSideLayers:
                    h = F.relu(layer(h))

                # average the input-side outputs over the tracks.
                # BUGFIX: keepdim=True keeps the (1, width) shape the rest of
                # this method relies on; the bare sum(0) only produced that
                # shape under the keepdim-by-default semantics of very old
                # PyTorch and crashes at the final [:, 0] on modern versions.
                output = h.sum(0, keepdim=True) / numPoints
            else:
                # no tracks in this row: use the neutral placeholder
                output = self.noTracksIntermediateOutput

            # feed through the common-side network
            h = output
            lastLayerIndex = len(self.commonSideLayers) - 1
            for layerIndex, layer in enumerate(self.commonSideLayers):
                h = layer(h)
                if layerIndex == lastLayerIndex:
                    # sigmoid at the output of the last layer
                    # (torch.sigmoid: F.sigmoid is deprecated, same math)
                    h = torch.sigmoid(h)
                else:
                    h = F.relu(h)

            outputs.append(h)
        # end of loop over minibatch entries

        # convert the list of (1, 1) outputs to a 1D tensor
        return torch.cat(outputs, 0)[:, 0]

    #----------------------------------------

    def getNumOutputNodes(self):
        # signal to the framework that no loss-function specific output
        # sizing / loss function should be applied
        return None
#----------------------------------------------------------------------
def makeModel():
    """Factory entry point used by the training framework.

    Returns a freshly constructed Net with its default hyperparameters.
    """
    model = Net()
    return model
#----------------------------------------------------------------------