Fix #1, add initial net specification to avoid dealing with manual
prototxt creation.

If this gains enough traction, I should look into the licensing of this
portion of code.
escorciav committed May 26, 2015
1 parent f954da5 commit c19bcdb
Showing 2 changed files with 174 additions and 0 deletions.
120 changes: 120 additions & 0 deletions src/create_proto.py
@@ -0,0 +1,120 @@
from collections import OrderedDict
import re

from caffe.proto import caffe_pb2
from google import protobuf

def uncamel(s):
    """Convert CamelCase to underscore_case."""

    return re.sub('(?!^)([A-Z])(?=[^A-Z])', r'_\1', s).lower()
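
# Illustrative examples:
#   uncamel('InnerProduct')    -> 'inner_product'
#   uncamel('SoftmaxWithLoss') -> 'softmax_with_loss'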

def assign_proto(proto, name, val):
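    """Assign a Python object to a protobuf message field, recursing into
    lists of dicts (repeated messages), lists of scalars (repeated fields),
    and dicts (nested messages)."""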
    if isinstance(val, list):
        if isinstance(val[0], dict):
            for item in val:
                proto_item = getattr(proto, name).add()
                for k, v in item.iteritems():
                    assign_proto(proto_item, k, v)
        else:
            getattr(proto, name).extend(val)
    elif isinstance(val, dict):
        for k, v in val.iteritems():
            assign_proto(getattr(proto, name), k, v)
    else:
        setattr(proto, name, val)
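
# Illustrative: assign_proto(layer, 'transform_param',
#                            dict(crop_size=227, mean_value=[104, 117, 123]))
# sets layer.transform_param.crop_size = 227 and extends the repeated
# mean_value field with [104, 117, 123].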

class Top(object):
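    """A Top is a handle to one of the (possibly several) output blobs of a
    Function; fn is the producing Function and n is the output index."""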
    def __init__(self, fn, n):
        self.fn = fn
        self.n = n

class Function(object):
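    """A Function specifies a layer type, its input Tops, and its parameters,
    and owns the Tops it produces."""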
    def __init__(self, type_name, inputs, params):
        self.type_name = type_name
        self.inputs = inputs
        self.params = params
        self.ntop = self.params.get('ntop', 1)
        if 'ntop' in self.params:
            del self.params['ntop']
        self.in_place = self.params.get('in_place', False)
        if 'in_place' in self.params:
            del self.params['in_place']
        self.tops = tuple(Top(self, n) for n in range(self.ntop))

    def _get_name(self, top, names, autonames):
        if top not in names:
            n = autonames.setdefault(top.fn.type_name, 1)
            autonames[top.fn.type_name] += 1
            names[top] = uncamel(top.fn.type_name) + str(n)
        return names[top]

    def _to_proto(self, layers, names, autonames):
        if self in layers:
            return
        bottom_names = []
        for inp in self.inputs:
            inp.fn._to_proto(layers, names, autonames)
            bottom_names.append(layers[inp.fn].top[inp.n])
        layer = caffe_pb2.LayerParameter()
        layer.type = self.type_name
        layer.bottom.extend(bottom_names)

        if self.in_place:
            layer.top.extend(layer.bottom)
        else:
            for top in self.tops:
                layer.top.append(self._get_name(top, names, autonames))
        layer.name = self._get_name(self.tops[0], names, autonames)

        for k, v in self.params.iteritems():
            # special case to handle generic *params
            if k.endswith('param'):
                assign_proto(layer, k, v)
            else:
                try:
                    assign_proto(getattr(layer,
                                         uncamel(self.type_name) + '_param'),
                                 k, v)
                except AttributeError:
                    assign_proto(layer, k, v)

        layers[self] = layer

class NetSpec(object):
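    """A NetSpec collects Tops assigned directly as attributes; to_proto()
    serializes every layer needed to produce them into a NetParameter,
    naming each Top after the attribute used to store it."""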
    def __init__(self):
        super(NetSpec, self).__setattr__('tops', OrderedDict())

    def __setattr__(self, name, value):
        self.tops[name] = value

    def __getattr__(self, name):
        return self.tops[name]

    def to_proto(self):
        names = {v: k for k, v in self.tops.iteritems()}
        autonames = {}
        layers = OrderedDict()
        for name, top in self.tops.iteritems():
            top.fn._to_proto(layers, names, autonames)
        net = caffe_pb2.NetParameter()
        net.layer.extend(layers.values())
        return net

class Layers(object):
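    """Layers provides a constructor for each layer type: attribute access
    (e.g. layers.Convolution) returns a function that builds the layer and
    returns its Top(s)."""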
    def __getattr__(self, name):
        def layer_fn(*args, **kwargs):
            fn = Function(name, args, kwargs)
            if fn.ntop == 1:
                return fn.tops[0]
            else:
                return fn.tops
        return layer_fn

class Parameters(object):
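    """Parameters exposes the protobuf enums, e.g. params.Pooling.MAX
    resolves to caffe_pb2.PoolingParameter.MAX."""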
    def __getattr__(self, name):
        class Param:
            def __getattr__(self, param_name):
                return getattr(getattr(caffe_pb2, name + 'Parameter'),
                               param_name)
        return Param()

layers = Layers()
params = Parameters()
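
A minimal usage sketch for the module above (illustrative; the layer and
parameter names mirror the test file below, and the lmdb path is a
placeholder):

from create_proto import layers as L, params as P, NetSpec

net = NetSpec()
net.data, net.label = L.Data(source='/path/to/lmdb', backend=P.Data.LMDB,
                             batch_size=32, ntop=2)
net.ip = L.InnerProduct(net.data, num_output=10)
net.loss = L.SoftmaxWithLoss(net.ip, net.label)
print net.to_proto()  # a NetParameter prints as prototxt text format
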
54 changes: 54 additions & 0 deletions src/test/test_create_proto.py
@@ -0,0 +1,54 @@
from caffe.proto import caffe_pb2

from create_proto import layers as L, params as P, NetSpec

# helper functions for common structures

def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group)
    return conv, L.ReLU(conv, in_place=True)
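
# Note: with in_place=True the ReLU layer's top reuses its bottom's blob name,
# so conv and relu share a single blob in the generated prototxt.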

def fc_relu(bottom, nout):
    fc = L.InnerProduct(bottom, num_output=nout)
    return fc, L.ReLU(fc, in_place=True)

def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)

def alexnet(lmdb, batch_size=256, include_acc=False):
    net = NetSpec()
    net.data, net.label = L.Data(source=lmdb, backend=P.Data.LMDB,
        batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123],
                             mirror=True))

    # the net itself
    net.conv1, net.relu1 = conv_relu(net.data, 11, 96, stride=4)
    net.pool1 = max_pool(net.relu1, 3, stride=2)
    net.norm1 = L.LRN(net.pool1, local_size=5, alpha=1e-4, beta=0.75)
    net.conv2, net.relu2 = conv_relu(net.norm1, 5, 256, pad=2, group=2)
    net.pool2 = max_pool(net.relu2, 3, stride=2)
    net.norm2 = L.LRN(net.pool2, local_size=5, alpha=1e-4, beta=0.75)
    net.conv3, net.relu3 = conv_relu(net.norm2, 3, 384, pad=1)
    net.conv4, net.relu4 = conv_relu(net.relu3, 3, 384, pad=1, group=2)
    net.conv5, net.relu5 = conv_relu(net.relu4, 3, 256, pad=1, group=2)
    net.pool5 = max_pool(net.relu5, 3, stride=2)
    net.fc6, net.relu6 = fc_relu(net.pool5, 4096)
    net.drop6 = L.Dropout(net.relu6, in_place=True)
    net.fc7, net.relu7 = fc_relu(net.drop6, 4096)
    net.drop7 = L.Dropout(net.relu7, in_place=True)
    net.fc8 = L.InnerProduct(net.drop7, num_output=1000)
    net.loss = L.SoftmaxWithLoss(net.fc8, net.label)

    if include_acc:
        net.acc = L.Accuracy(net.fc8, net.label)
    return net.to_proto()

def make_net():
    with open('train.prototxt', 'w') as f:
        print >>f, alexnet('/path/to/caffe-train-lmdb')

    with open('test.prototxt', 'w') as f:
        print >>f, alexnet('/path/to/caffe-val-lmdb', batch_size=50,
                           include_acc=True)

if __name__ == '__main__':
    make_net()
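
With src/ on the PYTHONPATH, running python src/test/test_create_proto.py
writes train.prototxt and test.prototxt. As a sketch of the output (field
order follows caffe.proto), the final loss layer serializes as:

layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc8"
  bottom: "label"
  top: "loss"
}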
