Commit

Merge pull request BVLC#2086 from longjon/python-net-spec
Python net specification
longjon authored and twerdster committed Jul 17, 2015
2 parents a70af4a + beb7151 commit c81b580
Showing 2 changed files with 149 additions and 0 deletions.
54 changes: 54 additions & 0 deletions examples/python_nets/caffenet.py
@@ -0,0 +1,54 @@
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2

# helper functions for common structures

def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group)
    return conv, L.ReLU(conv, in_place=True)

def fc_relu(bottom, nout):
    fc = L.InnerProduct(bottom, num_output=nout)
    return fc, L.ReLU(fc, in_place=True)

def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)

def alexnet(lmdb, batch_size=256, include_acc=False):
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                         transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))

    # the net itself
    conv1, relu1 = conv_relu(data, 11, 96, stride=4)
    pool1 = max_pool(relu1, 3, stride=2)
    norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
    conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
    pool2 = max_pool(relu2, 3, stride=2)
    norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
    conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
    conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
    conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
    pool5 = max_pool(relu5, 3, stride=2)
    fc6, relu6 = fc_relu(pool5, 4096)
    drop6 = L.Dropout(relu6, in_place=True)
    fc7, relu7 = fc_relu(drop6, 4096)
    drop7 = L.Dropout(relu7, in_place=True)
    fc8 = L.InnerProduct(drop7, num_output=1000)
    loss = L.SoftmaxWithLoss(fc8, label)

    if include_acc:
        acc = L.Accuracy(fc8, label)
        return to_proto((loss, acc), {v: k for k, v in locals().iteritems()})
    else:
        return to_proto(loss, {v: k for k, v in locals().iteritems()})

def make_net():
    with open('train.prototxt', 'w') as f:
        print >>f, alexnet('/path/to/caffe-train-lmdb')

    with open('test.prototxt', 'w') as f:
        print >>f, alexnet('/path/to/caffe-val-lmdb', batch_size=50, include_acc=True)

if __name__ == '__main__':
    make_net()
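
The files written by make_net() are plain prototxt. A quick way to sanity-check them is to parse one back into a NetParameter and list the layers in the order they were emitted; this is a minimal sketch, not part of the commit, using only the standard protobuf text_format API:

# Minimal sketch (not part of this commit): parse the generated train.prototxt
# back into a NetParameter and print the layer names in emission order.
from caffe.proto import caffe_pb2
from google.protobuf import text_format

net = caffe_pb2.NetParameter()
with open('train.prototxt') as f:
    text_format.Merge(f.read(), net)

# Expect names like ['data', 'conv1', 'relu1', ...]; in-place layers (ReLU,
# Dropout) keep their own names but reuse their bottom blob as their top.
print [layer.name for layer in net.layer]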
95 changes: 95 additions & 0 deletions python/caffe/layers.py
@@ -0,0 +1,95 @@
from collections import OrderedDict
import re

from .proto import caffe_pb2
from google import protobuf

def uncamel(s):
    """Convert CamelCase to underscore_case."""

    return re.sub('(?!^)([A-Z])(?=[^A-Z])', r'_\1', s).lower()

def assign_proto(proto, name, val):
    if isinstance(val, list):
        getattr(proto, name).extend(val)
    elif isinstance(val, dict):
        for k, v in val.iteritems():
            assign_proto(getattr(proto, name), k, v)
    else:
        setattr(proto, name, val)

def to_proto(tops, names):
    if not isinstance(tops, tuple):
        tops = (tops,)
    layers = OrderedDict()
    for top in tops:
        top.fn._to_proto(layers, names)

    net = caffe_pb2.NetParameter()
    net.layer.extend(layers.values())
    return net

class Top:
    def __init__(self, fn, n):
        self.fn = fn
        self.n = n

class Function:
    def __init__(self, type_name, inputs, params):
        self.type_name = type_name
        self.inputs = inputs
        self.params = params
        self.ntop = self.params.get('ntop', 1)
        if 'ntop' in self.params:
            del self.params['ntop']
        self.in_place = self.params.get('in_place', False)
        if 'in_place' in self.params:
            del self.params['in_place']
        self.tops = tuple(Top(self, n) for n in range(self.ntop))

    def _to_proto(self, layers, names):
        bottom_names = []
        for inp in self.inputs:
            if inp.fn not in layers:
                inp.fn._to_proto(layers, names)
            bottom_names.append(layers[inp.fn].top[inp.n])
        layer = caffe_pb2.LayerParameter()
        layer.type = self.type_name
        layer.bottom.extend(bottom_names)

        if self.in_place:
            layer.top.extend(layer.bottom)
            layer.name = names[self.tops[0]]
        else:
            for top in self.tops:
                layer.top.append(names[top])
            layer.name = layer.top[0]

        for k, v in self.params.iteritems():
            # special case to handle generic *params
            if k.endswith('param'):
                assign_proto(layer, k, v)
            else:
                assign_proto(getattr(layer, uncamel(self.type_name) + '_param'), k, v)

        layers[self] = layer

class Layers:
    def __getattr__(self, name):
        def layer_fn(*args, **kwargs):
            fn = Function(name, args, kwargs)
            if fn.ntop == 1:
                return fn.tops[0]
            else:
                return fn.tops
        return layer_fn

class Parameters:
    def __getattr__(self, name):
        class Param:
            def __getattr__(self, param_name):
                return getattr(getattr(caffe_pb2, name + 'Parameter'), param_name)
        return Param()

layers = Layers()
params = Parameters()
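
The module-level layers and params objects are what caffenet.py imports as L and P. A minimal round-trip sketch of the API, importing straight from this module and using a placeholder LMDB path and hypothetical layer names ('data', 'ip'):

# Minimal sketch: imports straight from the module above (caffenet.py uses the
# shorter 'from caffe import ...' form, which assumes the package re-exports
# these names); the LMDB path and layer names are placeholders.
from caffe.layers import layers as L, params as P, to_proto

data = L.Data(source='/path/to/lmdb', backend=P.Data.LMDB, batch_size=64)
ip = L.InnerProduct(data, num_output=10)

# to_proto takes the output Top(s) plus a dict mapping each Top to its name;
# caffenet.py builds that dict from locals().
names = {data: 'data', ip: 'ip'}
print to_proto(ip, names)  # prints the NetParameter in prototxt text format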
