Merge pull request fastmachinelearning#195 from vloncar/csim_integration
QKeras, predict/trace, API enhancements
Showing 54 changed files with 18,627 additions and 1,344 deletions.
hls4ml/__init__.py
@@ -1,4 +1,5 @@
 from __future__ import absolute_import

-from . import converters
-from . import report
+from hls4ml import converters
+from hls4ml import report
+from hls4ml import utils
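The switch from relative to absolute imports, plus the new utils export, means the subpackages resolve off the top-level package in the usual way. A minimal sanity check (assuming an installed hls4ml):

import hls4ml

# After this change the subpackages are reachable directly:
hls4ml.converters   # model conversion front ends
hls4ml.report       # report utilities
hls4ml.utils        # config utilities, newly re-exported here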
hls4ml/converters/__init__.py
@@ -1,23 +1,90 @@
 from __future__ import absolute_import
+import os
+import importlib

-from .keras_to_hls import keras_to_hls
+from hls4ml.utils.config import create_vivado_config
+
+from hls4ml.converters.keras_to_hls import keras_to_hls, get_supported_keras_layers, register_keras_layer_handler
+
+for module in os.listdir(os.path.dirname(__file__) + '/keras'):
+    if module == '__init__.py' or module[-3:] != '.py':
+        continue
+    try:
+        lib = importlib.import_module(__name__ + '.keras.' + module[:-3])
+        for name, func in list(lib.__dict__.items()):
+            # if 'func' is callable (i.e., function, class...)
+            # and has 'handles' attribute
+            # and is defined in this module (i.e., not imported)
+            if callable(func) and hasattr(func, 'handles') and func.__module__ == lib.__name__:
+                for layer in func.handles:
+                    register_keras_layer_handler(layer, func)
+    except ImportError:
+        continue

 try:
-    from .pytorch_to_hls import pytorch_to_hls
+    from hls4ml.converters.pytorch_to_hls import pytorch_to_hls
     __pytorch_enabled__ = True
 except ImportError:
     __pytorch_enabled__ = False

 try:
-    from .onnx_to_hls import onnx_to_hls
+    from hls4ml.converters.onnx_to_hls import onnx_to_hls
     __onnx_enabled__ = True
 except ImportError:
     __onnx_enabled__ = False

 try:
-    from .tf_to_hls import tf_to_hls
+    from hls4ml.converters.tf_to_hls import tf_to_hls
     __tensorflow_enabled__ = True
 except ImportError:
     __tensorflow_enabled__ = False


+def convert_from_yaml_config(yamlConfig):
+    model = None
+    if 'OnnxModel' in yamlConfig:
+        if __onnx_enabled__:
+            model = onnx_to_hls(yamlConfig)
+        else:
+            raise Exception("ONNX not found. Please install ONNX.")
+    elif 'PytorchModel' in yamlConfig:
+        if __pytorch_enabled__:
+            model = pytorch_to_hls(yamlConfig)
+        else:
+            raise Exception("PyTorch not found. Please install PyTorch.")
+    elif 'TensorFlowModel' in yamlConfig:
+        if __tensorflow_enabled__:
+            model = tf_to_hls(yamlConfig)
+        else:
+            raise Exception("TensorFlow not found. Please install TensorFlow.")
+    else:
+        model = keras_to_hls(yamlConfig)
+
+    return model
+
+def convert_from_keras_model(model, output_dir='my-hls-test', project_name='myproject',
+                             fpga_part='xcku115-flvb2104-2-i', clock_period=5, hls_config={}):
+    config = create_vivado_config(output_dir=output_dir,
+        project_name=project_name, fpga_part=fpga_part, clock_period=clock_period)
+    config['KerasModel'] = model
+
+    model_config = hls_config.get('Model', None)
+    if model_config is not None:
+        if not all(k in model_config for k in ('Precision', 'ReuseFactor')):
+            raise Exception('Precision and ReuseFactor must be provided in the hls_config')
+    else:
+        model_config = {}
+        model_config['Precision'] = 'ap_fixed<16,6>'
+        model_config['ReuseFactor'] = '1'
+    config['HLSConfig']['Model'] = model_config
+
+    if 'LayerName' in hls_config:
+        config['HLSConfig']['LayerName'] = hls_config['LayerName']
+
+    if 'LayerType' in hls_config:
+        config['HLSConfig']['LayerType'] = hls_config['LayerType']
+
+    if 'Optimizers' in hls_config:
+        config['HLSConfig']['Optimizers'] = hls_config['Optimizers']
+
+    return keras_to_hls(config)
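With this addition a user can go straight from an in-memory Keras model to an HLS project without writing a YAML file. A minimal usage sketch (the toy model is illustrative, not part of the commit; assumes tf.keras is available):

from tensorflow import keras
import hls4ml

# Toy model, purely for illustration
model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(16,)),
    keras.layers.Dense(10, activation='softmax'),
])

# With no 'Model' key in hls_config, the defaults above apply:
# Precision='ap_fixed<16,6>', ReuseFactor='1'
hls_model = hls4ml.converters.convert_from_keras_model(model, output_dir='my-hls-test')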
hls4ml/converters/keras/__init__.py (new, empty file)
hls4ml/converters/keras/convolution.py
@@ -0,0 +1,99 @@
+import math
+from hls4ml.converters.keras_to_hls import parse_default_keras_layer
+from hls4ml.converters.keras_to_hls import keras_handler
+
+
+@keras_handler('Conv1D')
+def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert('Conv1D' in keras_layer['class_name'])
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    # weights_shape = (filter_width, n_channels, n_filters)
+    weights_shape = data_reader.get_weights_shape(layer['name'], 'kernel')
+    layer['n_in'] = input_shapes[0][1]
+    layer['filt_width'] = weights_shape[0] # or keras_layer['config']['kernel_size']
+    layer['n_chan'] = weights_shape[1]
+    layer['n_filt'] = weights_shape[2] # or keras_layer['config']['filters']
+    layer['stride'] = keras_layer['config']['strides'][0]
+    layer['padding'] = keras_layer['config']['padding']
+    if layer['padding'] == 'same':
+        in_width = input_shapes[0][1]
+        layer['n_out'] = int(math.ceil(float(in_width) / float(layer['stride'])))
+        if (in_width % layer['stride'] == 0):
+            pad_along_width = max(layer['filt_width'] - layer['stride'], 0)
+        else:
+            pad_along_width = max(layer['filt_width'] - (in_width % layer['stride']), 0)
+        layer['pad_left'] = pad_along_width // 2
+        layer['pad_right'] = pad_along_width - layer['pad_left']
+    elif layer['padding'] == 'valid':
+        in_width = input_shapes[0][1]
+        layer['n_out'] = int(math.ceil(float(in_width - layer['filt_width'] + 1) / float(layer['stride'])))
+        layer['pad_left'] = 0
+        layer['pad_right'] = 0
+    layer['data_format'] = keras_layer['config'].get('data_format', 'channels_last')
+    output_shape = [input_shapes[0][0], layer['n_out'], layer['n_filt']]
+
+    return layer, output_shape
+
+
+@keras_handler('Conv2D')
+def parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert('Conv2D' in keras_layer['class_name'])
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    layer['data_format'] = keras_layer['config'].get('data_format', 'channels_last')
+    # weights_shape = (filter_height, filter_width, n_channels, n_filters)
+    weights_shape = data_reader.get_weights_shape(layer['name'], 'kernel')
+    layer['in_height'] = input_shapes[0][1]
+    layer['in_width'] = input_shapes[0][2]
+    if layer['data_format'] == 'channels_first':
+        layer['in_height'] = input_shapes[0][2]
+        layer['in_width'] = input_shapes[0][3]
+    layer['filt_height'] = weights_shape[0]
+    layer['filt_width'] = weights_shape[1]
+    layer['n_chan'] = weights_shape[2]
+    layer['n_filt'] = weights_shape[3]
+    layer['stride_height'] = keras_layer['config']['strides'][0]
+    layer['stride_width'] = keras_layer['config']['strides'][1]
+    layer['padding'] = keras_layer['config']['padding']
+    if layer['padding'] == 'same':
+        # Height
+        in_height = input_shapes[0][1]
+        if layer['data_format'] == 'channels_first': in_height = input_shapes[0][2]
+        layer['out_height'] = int(math.ceil(float(in_height) / float(layer['stride_height'])))
+        if (in_height % layer['stride_height'] == 0):
+            pad_along_height = max(layer['filt_height'] - layer['stride_height'], 0)
+        else:
+            pad_along_height = max(layer['filt_height'] - (in_height % layer['stride_height']), 0)
+        layer['pad_top'] = pad_along_height // 2
+        layer['pad_bottom'] = pad_along_height - layer['pad_top']
+        # Width
+        in_width = input_shapes[0][2]
+        if layer['data_format'] == 'channels_first': in_width = input_shapes[0][3]
+        layer['out_width'] = int(math.ceil(float(in_width) / float(layer['stride_width'])))
+        if (in_width % layer['stride_width'] == 0):
+            pad_along_width = max(layer['filt_width'] - layer['stride_width'], 0)
+        else:
+            pad_along_width = max(layer['filt_width'] - (in_width % layer['stride_width']), 0)
+        layer['pad_left'] = pad_along_width // 2
+        layer['pad_right'] = pad_along_width - layer['pad_left']
+    elif layer['padding'] == 'valid':
+        in_height = input_shapes[0][1]
+        in_width = input_shapes[0][2]
+        if layer['data_format'] == 'channels_first':
+            in_height = input_shapes[0][2]
+            in_width = input_shapes[0][3]
+        layer['out_width'] = int(math.ceil(float(in_width - layer['filt_width'] + 1) / float(layer['stride_width'])))
+        layer['out_height'] = int(math.ceil(float(in_height - layer['filt_height'] + 1) / float(layer['stride_height'])))
+        layer['pad_top'] = 0
+        layer['pad_bottom'] = 0
+        layer['pad_left'] = 0
+        layer['pad_right'] = 0
+    if layer['data_format'] == 'channels_first':
+        output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']]
+    else:
+        output_shape = [input_shapes[0][0], layer['out_height'], layer['out_width'], layer['n_filt']]
+
+    return layer, output_shape
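The 'same'-padding branch reproduces TensorFlow's padding convention. A standalone numeric check of the Conv1D arithmetic (the layer dimensions are made up for illustration):

import math

# Illustrative values, not from the diff: Conv1D with
# in_width=100, filt_width=3, stride=2 and padding='same'.
in_width, filt_width, stride = 100, 3, 2

n_out = int(math.ceil(float(in_width) / float(stride)))    # 50
if in_width % stride == 0:
    pad_along_width = max(filt_width - stride, 0)          # 1
else:
    pad_along_width = max(filt_width - (in_width % stride), 0)
pad_left = pad_along_width // 2                            # 0
pad_right = pad_along_width - pad_left                     # 1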
hls4ml/converters/keras/core.py
@@ -0,0 +1,130 @@
+import numpy as np
+
+from hls4ml.converters.keras_to_hls import parse_default_keras_layer
+from hls4ml.converters.keras_to_hls import keras_handler
+
+from hls4ml.model.hls_model import Quantizer
+from hls4ml.model.hls_model import IntegerPrecisionType
+
+@keras_handler('InputLayer')
+def parse_input_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert(keras_layer['class_name'] == 'InputLayer')
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:]
+    if keras_layer['config']['dtype'] == 'int32':
+        layer['type_name'] = 'integer_input_t'
+        layer['precision'] = IntegerPrecisionType(width=32)
+    output_shape = keras_layer['config']['batch_input_shape']
+
+    return layer, output_shape
+
+
+@keras_handler('Reshape')
+def parse_reshape_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert(keras_layer['class_name'] == 'Reshape')
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    layer['target_shape'] = keras_layer['config']['target_shape']
+    output_shape = input_shapes[0][:1] + keras_layer['config']['target_shape']
+
+    return layer, output_shape
+
+
+class BinaryQuantizer(Quantizer):
+    def __init__(self, bits=2):
+        if bits == 1:
+            hls_type = IntegerPrecisionType(width=1, signed=False)
+        elif bits == 2:
+            hls_type = IntegerPrecisionType(width=2)
+        else:
+            raise Exception('BinaryQuantizer supports 1 or 2 bits, but called with bits={}'.format(bits))
+        super(BinaryQuantizer, self).__init__(bits, hls_type)
+
+    def __call__(self, data):
+        zeros = np.zeros_like(data)
+        ones = np.ones_like(data)
+        quant_data = data
+        if self.bits == 1:
+            quant_data = np.where(data > 0, ones, zeros).astype('int')
+        if self.bits == 2:
+            quant_data = np.where(data > 0, ones, -ones)
+        return quant_data
+
+class TernaryQuantizer(Quantizer):
+    def __init__(self):
+        super(TernaryQuantizer, self).__init__(2, IntegerPrecisionType(width=2))
+
+    def __call__(self, data):
+        zeros = np.zeros_like(data)
+        ones = np.ones_like(data)
+        return np.where(data > 0.5, ones, np.where(data <= -0.5, -ones, zeros))
+
+
+dense_layers = ['Dense', 'BinaryDense', 'TernaryDense']
+@keras_handler(*dense_layers)
+def parse_dense_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert('Dense' in keras_layer['class_name'])
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    weights_shape = data_reader.get_weights_shape(layer['name'], 'kernel')
+    layer['n_in'] = weights_shape[0]
+    layer['n_out'] = weights_shape[1]
+    if 'Binary' in layer['class_name']:
+        layer['weight_quantizer'] = BinaryQuantizer(bits=2)
+        layer['bias_quantizer'] = BinaryQuantizer(bits=2)
+    elif 'Ternary' in layer['class_name']:
+        layer['weight_quantizer'] = TernaryQuantizer()
+        layer['bias_quantizer'] = TernaryQuantizer()
+    else:
+        layer['weight_quantizer'] = None
+        layer['bias_quantizer'] = None
+    output_shape = [input_shapes[0][0], layer['n_out']]
+
+    return layer, output_shape
+
+
+activation_layers = ['Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU']
+@keras_handler(*activation_layers)
+def parse_activation_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert(keras_layer['class_name'] in activation_layers)
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    if layer['class_name'] != 'Activation':
+        layer['activation'] = layer['class_name']
+    if layer['class_name'] == 'LeakyReLU':
+        layer['activ_param'] = keras_layer['config'].get('alpha', 0.3)
+    elif layer['class_name'] == 'ThresholdedReLU':
+        layer['activ_param'] = keras_layer['config'].get('theta', 1.)
+    elif layer['class_name'] == 'ELU':
+        layer['activ_param'] = keras_layer['config'].get('alpha', 1.)
+
+    if layer['class_name'] == 'Activation' and layer['activation'] == 'softmax':
+        layer['class_name'] = 'Softmax'
+
+    return layer, [shape for shape in input_shapes[0]]
+
+
+@keras_handler('BatchNormalization')
+def parse_batchnorm_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert('BatchNormalization' in keras_layer['class_name'])
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    in_size = 1
+    for dim in input_shapes[0][1:]:
+        in_size *= dim
+    layer['n_in'] = in_size
+    layer['n_out'] = layer['n_in']
+    if len(input_shapes[0]) == 2:
+        layer['n_filt'] = -1
+    elif len(input_shapes[0]) == 3:
+        layer['n_filt'] = input_shapes[0][2]
+    elif len(input_shapes[0]) == 4:
+        layer['n_filt'] = input_shapes[0][3]
+
+    return layer, [shape for shape in input_shapes[0]]
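A quick illustration of the two quantizers' behavior (the sample array is made up; assumes BinaryQuantizer and TernaryQuantizer from the file above are in scope):

import numpy as np

w = np.array([-0.7, -0.2, 0.1, 0.9])

binary = BinaryQuantizer(bits=2)   # maps to {-1, +1} by sign
ternary = TernaryQuantizer()       # maps to {-1, 0, +1}, zero inside (-0.5, 0.5]

print(binary(w))    # [-1. -1.  1.  1.]
print(ternary(w))   # [-1.  0.  0.  1.]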