# settings_import_on_pc.yaml
# execution pipeline type - currently only accuracy pipeline is defined
pipeline_type : 'accuracy'
# important parameter. set this to 'pc' to do import and inference on pc.
# set this to 'j7' to run inference on device. for inference on device, run_import
# below should be switched off and it is assumed that the artifacts are already created.
# supported values: 'j7' 'pc'
target_device : 'pc'
# quantization bit precision
# options are: 8 16 32
tensor_bits : 8
# run import (compilation) of the model - only applicable on pc - set this to False for the j7 evm
# on pc this can be True or False
run_import : True
# run inference - for inference on the j7 evm, it is assumed that the artifacts folders are already available
run_inference : True
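# for example, a typical on-device run would flip the two settings above - a sketch,
# assuming the compiled artifacts already exist under modelartifacts_path:
# target_device : 'j7'
# run_import : False
# run_inference : True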
# for parallel execution on pc only (cpu or gpu).
# specify either a list of integers for parallel execution or null for sequential execution
# if you are not using cuda compiled tidl on pc, the actual numbers in the list don't matter,
# but the size of the list determines the number of parallel processes
# if you have cuda compiled tidl, these integers will be used for CUDA_VISIBLE_DEVICES. eg. [0,1,2,3,0,1,2,3]
# null will run the models sequentially.
parallel_devices : [0,1,2,3]
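# illustrative alternatives (sketches, not recommendations):
# parallel_devices : null               # run all models sequentially
# parallel_devices : [0,0,0,0]          # 4 parallel processes; the values are ignored without cuda compiled tidl
# parallel_devices : [0,1,2,3,0,1,2,3]  # 8 processes spread over 4 gpus via CUDA_VISIBLE_DEVICES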
# number of frames for inference
num_frames : 10000 #50000
# number of frames to be used for post training quantization / calibration
calibration_frames : 50 #100
# number of iterations to be used for post training quantization / calibration
calibration_iterations : 50
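# a quick-trial sketch with reduced calibration effort - note that 8-bit accuracy
# generally improves with more calibration frames and iterations:
# calibration_frames : 10
# calibration_iterations : 10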
# runtime_options to be passed to the core session. default: null or a dict
# eg. (on the next line, with preceding spaces to indicate it is a dict entry) accuracy_level : 0
# runtime_options :
#   accuracy_level: 1 #this is automatically set to 1 if you set tensor_bits to 8
#   advanced_options:output_feature_16bit_names_list: '363,561' #layers that you want to be treated as 16-bit
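# written out as actual yaml, the accuracy_level example above becomes (an illustrative sketch):
# runtime_options :
#   accuracy_level : 0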
# folder where benchmark configs are defined. this should be python importable
# if this is null, the internally defined minimal set of configs will be used
configs_path : './configs'
# folder where models are available
models_path : '../edgeai-modelzoo/models'
# create your datasets under this folder
datasets_path : './dependencies/datasets'
# path where precompiled modelartifacts are placed
modelartifacts_path : './work_dirs/modelartifacts'
# session types to use for each model type
session_type_dict : {'onnx':'onnxrt', 'tflite':'tflitert', 'mxnet':'tvmdlr'}
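# for instance, to route onnx models through the tvm/dlr flow instead - a sketch,
# assuming 'tvmdlr' can compile the onnx models in question:
# session_type_dict : {'onnx':'tvmdlr', 'tflite':'tflitert', 'mxnet':'tvmdlr'}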
# wild card list to match against model_path, model_id or model_type - if null, all models will be shortlisted
# only models matching these criteria will be considered - even for model_selection
# examples: ['onnx'] ['tflite'] ['mxnet'] ['onnx', 'tflite']
# examples: ['resnet18.onnx', 'resnet50_v1.tflite'] ['classification'] ['imagenet1k'] ['torchvision']
model_selection : null
# models to be excluded from running
# example: ['voc']
# example: ['cityscapes']
# example: ['vcls-10-306-0', 'vcls-10-404-0', 'vcls-10-434-0', 'vcls-10-442-0', 'vseg-16-300-0', 'vseg-16-301-0', 'vseg-16-400-0']
model_exclusion : null
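# the two filters can be combined - e.g. this sketch shortlists onnx models but skips
# one specific model id (the id is taken from the examples above):
# model_selection : ['onnx']
# model_exclusion : ['vcls-10-306-0']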
# wild card list to match against the tasks. if null, all tasks will be run
# example: ['classification', 'detection', 'segmentation']
# example: 'classification'
# example: null (Note: null means no filter - run all the tasks)
task_selection : null
# wild card list to match against dataset type - if null, all datasets will be shortlisted
# example: ['coco']
# example: ['imagenet','cocoseg21','ade20k','cocokpts']
dataset_loading : null
# verbose mode - print out more information
verbose : False
# detection threshold
detection_thr : 0.3
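# e.g. a lower threshold could be used when measuring detection mAP - a common practice,
# not a setting prescribed by this file:
# detection_thr : 0.05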
# save detection, segmentation, human pose estimation output
save_output : False
# whether to use udp (unbiased data processing) postprocessing in human pose estimation.
# Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
# Data Processing for Human Pose Estimation (CVPR 2020).
with_udp : True
# if True, horizontally flipped images are added to info_dict and inference is run on the flipped images as well
flip_test : False
# the transformation that needs to be applied to the model itself. Note: this is different from pre-processing transforms
model_transformation_dict : null
# enable use of experimental models - the actual model files are not available in modelzoo
experimental_models : False
# dataset type to use if there are multiple variants for each dataset
# imagenetv2c is available for quick download - so use it in the release branch
dataset_type_dict:
'imagenet': 'imagenetv2c'
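# other variants could be selected the same way - the variant name below is hypothetical,
# check the datasets package for the names actually registered:
# dataset_type_dict:
#   'imagenet': 'imagenetv1'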