forked from liulangxing/edgeai-benchmark
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path settings_import_on_pc.yaml
22 lines (18 loc) · 1.01 KB
/
settings_import_on_pc.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
# include_files is a special keyword - other files that need to be merged with this dict
include_files: ['settings_base.yaml']

# important parameter. set this to 'pc' to do import and inference on pc.
# set this to 'evm' to run inference on device. for inference on device, run_import
# below should be switched off and it is assumed that the artifacts are already created.
# supported values: 'evm' 'pc'
target_machine: 'pc'

# run import of the model - only to be used on pc - set this to false for evm.
# for pc this can be true or false
run_import: true

# run inference - for inference on evm, it is assumed that the artifacts folders are already available
run_inference: true

# for parallel execution on pc only (cpu or gpu).
# number of parallel processes to run.
# for example 8 will mean 8 models will run in parallel
# for example 1 will mean one model will run (but in a separate process from that of the main process)
# null will mean one process will run, in the same process as the main
parallel_processes: 1  # null