# Identification of this CM script
alias: app-mlperf-inference-nvidia
uid: 689e865b0059479b

automation_alias: script
automation_uid: 5b4e0237da074764

category: "Modular MLPerf benchmarks"

# User-friendly tags to find this CM script
tags:
  - app
  - mlcommons
  - mlperf
  - inference
  - nvidia-harness
  - nvidia
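
# Usage sketch (assuming the CM CLI from the cmind package is installed):
# the tags above are how CM locates this script, e.g.
#
#   cm run script --tags=app,mlperf,inference,nvidia-harness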

# Default environment
default_env:
  CM_BATCH_COUNT: '1'
  CM_BATCH_SIZE: '1'
  CM_FAST_COMPILATION: "yes"
  CM_MLPERF_LOADGEN_SCENARIO: Offline
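
# These defaults are a starting point; standard CM syntax should let them be
# overridden per run with the --env. prefix (a hedged sketch), e.g.
#
#   cm run script --tags=app,mlperf,inference,nvidia \
#       --env.CM_MLPERF_LOADGEN_SCENARIO=Server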

# Map script inputs to environment variables
input_mapping:
  count: CM_MLPERF_LOADGEN_QUERY_COUNT
  max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
  mlperf_conf: CM_MLPERF_CONF
  mode: CM_MLPERF_LOADGEN_MODE
  output_dir: CM_MLPERF_OUTPUT_DIR
  performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
  scenario: CM_MLPERF_LOADGEN_SCENARIO
  user_conf: CM_MLPERF_USER_CONF
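
# Worked example of the mapping above: each named input becomes the listed
# environment variable, so
#
#   cm run script --tags=app,mlperf,inference,nvidia \
#       --scenario=Offline --mode=performance --count=100
#
# runs with CM_MLPERF_LOADGEN_SCENARIO=Offline,
# CM_MLPERF_LOADGEN_MODE=performance and CM_MLPERF_LOADGEN_QUERY_COUNT=100.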

new_env_keys:
  - CM_MLPERF_*
  - CM_DATASET_*
  - CM_HW_NAME
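
# Only environment variables matching the patterns above are passed back to
# the calling CM context: for example, CM_MLPERF_OUTPUT_DIR (matches
# CM_MLPERF_*) is exported, while CM_BATCH_COUNT is not.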

# Dependencies on other CM scripts
deps:

  # Detect host OS features
  - tags: detect,os

  # Detect host CPU features
  - tags: detect,cpu

  # Install system dependencies on a given host
  - tags: get,sys-utils-cm

  # Detect CUDA
  - tags: get,cuda,_cudnn

  # Detect TensorRT
  - tags: get,tensorrt

  # Detect Google Logger
  - tags: get,generic,sys-util,_glog-dev

  # Detect GFlags
  - tags: get,generic,sys-util,_gflags-dev

  ########################################################################
  # Install MLPerf inference dependencies

  # Install MLPerf loadgen
  - tags: get,loadgen
    names:
      - loadgen

  # Download MLPerf inference source
  - tags: get,mlcommons,inference,src
    names:
      - inference-src

  # Download the NVIDIA submission code
  - tags: get,nvidia,mlperf,inference,common-code
    names:
      - nvidia-inference-common-code

  ########################################################################
  # Install ResNet50 model (ONNX) and ImageNet

  - enable_if_env:
      CM_MODEL:
        - resnet50
    names:
      - imagenet-preprocessed
    tags: get,dataset,preprocessed,imagenet,_NCHW

  - enable_if_env:
      CM_MODEL:
        - resnet50
    names:
      - ml-model
      - resnet50-model
    tags: get,ml-model,resnet50,_onnx

  ########################################################################
  # Install RetinaNet model (ONNX) and OpenImages

  - enable_if_env:
      CM_MODEL:
        - retinanet
    names:
      - openimages-preprocessed
    tags: get,dataset,preprocessed,openimages,_validation,_NCHW

  - enable_if_env:
      CM_MODEL:
        - retinanet
    names:
      - ml-model
      - retinanet-model
    tags: get,ml-model,retinanet,_onnx,_fp32

  # Generate the TensorRT engine (skipped on CPU-only targets)
  - skip_if_env:
      CM_MLPERF_DEVICE:
        - cpu
    names:
      - tensorrt-engine-generator
    tags: generate,nvidia,engine

  # Create the user conf for a given SUT
  - tags: generate,user-conf,mlperf,inference
    names:
      - user-conf-generator
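
# Customization sketch: the `names` attached to the dependencies above let a
# user retarget them from the command line with CM's --adr prefix (standard
# CM behavior, shown here with illustrative tags), e.g.
#
#   cm run script --tags=app,mlperf,inference,nvidia \
#       --adr.ml-model.tags=_onnx,_fp32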

# Post dependencies to compile and run this app
post_deps:

  # Compile the C++ harness
  - names:
      - compile-program
    tags: compile,cpp-program

  # Run the benchmark
  - names:
      - runner
    tags: benchmark,program

# Variations to customize dependencies
variations:

  # Target devices
  cpu:
    group: device
    default: true
    env:
      CM_MLPERF_DEVICE: cpu
  cuda:
    group: device
    env:
      CM_MLPERF_DEVICE: gpu
      CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart

  # Target frameworks
  pytorch:
    group: framework
    env:
      CM_MLPERF_BACKEND: pytorch

  # Reference MLPerf models
  resnet50:
    group: model
    default: true
    env:
      CM_MODEL: resnet50
  retinanet:
    group: model
    env:
      CM_MODEL: retinanet

  # Batch size ("#" is substituted with the requested value)
  batch_size.#:
    env:
      CM_MODEL_BATCH_SIZE: "#"
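
# Selection sketch: variations are picked with a leading underscore in the
# tag list (the same convention used by the dependency tags above), e.g.
# RetinaNet on CUDA with batch size 32:
#
#   cm run script --tags=app,mlperf,inference,nvidia,_retinanet,_cuda,_batch_size.32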