Skip to content

Commit

Permalink
Fixed forced installation of cuDNN and TensorRT when --tar_file is specified
Browse files Browse the repository at this point in the history
  • Loading branch information
gfursin committed Mar 1, 2023
1 parent 7bcac7f commit ddcb130
Show file tree
Hide file tree
Showing 4 changed files with 103 additions and 113 deletions.
2 changes: 1 addition & 1 deletion cm-mlops/script/get-cl/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def preprocess(i):
'env_path_key':'CM_CL_BIN_WITH_PATH',
'run_script_input':i['run_script_input'],
'recursion_spaces':recursion_spaces}

rr = automation.find_artifact(ii)
if rr['return'] >0 :
# If not found in PATH, try a longer search
Expand Down
1 change: 0 additions & 1 deletion cm-mlops/script/get-cuda-devices/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@ def postprocess(i):
key_env = 'CM_CUDA_DEVICE_PROP_'+key.upper().replace(' ','_')
env[key_env] = val


state['cm_cuda_device_prop'] = p

return {'return':0}
120 changes: 55 additions & 65 deletions cm-mlops/script/get-cudnn/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,71 +34,61 @@ def preprocess(i):

recursion_spaces = i['recursion_spaces']

# if os_info['platform'] == 'windows':
# return {'return': 1, 'error': 'Windows is currently not supported for cudnn installation!'}
#
# if 'CM_TMP_PATH' in env:
# tmp_path = env['CM_TMP_PATH'].split(":")
# else:
# tmp_path = []
#
# for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
# if(os.path.exists(lib_path)):
# tmp_path.append(lib_path)

if os_info['platform'] == 'windows':
if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='':
# Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA"
paths = []
for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]:
if os.path.isdir(path):
dirs = os.listdir(path)
for dr in dirs:
path2 = os.path.join(path, dr, 'lib')
if os.path.isdir(path2):
paths.append(path2)

if len(paths)>0:
tmp_paths = ';'.join(paths)
tmp_paths += ';'+os.environ.get('PATH','')

env['CM_TMP_PATH'] = tmp_paths
env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

else:
# paths to cuda are not always in PATH - add a few typical locations to search for
# (unless forced by a user)

if env.get('CM_INPUT','').strip()=='':
cm_tmp_path = env.get('CM_TMP_PATH','').strip()
if cm_tmp_path!='':
cm_tmp_path+=':'
cm_tmp_path+='/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib'
env['CM_TMP_PATH'] = cm_tmp_path
env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
if(os.path.exists(lib_path)):
env['CM_TMP_PATH']+=':'+lib_path

r = i['automation'].find_artifact({'file_name': libfilename,
'env': env,
'os_info':os_info,
'default_path_env_key': 'LD_LIBRARY_PATH',
'detect_version':False,
'env_path_key':'CM_CUDA_PATH_LIB_CUDNN',
'run_script_input':i['run_script_input'],
'recursion_spaces':recursion_spaces})
if r['return'] >0 :
if os_info['platform'] == 'windows':
return r

if r['return'] == 16:
env['CM_TMP_REQUIRE_INSTALL'] = "yes"
else:
return r
else:
return {'return':0}
# If TAR file is not explicitly specified, search
if env.get('CM_CUDNN_TAR_FILE_PATH','')=='':
if env.get('CM_INPUT','').strip()=='':
if os_info['platform'] == 'windows':
if env.get('CM_TMP_PATH','').strip()=='':
# Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA"
paths = []
for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]:
if os.path.isdir(path):
dirs = os.listdir(path)
for dr in dirs:
path2 = os.path.join(path, dr, 'lib')
if os.path.isdir(path2):
paths.append(path2)

if len(paths)>0:
tmp_paths = ';'.join(paths)
tmp_paths += ';'+os.environ.get('PATH','')

env['CM_TMP_PATH'] = tmp_paths
env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

else:
# paths to cuda are not always in PATH - add a few typical locations to search for
# (unless forced by a user)

cm_tmp_path = env.get('CM_TMP_PATH','').strip()
if cm_tmp_path!='':
cm_tmp_path+=':'
cm_tmp_path+='/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib'
env['CM_TMP_PATH'] = cm_tmp_path
env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
if(os.path.exists(lib_path)):
env['CM_TMP_PATH']+=':'+lib_path

r = i['automation'].find_artifact({'file_name': libfilename,
'env': env,
'os_info':os_info,
'default_path_env_key': 'LD_LIBRARY_PATH',
'detect_version':False,
'env_path_key':'CM_CUDA_PATH_LIB_CUDNN',
'run_script_input':i['run_script_input'],
'recursion_spaces':recursion_spaces})
if r['return'] >0 :
if os_info['platform'] == 'windows':
return r

if r['return'] == 16:
env['CM_TMP_REQUIRE_INSTALL'] = "yes"
else:
return r
else:
return {'return':0}

if env.get('CM_HOST_OS_MACHINE','') == "aarch64":
return {'return': 1, 'error': 'Tar file installation is not available for cudnn on aarch64. Please do a package manager install!'}
Expand Down
93 changes: 47 additions & 46 deletions cm-mlops/script/get-tensorrt/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,52 +29,53 @@ def preprocess(i):
if not env.get('CM_TMP_PATH'):
env['CM_TMP_PATH'] = ''

if os_info['platform'] == 'windows':
if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='':
# Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA"
paths = []
for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]:
if os.path.isdir(path):
dirs = os.listdir(path)
for dr in dirs:
path2 = os.path.join(path, dr, 'lib')
if os.path.isdir(path2):
paths.append(path2)

if len(paths)>0:
tmp_paths = ';'.join(paths)
tmp_paths += ';'+os.environ.get('PATH','')

env['CM_TMP_PATH'] = tmp_paths
env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

else:
# paths to cuda are not always in PATH - add a few typical locations to search for
# (unless forced by a user)

if env.get('CM_INPUT','').strip()=='':
if env.get('CM_TMP_PATH','').strip()!='':
env['CM_TMP_PATH']+=':'

env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
if(os.path.exists(lib_path)):
env['CM_TMP_PATH']+=':'+lib_path

r = i['automation'].find_artifact({'file_name': libfilename,
'env': env,
'os_info':os_info,
'default_path_env_key': 'LD_LIBRARY_PATH',
'detect_version':False,
'env_path_key':'CM_TENSORRT_LIB_WITH_PATH',
'run_script_input':i['run_script_input'],
'recursion_spaces':recursion_spaces})
if r['return'] >0 :
if os_info['platform'] == 'windows':
return r
else:
return {'return':0}
if env.get('CM_TENSORRT_TAR_FILE_PATH','')=='':
if os_info['platform'] == 'windows':
if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='':
# Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA"
paths = []
for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]:
if os.path.isdir(path):
dirs = os.listdir(path)
for dr in dirs:
path2 = os.path.join(path, dr, 'lib')
if os.path.isdir(path2):
paths.append(path2)

if len(paths)>0:
tmp_paths = ';'.join(paths)
tmp_paths += ';'+os.environ.get('PATH','')

env['CM_TMP_PATH'] = tmp_paths
env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

else:
# paths to cuda are not always in PATH - add a few typical locations to search for
# (unless forced by a user)

if env.get('CM_INPUT','').strip()=='':
if env.get('CM_TMP_PATH','').strip()!='':
env['CM_TMP_PATH']+=':'

env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'

for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
if(os.path.exists(lib_path)):
env['CM_TMP_PATH']+=':'+lib_path

r = i['automation'].find_artifact({'file_name': libfilename,
'env': env,
'os_info':os_info,
'default_path_env_key': 'LD_LIBRARY_PATH',
'detect_version':False,
'env_path_key':'CM_TENSORRT_LIB_WITH_PATH',
'run_script_input':i['run_script_input'],
'recursion_spaces':recursion_spaces})
if r['return'] >0 :
if os_info['platform'] == 'windows':
return r
else:
return {'return':0}

if env.get('CM_HOST_OS_MACHINE','') == "aarch64":
return {'return': 1, 'error': 'Tar file installation is not available for cudnn on aarch64. Please do a package manager install!'}
Expand Down

0 comments on commit ddcb130

Please sign in to comment.