```
Traceback (most recent call last):
  File "/ossfs/workspace/vector/model/GLM-main/finetune_glm.py", line 470, in <module>
    main(args)
  File "/ossfs/workspace/vector/model/GLM-main/tasks/superglue/finetune.py", line 119, in main
    finetune(args, train_valid_datasets_provider, model_kwargs,
  File "/ossfs/workspace/vector/model/GLM-main/finetune_glm.py", line 287, in finetune
    tokenizer = prepare_tokenizer(args)
  File "/ossfs/workspace/vector/model/GLM-main/configure_data.py", line 124, in prepare_tokenizer
    tokenizer = make_tokenizer(args.tokenizer_type, None, args.tokenizer_path, args.vocab_size,
  File "/ossfs/workspace/vector/model/GLM-main/data_utils/tokenization.py", line 50, in make_tokenizer
    return ChineseSPTokenizer(fix_command_token=fix_command_token, **kwargs)
  File "/ossfs/workspace/vector/model/GLM-main/data_utils/tokenization.py", line 1140, in __init__
    self.text_tokenizer = sp_tokenizer.from_pretrained()
  File "/ossfs/workspace/vector/model/GLM-main/data_utils/sp_tokenizer.py", line 150, in from_pretrained
    return get_encoder(PRETRAINED_MODEL_FILE, "")
  File "/ossfs/workspace/vector/model/GLM-main/data_utils/sp_tokenizer.py", line 136, in get_encoder
    return Encoder_SP(encoder_file)
  File "/ossfs/workspace/vector/model/GLM-main/data_utils/sp_tokenizer.py", line 101, in __init__
    self.sp.Load(model_path)
  File "/opt/conda/lib/python3.8/site-packages/sentencepiece/__init__.py", line 905, in Load
    return self.LoadFromFile(model_file)
  File "/opt/conda/lib/python3.8/site-packages/sentencepiece/__init__.py", line 310, in LoadFromFile
    return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
OSError: Not found: "chinese_sentencepiece/cog-pretrain.model": No such file or directory Error #2
```
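For context, the failing frame is SentencePiece trying to load the model file named by `PRETRAINED_MODEL_FILE` in `data_utils/sp_tokenizer.py`. That path is relative (`chinese_sentencepiece/cog-pretrain.model`), so it is resolved against the current working directory. Below is a minimal standalone sketch of the same load, assuming only that the `sentencepiece` package is installed; the constant name `MODEL_PATH` and the error message text here are illustrative, not taken from the GLM code:

```python
import os
import sentencepiece as spm

# Relative path the GLM code looks for; it is resolved against the
# current working directory, not the repository root.
MODEL_PATH = "chinese_sentencepiece/cog-pretrain.model"

if not os.path.isfile(MODEL_PATH):
    # This is the situation that produces the OSError in the traceback:
    # the .model file is not present at the expected relative location.
    raise FileNotFoundError(
        f"SentencePiece model not found at {os.path.abspath(MODEL_PATH)}; "
        "place cog-pretrain.model under chinese_sentencepiece/ in the "
        "directory you launch the script from."
    )

sp = spm.SentencePieceProcessor()
sp.Load(MODEL_PATH)  # same call that fails inside sp_tokenizer.py
print(sp.EncodeAsPieces("测试一下分词"))
```

Running this sketch from the GLM-main directory (or wherever the script is launched) confirms whether the model file is visible from that working directory before starting the full finetuning run.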