ariG23498 (HF Staff) committed
Commit 8c833f0 · verified · 1 Parent(s): b2af4dd

Upload utter-project_EuroLLM-9B_2.txt with huggingface_hub

Files changed (1)
  1. utter-project_EuroLLM-9B_2.txt +15 -29
utter-project_EuroLLM-9B_2.txt CHANGED
@@ -8,6 +8,21 @@ model = AutoModelForCausalLM.from_pretrained("utter-project/EuroLLM-9B")
 
 ERROR:
 Traceback (most recent call last):
+  File "/tmp/utter-project_EuroLLM-9B_21djTTe.py", line 24, in <module>
+    tokenizer = AutoTokenizer.from_pretrained("utter-project/EuroLLM-9B")
+  File "/tmp/.cache/uv/environments-v2/31d9dd746308ee4e/lib/python3.13/site-packages/transformers/models/auto/tokenization_auto.py", line 1140, in from_pretrained
+    return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/tmp/.cache/uv/environments-v2/31d9dd746308ee4e/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2097, in from_pretrained
+    return cls._from_pretrained(
+           ~~~~~~~~~~~~~~~~~~~~^
+        resolved_vocab_files,
+        ^^^^^^^^^^^^^^^^^^^^^
+    ...<9 lines>...
+        **kwargs,
+        ^^^^^^^^^
+    )
+    ^
   File "/tmp/.cache/uv/environments-v2/31d9dd746308ee4e/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2343, in _from_pretrained
     tokenizer = cls(*init_inputs, **init_kwargs)
   File "/tmp/.cache/uv/environments-v2/31d9dd746308ee4e/lib/python3.13/site-packages/transformers/models/llama/tokenization_llama_fast.py", line 154, in __init__
@@ -25,32 +40,3 @@ Traceback (most recent call last):
     ...<2 lines>...
     )
 ValueError: Cannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you have sentencepiece installed.
-
-During handling of the above exception, another exception occurred:
-
-Traceback (most recent call last):
-  File "/tmp/utter-project_EuroLLM-9B_2eLuKOM.py", line 19, in <module>
-    tokenizer = AutoTokenizer.from_pretrained("utter-project/EuroLLM-9B")
-  File "/tmp/.cache/uv/environments-v2/31d9dd746308ee4e/lib/python3.13/site-packages/transformers/models/auto/tokenization_auto.py", line 1140, in from_pretrained
-    return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
-           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/31d9dd746308ee4e/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2097, in from_pretrained
-    return cls._from_pretrained(
-           ~~~~~~~~~~~~~~~~~~~~^
-        resolved_vocab_files,
-        ^^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-        **kwargs,
-        ^^^^^^^^^
-    )
-    ^
-  File "/tmp/.cache/uv/environments-v2/31d9dd746308ee4e/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2344, in _from_pretrained
-    except import_protobuf_decode_error():
-           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^
-  File "/tmp/.cache/uv/environments-v2/31d9dd746308ee4e/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 87, in import_protobuf_decode_error
-    raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message))
-ImportError:
-requires the protobuf library but it was not found in your environment. Check out the instructions on the
-installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
-that match your environment. Please note that you may need to restart your runtime after installation.
-
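
For context, both tracebacks come from loading the EuroLLM-9B tokenizer and model with transformers. Below is a minimal sketch of that load path, assuming an environment where the packages the errors point to (sentencepiece, and protobuf in the removed traceback) have been installed first; the install command and the prompt string are illustrative assumptions, not part of this commit.

# Assumed prerequisite (not part of this commit):
#   pip install transformers sentencepiece protobuf
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "utter-project/EuroLLM-9B"

# AutoTokenizer resolves to the fast Llama tokenizer (tokenization_llama_fast.py
# in the traceback); converting it from the slow sentencepiece model is the step
# that raises the ValueError/ImportError above when those packages are missing.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))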