ariG23498 (HF Staff) committed
Commit 79ecc97 · verified · 1 Parent(s): 1f3d7be

Upload nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt with huggingface_hub

nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0.txt CHANGED
@@ -4,7 +4,8 @@ from diffusers import DiffusionPipeline
 from diffusers.utils import load_image
 
 # switch to "mps" for apple devices
-pipe = DiffusionPipeline.from_pretrained("nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora", dtype=torch.bfloat16, device_map="cuda")
+pipe = DiffusionPipeline.from_pretrained("nvidia/ChronoEdit-14B-Diffusers", dtype=torch.bfloat16, device_map="cuda")
+pipe.load_lora_weights("nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora")
 
 prompt = "Turn this cat into a dog"
 input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
@@ -14,75 +15,33 @@ image = pipe(image=input_image, prompt=prompt).images[0]
 
 ERROR:
 Traceback (most recent call last):
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/utils/_http.py", line 402, in hf_raise_for_status
-    response.raise_for_status()
-    ~~~~~~~~~~~~~~~~~~~~~~~~~^^
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/requests/models.py", line 1026, in raise_for_status
-    raise HTTPError(http_error_msg, response=self)
-requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora/resolve/main/model_index.json
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/tmp/nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0YHqQsW.py", line 28, in <module>
-    pipe = DiffusionPipeline.from_pretrained("nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora", dtype=torch.bfloat16, device_map="cuda")
+  File "/tmp/nvidia_ChronoEdit-14B-Diffusers-Upscaler-Lora_0Mzpkav.py", line 28, in <module>
+    pipe = DiffusionPipeline.from_pretrained("nvidia/ChronoEdit-14B-Diffusers", dtype=torch.bfloat16, device_map="cuda")
   File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
     return fn(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/diffusers/pipelines/pipeline_utils.py", line 833, in from_pretrained
-    cached_folder = cls.download(
-        pretrained_model_name_or_path,
-        ...<14 lines>...
-        **kwargs,
+  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/diffusers/pipelines/pipeline_utils.py", line 1025, in from_pretrained
+    loaded_sub_model = load_sub_model(
+        library_name=library_name,
+        ...<21 lines>...
+        quantization_config=quantization_config,
     )
+  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/diffusers/pipelines/pipeline_loading_utils.py", line 860, in load_sub_model
+    loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
   File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
     return fn(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/diffusers/pipelines/pipeline_utils.py", line 1490, in download
-    config_file = hf_hub_download(
-        pretrained_model_name,
-        ...<5 lines>...
-        token=token,
-    )
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
-    return fn(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 1007, in hf_hub_download
-    return _hf_hub_download_to_cache_dir(
-        # Destination
-        ...<14 lines>...
-        force_download=force_download,
-    )
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 1070, in _hf_hub_download_to_cache_dir
-    (url_to_download, etag, commit_hash, expected_size, xet_file_data, head_call_error) = _get_metadata_or_catch_error(
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
-        repo_id=repo_id,
-        ^^^^^^^^^^^^^^^^
-        ...<10 lines>...
-        relative_filename=relative_filename,
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/diffusers/models/modeling_utils.py", line 1288, in from_pretrained
+    ) = cls._load_pretrained_model(
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~^
+        model,
+        ^^^^^^
+        ...<13 lines>...
+        is_parallel_loading_enabled=is_parallel_loading_enabled,
+        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     )
     ^
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 1543, in _get_metadata_or_catch_error
-    metadata = get_hf_file_metadata(
-        url=url, proxies=proxies, timeout=etag_timeout, headers=headers, token=token, endpoint=endpoint
-    )
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
-    return fn(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 1460, in get_hf_file_metadata
-    r = _request_wrapper(
-        method="HEAD",
-        ...<5 lines>...
-        timeout=timeout,
-    )
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 283, in _request_wrapper
-    response = _request_wrapper(
-        method=method,
-        ...<2 lines>...
-        **params,
-    )
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 307, in _request_wrapper
-    hf_raise_for_status(response)
-    ~~~~~~~~~~~~~~~~~~~^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/huggingface_hub/utils/_http.py", line 413, in hf_raise_for_status
-    raise _format(EntryNotFoundError, message, response) from e
-huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-6914338f-799fcd1b7e9cd21d39fac62b;c8cff68a-f516-4527-b975-d33f49677a26)
-
-Entry Not Found for url: https://huggingface.co/nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora/resolve/main/model_index.json.
+  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/diffusers/models/modeling_utils.py", line 1537, in _load_pretrained_model
+    _caching_allocator_warmup(model, expanded_device_map, dtype, hf_quantizer)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/tmp/.cache/uv/environments-v2/46417a0e48757af0/lib/python3.13/site-packages/diffusers/models/model_loading_utils.py", line 754, in _caching_allocator_warmup
+    _ = torch.empty(warmup_elems, dtype=dtype, device=device, requires_grad=False)
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 30.54 GiB. GPU 0 has a total capacity of 22.03 GiB of which 21.84 GiB is free. Including non-PyTorch memory, this process has 186.00 MiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
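Note: switching to the base repo plus load_lora_weights resolves the 404, but loading still fails because device_map="cuda" asks diffusers to materialize the full 14B transformer on the 22.03 GiB GPU (the warmup allocation alone is 30.54 GiB). Below is a minimal sketch of a lower-VRAM loading path. It assumes accelerate is installed, that the host has enough CPU RAM to stage the weights, and that this pipeline works with the standard diffusers offloading helpers; it is untested against this repo.

import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Load on CPU first: omitting device_map="cuda" avoids the 30.54 GiB warmup
# allocation on the 22 GiB card.
pipe = DiffusionPipeline.from_pretrained("nvidia/ChronoEdit-14B-Diffusers", dtype=torch.bfloat16)
pipe.load_lora_weights("nvidia/ChronoEdit-14B-Diffusers-Upscaler-Lora")

# Stream submodules to the GPU one at a time during inference (requires accelerate).
# A 14B transformer is roughly 28 GB in bf16, so even whole-component offload
# (enable_model_cpu_offload) may not fit 22 GiB; sequential offload keeps only
# individual layers resident, trading speed for VRAM.
pipe.enable_sequential_cpu_offload()

prompt = "Turn this cat into a dog"
input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
image = pipe(image=input_image, prompt=prompt).images[0]

The PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint in the error message targets allocator fragmentation and would not help with a single allocation larger than the card.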