import gradio as gr
import subprocess
import os

from huggingface_hub import HfApi
from gradio_huggingfacehub_search import HuggingfaceHubSearch

from apscheduler.schedulers.background import BackgroundScheduler

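# Token for the scheduled self-restart below; expected in the environment
# (e.g. as a Space secret).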
HF_TOKEN = os.environ.get("HF_TOKEN")


def process_model(
    model_id: str,
    file_path: str,
    file_path_dropdown: str,
    key: str,
    value: str,
    oauth_token: gr.OAuthToken | None,
):
    """Download one GGUF file from the repo, set a metadata key with
    llama.cpp's gguf_set_metadata.py, and upload the result back."""
    if oauth_token is None or oauth_token.token is None:
        raise ValueError("You must be logged in to use gguf-metadata-updater")

    api = HfApi(token=oauth_token.token)

    MODEL_NAME = model_id.split("/")[-1]

    # Prefer the dropdown selection (populated once a repo is chosen);
    # fall back to the free-text path.
    if file_path_dropdown:
        FILE_PATH = file_path_dropdown
    else:
        FILE_PATH = file_path

    # Download only the selected file from the repo.
    api.snapshot_download(
        repo_id=model_id,
        allow_patterns=FILE_PATH,
        local_dir=f"{MODEL_NAME}",
    )
    print("Model downloaded successfully!")

    metadata_update = f"yes | python3 llama.cpp/gguf-py/scripts/gguf_set_metadata.py {MODEL_NAME}/{FILE_PATH} {key} {value}"
    subprocess.run(metadata_update, shell=True)
    print(f"Model metadata {key} updated to {value} successully!")

    # Upload the modified GGUF file(s) back to the repo.
    api.upload_folder(
        folder_path=MODEL_NAME,
        repo_id=model_id,
        allow_patterns=["*.gguf"],
    )
    print("Uploaded successfully!")

    # The Interface declares two outputs (Markdown, Image); no image is
    # produced here, so fill the second slot with None.
    return "Processing complete.", None


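# Build the UI: a login button, a Hub model search box, file/key/value
# inputs, and an Interface wired to process_model.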
with gr.Blocks() as demo:
    gr.Markdown("You must be logged in to use GGUF metadata updated.")
    gr.LoginButton(min_width=250)

    model_id = HuggingfaceHubSearch(
        label="Hub Model ID",
        placeholder="Search for model id on Huggingface",
        search_type="model",
    )

    file_path = gr.Textbox(lines=1, label="File path")

    file_path_dropdown = gr.Dropdown(["None"], label="File", visible=False)

    key = gr.Textbox(lines=1, label="Key")

    value = gr.Textbox(lines=1, label="Value")

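    # Reuse the components above as Interface inputs; gr.Interface renders
    # the submit button and output areas inside this Blocks layout.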
    iface = gr.Interface(
        fn=process_model,
        inputs=[model_id, file_path, file_path_dropdown, key, value],
        outputs=[
            gr.Markdown(label="output"),
            gr.Image(show_label=False),
        ],
        title="Update metadata for a GGUF file",
        description="The space takes an HF repo, a file within that repo, a metadata key, and new metadata value to update it to.",
        api_name=False,
    )

    def update_file_path(model_id: str):
        """List the repo's files and swap the free-text path for a dropdown."""
        try:
            api = HfApi()
            files = []
            for file in api.list_repo_tree(
                repo_id=model_id,
                recursive=True,
            ):
                files.append(file.path)

            return gr.update(visible=False), gr.update(visible=True, choices=files)
        except Exception:
            # Repo not found (or still being typed): keep the textbox visible.
            return gr.update(visible=True), gr.update(visible=False)

    model_id.change(
        fn=update_file_path, inputs=model_id, outputs=[file_path, file_path_dropdown]
    )


def restart_space():
    # Factory-reboot this Space so it comes back with a clean environment.
    HfApi().restart_space(
        repo_id="bartowski/gguf-metadata-updater", token=HF_TOKEN, factory_reboot=True
    )


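# Schedule restart_space every 21600 seconds (6 hours).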
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=21600)
scheduler.start()

# Launch the interface
demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False)