import torch
from torch import Tensor, nn
import gradio as gr
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

class HFEmbedder(nn.Module):
    """Thin wrapper around a Hugging Face text encoder (CLIP or T5)."""

    def __init__(self, version: str, max_length: int, **hf_kwargs):
        super().__init__()
        # "openai/..." checkpoints are treated as CLIP text encoders; everything else as T5.
        self.is_clip = version.startswith("openai")
        self.max_length = max_length
        # CLIP yields a single pooled vector, T5 the full token-level hidden states.
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"

        if self.is_clip:
            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
        else:
            self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)

        # Inference only: put the encoder in eval mode and freeze its weights.
        self.hf_module = self.hf_module.eval().requires_grad_(False)

    def forward(self, text: list[str]) -> Tensor:
        # Tokenize to a fixed length (pad/truncate to self.max_length).
        batch_encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        )

        outputs = self.hf_module(
            input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
            attention_mask=None,
            output_hidden_states=False,
        )
        return outputs[self.output_key]
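
# Output shapes, for reference: a T5 encoder returns last_hidden_state of shape
# (batch, max_length, hidden_size), where hidden_size is 4096 for t5-v1_1-xxl,
# while a CLIP text model returns pooler_output of shape (batch, hidden_size),
# e.g. 768 for clip-vit-large-patch14.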

def load_t5(device: str | torch.device = "cuda", max_length: int = 512) -> HFEmbedder:
    # max length 64, 128, 256 and 512 should work (if your sequence is short enough)
    return HFEmbedder("city96/t5-v1_1-xxl-encoder-bf16", max_length=max_length, torch_dtype=torch.bfloat16).to(device)
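
# Illustrative sketch, not used by this app: a CLIP counterpart to load_t5.
# The checkpoint name and max_length=77 are assumptions chosen to exercise
# HFEmbedder's "openai" branch; they are not part of the original Space.
def load_clip(device: str | torch.device = "cuda") -> HFEmbedder:
    return HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)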

def run_t5_and_save(text):
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"Using device: {device}")
        t5 = load_t5(device, max_length=512)
        embeddings = t5([text])
        # Derive a file name from the prompt (spaces replaced with hyphens).
        filename = text.replace(" ", "-")
        output_path = f"/tmp/embedt5_{filename}.pt"
        torch.save(embeddings, output_path)
        return f"Embedding shape: {embeddings.shape}", output_path
    except Exception as e:
        return f"Runtime error: {e}", None

if __name__ == "__main__":
    iface = gr.Interface(
        fn=run_t5_and_save,
        inputs=gr.Textbox(label="Input text"),
        outputs=[gr.Textbox(label="Result"), gr.File(label="Download embedding file")],
        title="T5 Embedder",
        description="Enter text to generate a T5 embedding and save it to a file",
    )
    iface.launch()