from fastapi import FastAPI, Request
from transformers import AutoTokenizer, BertForSequenceClassification, BertConfig
from huggingface_hub import hf_hub_download
import torch
import numpy as np
import pickle
import sys
import collections
import os  # import the os module
import psutil  # psutil for checking memory usage (must be added to requirements.txt)
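# Assumed runtime dependencies for this Space (the original listing does not pin them):
# fastapi, uvicorn, torch, transformers, huggingface_hub, numpy, psutil --
# psutil in particular has to be added to requirements.txt, as noted above.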
app = FastAPI()
device = torch.device("cpu")
# Load category.pkl
try:
    with open("category.pkl", "rb") as f:
        category = pickle.load(f)
    print("Loaded category.pkl successfully.")
except FileNotFoundError:
    print("Error: category.pkl not found. Make sure it is in the project root.")
    sys.exit(1)
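# `category` is expected to be a dict mapping label names to class indices:
# len(category) sets num_labels when the model config is built below, and
# list(category.keys()) maps a predicted index back to its label in the endpoint.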
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("skt/kobert-base-v1")
print("Tokenizer loaded successfully.")
HF_MODEL_REPO_ID = "hiddenFront/TextClassifier"
HF_MODEL_FILENAME = "textClassifierModel.pt"
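# hf_hub_download fetches the file from the Hub, caches it locally (by default
# under ~/.cache/huggingface/hub), and returns the path to the cached copy.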
# --- Memory usage logging: start ---
process = psutil.Process(os.getpid())
mem_before_model_download = process.memory_info().rss / (1024 * 1024)  # in MB
print(f"Memory usage before model download: {mem_before_model_download:.2f} MB")
# --- Memory usage logging: end ---
try:
    model_path = hf_hub_download(repo_id=HF_MODEL_REPO_ID, filename=HF_MODEL_FILENAME)
    print(f"Model file downloaded successfully to '{model_path}'.")

    # --- Memory usage logging: start ---
    mem_after_model_download = process.memory_info().rss / (1024 * 1024)  # in MB
    print(f"Memory usage after model download: {mem_after_model_download:.2f} MB")
    # --- Memory usage logging: end ---

    # 1. Define the model architecture (initialize the structure only, without loading weights)
    config = BertConfig.from_pretrained("skt/kobert-base-v1", num_labels=len(category))
    model = BertForSequenceClassification(config)

    # 2. Load the state_dict from the downloaded file
    loaded_state_dict = torch.load(model_path, map_location=device)
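    # This assumes the .pt file holds a raw state_dict (a mapping from parameter
    # names to tensors) rather than a pickled full model object; recent PyTorch
    # (>= 2.6) defaults torch.load to weights_only=True, which loads plain
    # state_dicts fine but rejects arbitrary pickled objects.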
    # 3. Apply the loaded state_dict to the freshly defined model
    new_state_dict = collections.OrderedDict()
    for k, v in loaded_state_dict.items():
        name = k
        if name.startswith('module.'):
            name = name[7:]  # strip the 'module.' prefix
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
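    # Note: the 'module.' prefix is what torch.nn.DataParallel (or DDP) prepends
    # to parameter names when a wrapped model's state_dict is saved; stripping it
    # lets the weights load into this unwrapped model.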
    # --- Memory usage logging: start ---
    mem_after_model_load = process.memory_info().rss / (1024 * 1024)  # in MB
    print(f"Memory usage after model load and state_dict application: {mem_after_model_load:.2f} MB")
    # --- Memory usage logging: end ---

    model.eval()
    print("Model loaded successfully.")
except Exception as e:
    print(f"Error: failed while downloading or loading the model: {e}")
    sys.exit(1)
@app.post("/predict")  # route decorator restored; the exact path is an assumption, the original listing omitted it
async def predict_api(request: Request):
    data = await request.json()
    text = data.get("text")
    if not text:
        return {"error": "No text provided", "classification": "null"}
    encoded = tokenizer.encode_plus(
        text, max_length=64, padding='max_length', truncation=True, return_tensors='pt'
    )
    with torch.no_grad():
        outputs = model(**encoded)
        probs = torch.nn.functional.softmax(outputs.logits, dim=1)
        predicted = torch.argmax(probs, dim=1).item()
    label = list(category.keys())[predicted]
    return {"text": text, "classification": label}