Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,11 +1,8 @@
+import streamlit as st
 import torch
 from PIL import Image
 from huggingface_hub import hf_hub_download
 from transformers import VisionEncoderDecoderModel
-from fastapi import FastAPI, File, UploadFile
-from fastapi.responses import HTMLResponse
-from fastapi.staticfiles import StaticFiles
-from fastapi.templating import Jinja2Templates


 import warnings
@@ -80,31 +77,17 @@ tokenizer = MBartTokenizer.from_pretrained(
 )
 processortext2 = CustomOCRProcessor(image_processor,tokenizer)

+st.title("Image OCR with musadac/vilanocr")
+uploaded_file = st.file_uploader("Choose an image", type=["jpg", "jpeg", "png"])

-
-
-templates = Jinja2Templates(directory="templates")
-import os
-huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
-# Download and load the model
-model2 = VisionEncoderDecoderModel.from_pretrained("musadac/vilanocr-single-urdu",use_auth_token=huggingface_token)
-
-
-@app.get("/", response_class=HTMLResponse)
-async def root():
-    return templates.TemplateResponse("index.html", {"request": None})
-
-@app.post("/upload/", response_class=HTMLResponse)
-async def upload_image(image: UploadFile = File(...)):
-    # Preprocess image
-    img = Image.open(image.file).convert("RGB")
+if uploaded_file is not None:
+    img = Image.open(uploaded_file).convert("RGB")
     pixel_values = processortext2(img.convert("RGB"), return_tensors="pt").pixel_values

-    # Run the model
     with torch.no_grad():
         generated_ids = model2.generate(img_tensor)

-    # Extract OCR result
     result = processortext2.batch_decode(generated_ids, skip_special_tokens=True)[0]
+    st.write("OCR Result:")
+    st.write(result)

-    return {"result": result}
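Note that the committed Streamlit version still calls model2.generate(img_tensor): img_tensor is never defined (the processor output is named pixel_values), and the line that loaded model2 was deleted together with the FastAPI code without a replacement, which is consistent with the Runtime error status shown above. Below is a minimal corrected sketch of the Streamlit portion only; it is not part of the commit and assumes the processortext2 = CustomOCRProcessor(image_processor, tokenizer) instance defined earlier in app.py and the musadac/vilanocr-single-urdu checkpoint used by the previous version.

import os

import streamlit as st
import torch
from PIL import Image
from transformers import VisionEncoderDecoderModel

# Reload the model that the removed FastAPI code used to set up
# (hypothetical fix, not part of the commit shown above).
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
model2 = VisionEncoderDecoderModel.from_pretrained(
    "musadac/vilanocr-single-urdu", use_auth_token=huggingface_token
)

st.title("Image OCR with musadac/vilanocr")
uploaded_file = st.file_uploader("Choose an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    img = Image.open(uploaded_file).convert("RGB")
    # processortext2 is the CustomOCRProcessor instance built earlier in app.py.
    pixel_values = processortext2(img, return_tensors="pt").pixel_values

    with torch.no_grad():
        # Pass the processor output; the committed code referenced an undefined img_tensor.
        generated_ids = model2.generate(pixel_values)

    result = processortext2.batch_decode(generated_ids, skip_special_tokens=True)[0]
    st.write("OCR Result:")
    st.write(result)

With a change along these lines the app can be exercised locally via streamlit run app.py; since Streamlit reruns the whole script on every interaction, wrapping the model load in st.cache_resource would also avoid reloading the checkpoint on each upload.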