Spaces:
Sleeping
Sleeping
Update app.py
Browse files — model selection
app.py
CHANGED
|
@@ -45,6 +45,20 @@ elif page == "Invoices to Table":
|
|
| 45 |
index=3,
|
| 46 |
key="token_selector_sidebar"
|
| 47 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
uploaded_file = st.file_uploader("π€ Upload Invoice Image", type=["jpg", "jpeg", "png"])
|
| 50 |
col1, col2 = st.columns([1.5, 1]) # col1 = processing, col2 = image
|
|
@@ -68,7 +82,7 @@ elif page == "Invoices to Table":
|
|
| 68 |
st.text_area("OCR Text", text, height=300)
|
| 69 |
|
| 70 |
with st.spinner("π Extracting Table..."):
|
| 71 |
-
table_md = extract_table_from_text(text,max_tokens=selected_token_limit)
|
| 72 |
|
| 73 |
if st.button("π§ Parse Table"):
|
| 74 |
try:
|
|
|
|
| 45 |
index=3,
|
| 46 |
key="token_selector_sidebar"
|
| 47 |
)
|
| 48 |
+
# Model selector
|
| 49 |
+
st.sidebar.markdown("### π€ Choose LLM Model")
|
| 50 |
+
selected_model = st.sidebar.radio(
|
| 51 |
+
"Which model to use?",
|
| 52 |
+
options=[
|
| 53 |
+
"meta-llama/llama-4-maverick-17b-128e-instruct",
|
| 54 |
+
"meta-llama/llama-4-scout-17b-16e-instruct",
|
| 55 |
+
"qwen/qwen3-32b",
|
| 56 |
+
"llama-3.3-70b-versatile",
|
| 57 |
+
"gemma2-9b-it"
|
| 58 |
+
],
|
| 59 |
+
index=1,
|
| 60 |
+
key="model_selector_sidebar"
|
| 61 |
+
)
|
| 62 |
|
| 63 |
uploaded_file = st.file_uploader("π€ Upload Invoice Image", type=["jpg", "jpeg", "png"])
|
| 64 |
col1, col2 = st.columns([1.5, 1]) # col1 = processing, col2 = image
|
|
|
|
| 82 |
st.text_area("OCR Text", text, height=300)
|
| 83 |
|
| 84 |
with st.spinner("π Extracting Table..."):
|
| 85 |
+
table_md = extract_table_from_text(text,max_tokens=selected_token_limit,model=selected_model)
|
| 86 |
|
| 87 |
if st.button("π§ Parse Table"):
|
| 88 |
try:
|