|
|
import os |
|
|
import streamlit as st |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
|
|
|
|
|
|
|
SITE_DB_FILE = "sites_db.json" |
|
|
|
|
|
|
|
|
if "sites" not in st.session_state: |
|
|
|
|
|
if os.path.exists(SITE_DB_FILE): |
|
|
try: |
|
|
with open(SITE_DB_FILE, "r") as f: |
|
|
st.session_state["sites"] = json.load(f) |
|
|
except Exception as e: |
|
|
st.error(f"β οΈ Could not load sites database: {e}") |
|
|
st.session_state["sites"] = [] |
|
|
else: |
|
|
st.session_state["sites"] = [] |
|
|
|
|
|
if "active_site_idx" not in st.session_state: |
|
|
st.session_state["active_site_idx"] = None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_secret(key: str, required: bool = True, default=None):
"""
Load a secret (Groq API key, Earth Engine credentials, etc.).
Lookup order: 1. st.secrets, 2. os.environ, 3. default (if given).
If a required secret is missing, an error is shown and None is returned.
"""
|
|
value = None |
|
|
try: |
|
|
if key in st.secrets: |
|
|
value = st.secrets[key] |
|
|
elif key in os.environ: |
|
|
value = os.environ[key] |
|
|
elif default is not None: |
|
|
value = default |
|
|
elif required: |
|
|
st.error(f"β Missing required secret: {key}") |
|
|
except Exception as e: |
|
|
st.error(f"β οΈ Error loading secret `{key}`: {e}") |
|
|
return value |
|
|
|
|
|
|
|
|
GROQ_API_KEY = load_secret("GROQ_API_KEY") |
|
|
SERVICE_ACCOUNT = load_secret("SERVICE_ACCOUNT") |
|
|
EARTH_ENGINE_KEY = load_secret("EARTH_ENGINE_KEY", required=False) |
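# load_secret checks st.secrets first, so these can live in .streamlit/secrets.toml,
# e.g. (placeholder values, not real credentials):
#   GROQ_API_KEY = "gsk_..."
#   SERVICE_ACCOUNT = "service-account@project.iam.gserviceaccount.com"
#   EARTH_ENGINE_KEY = "..."
# or be exported as environment variables of the same names.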
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def persist_sites(): |
|
|
"""Save sites to local JSON database""" |
|
|
try: |
|
|
with open(SITE_DB_FILE, "w") as f: |
|
|
json.dump(st.session_state["sites"], f, indent=2) |
|
|
except Exception as e: |
|
|
st.error(f"β οΈ Error saving sites: {e}") |
|
|
|
|
|
def get_active_site(): |
|
|
|
|
|
idx = st.session_state.get("active_site_idx", 0) |
|
|
|
|
|
|
|
|
sites = st.session_state.get("sites", []) |
|
|
|
|
|
|
|
|
if not sites: |
|
|
st.session_state["sites"] = [{"Site Name": "Site 1"}] |
|
|
st.session_state["active_site_idx"] = 0 |
|
|
return st.session_state["sites"][0] |
|
|
|
|
|
|
|
|
if idx < 0 or idx >= len(sites): |
|
|
st.session_state["active_site_idx"] = 0 |
|
|
idx = 0 |
|
|
|
|
|
return sites[idx] |
|
|
|
|
|
|
|
|
def save_active_site(site_data): |
|
|
sites = st.session_state.get("sites", []) |
|
|
|
|
|
if not sites: |
|
|
st.session_state["sites"] = [site_data] |
|
|
st.session_state["active_site_idx"] = 0 |
|
|
else: |
|
|
idx = st.session_state.get("active_site_idx", 0) |
|
|
if idx < 0 or idx >= len(sites): |
|
|
idx = 0 |
|
|
st.session_state["active_site_idx"] = 0 |
|
|
st.session_state["sites"][idx] = site_data |
|
|
|
|
|
|
|
|
def create_new_site(name: str): |
|
|
"""Create new site with maximum soil details and set active""" |
|
|
new_site = { |
|
|
"name": name, |
|
|
"Soil Profile": None, |
|
|
"USCS Classification": None, |
|
|
"AASHTO Classification": None, |
|
|
"Soil Recognizer Confidence": None, |
|
|
"Region": None, |
|
|
"Moisture Content (%)": None, |
|
|
"Dry Density (kN/mΒ³)": None, |
|
|
"Saturation (%)": None, |
|
|
"Void Ratio": None, |
|
|
"Porosity (%)": None, |
|
|
"Plastic Limit (%)": None, |
|
|
"Liquid Limit (%)": None, |
|
|
"Plasticity Index (%)": None, |
|
|
"Cohesion (kPa)": None, |
|
|
"Angle of Internal Friction (Ο, degrees)": None, |
|
|
"Permeability (m/s)": None, |
|
|
"Compression Index (Cc)": None, |
|
|
"Recompression Index (Cr)": None, |
|
|
"Bearing Capacity (kN/mΒ²)": None, |
|
|
"Settlement (mm)": None, |
|
|
"Slope Stability Factor of Safety": None, |
|
|
"Compaction Optimum Moisture Content (%)": None, |
|
|
"Compaction Maximum Dry Density (kN/mΒ³)": None, |
|
|
"Seepage Analysis Notes": None, |
|
|
"Consolidation Notes": None, |
|
|
"Engineering Recommendations": [], |
|
|
"LLM Insights": [], |
|
|
"Notes": "", |
|
|
} |
|
|
st.session_state["sites"].append(new_site) |
|
|
st.session_state["active_site_idx"] = len(st.session_state["sites"]) - 1 |
|
|
persist_sites() |
|
|
return new_site |
|
|
|
|
|
def list_sites(): |
|
|
"""Return list of all site names""" |
|
|
return [site["name"] for site in st.session_state["sites"]] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
PAGES = {
"🏠 Home": "home",
"🖼️ Soil Recognizer": "soil_recognizer",
"🔬 Soil Classifier": "soil_classifier",
"🤖 RAG Chatbot": "rag_chatbot",
"🗺️ Maps": "maps",
"📄 PDF Export": "pdf_export",
"💬 Feedback": "feedback"
}
|
|
|
|
|
def sidebar_navigation(): |
|
|
st.sidebar.title("π GeoMate Navigation") |
|
|
|
|
|
|
|
|
st.sidebar.subheader("ποΈ Site Manager") |
|
|
sites = list_sites() |
|
|
|
|
|
if sites: |
|
|
selected = st.sidebar.selectbox( |
|
|
"Select Active Site", |
|
|
options=range(len(sites)), |
|
|
format_func=lambda i: sites[i], |
|
|
index=st.session_state.get("active_site_idx") or 0 |
|
|
) |
|
|
if selected is not None: |
|
|
st.session_state["active_site_idx"] = selected |
|
|
site = get_active_site() |
|
|
st.sidebar.success(f"Active Site: {site['name']}") |
|
|
|
|
|
if st.sidebar.button("ποΈ Delete Active Site"): |
|
|
idx = st.session_state.get("active_site_idx") |
|
|
if idx is not None and idx < len(st.session_state["sites"]): |
|
|
deleted_name = st.session_state["sites"][idx]["name"] |
|
|
st.session_state["sites"].pop(idx) |
|
|
st.session_state["active_site_idx"] = None |
|
|
persist_sites() |
|
|
st.sidebar.warning(f"Deleted site: {deleted_name}") |
|
|
|
|
|
else: |
|
|
st.sidebar.info("No sites available. Create one below.") |
|
|
|
|
|
with st.sidebar.expander("β Create New Site"): |
|
|
new_name = st.text_input("Enter new site name") |
|
|
if st.button("Create Site"): |
|
|
if new_name.strip(): |
|
|
new_site = create_new_site(new_name.strip()) |
|
|
st.sidebar.success(f"β
Created new site: {new_site['name']}") |
|
|
else: |
|
|
st.sidebar.error("Please enter a valid site name.") |
|
|
|
|
|
st.sidebar.markdown("---") |
|
|
|
|
|
|
|
|
st.sidebar.subheader("π Pages") |
|
|
page_choice = st.sidebar.radio( |
|
|
"Go to", |
|
|
list(PAGES.keys()) |
|
|
) |
|
|
return PAGES[page_choice] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def site_details_panel(): |
|
|
st.subheader("π Active Site Details") |
|
|
site = get_active_site() |
|
|
if not site: |
|
|
st.info("No active site selected. Please create or select one from the sidebar.") |
|
|
return |
|
|
|
|
|
|
|
|
site["location"] = st.text_input("π Location", value=site.get("location", "")) |
|
|
site["Soil Profile"] = st.text_input("π§± Soil Profile", value=site.get("Soil Profile", "")) |
|
|
site["Depth (m)"] = st.number_input("π Depth (m)", value=float(site.get("Depth (m)", 0.0))) |
|
|
site["Moisture Content (%)"] = st.number_input("π§ Moisture Content (%)", value=float(site.get("Moisture Content (%)", 0.0))) |
|
|
site["Dry Density (kN/mΒ³)"] = st.number_input("ποΈ Dry Density (kN/mΒ³)", value=float(site.get("Dry Density (kN/mΒ³)", 0.0))) |
|
|
site["Liquid Limit (%)"] = st.number_input("π Liquid Limit (%)", value=float(site.get("Liquid Limit (%)", 0.0))) |
|
|
site["Plastic Limit (%)"] = st.number_input("π Plastic Limit (%)", value=float(site.get("Plastic Limit (%)", 0.0))) |
|
|
site["Grain Size (%)"] = st.number_input("π¬ Grain Size (%)", value=float(site.get("Grain Size (%)", 0.0))) |
|
|
|
|
|
if st.button("πΎ Save Site Details"): |
|
|
save_active_site(site) |
|
|
st.success("Site details updated successfully!") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch |
|
|
import torch.nn as nn |
|
|
import torchvision.models as models |
|
|
import torchvision.transforms as T |
|
|
from PIL import Image |
|
|
import streamlit as st |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@st.cache_resource |
|
|
def load_soil_model(path="soil_best_model.pth"): |
|
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") |
|
|
try: |
|
|
model = models.resnet18(weights=None)  # architecture only; weights come from the checkpoint below
|
|
num_ftrs = model.fc.in_features |
|
|
model.fc = nn.Linear(num_ftrs, 6) |
|
|
|
|
|
|
|
|
state_dict = torch.load(path, map_location=device) |
|
|
model.load_state_dict(state_dict) |
|
|
model = model.to(device) |
|
|
model.eval() |
|
|
return model, device |
|
|
except Exception as e: |
|
|
st.error(f"β οΈ Could not load soil model: {e}") |
|
|
return None, device |
|
|
|
|
|
soil_model, device = load_soil_model() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
SOIL_CLASSES = ["Clay", "Gravel", "Loam", "Peat", "Sand", "Silt"] |
|
|
|
|
|
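# Standard ImageNet preprocessing: ResNet18 expects 224x224 RGB inputs, and the
# mean/std values below are the usual ImageNet channel statistics used at training time.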
transform = T.Compose([ |
|
|
T.Resize((224, 224)), |
|
|
T.ToTensor(), |
|
|
T.Normalize([0.485, 0.456, 0.406], |
|
|
[0.229, 0.224, 0.225]) |
|
|
]) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def predict_soil(img: Image.Image): |
|
|
if soil_model is None: |
|
|
return "Model not loaded", {} |
|
|
|
|
|
img = img.convert("RGB") |
|
|
inp = transform(img).unsqueeze(0).to(device) |
|
|
|
|
|
with torch.no_grad(): |
|
|
logits = soil_model(inp) |
|
|
probs = torch.softmax(logits[0], dim=0) |
|
|
|
|
|
top_idx = torch.argmax(probs).item() |
|
|
predicted_class = SOIL_CLASSES[top_idx] |
|
|
|
|
|
result = {SOIL_CLASSES[i]: float(probs[i]) for i in range(len(SOIL_CLASSES))} |
|
|
return predicted_class, result |
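# Example (illustrative; the path and outputs are hypothetical):
#   img = Image.open("samples/clay_01.jpg")
#   label, scores = predict_soil(img)
#   # label -> "Clay", scores -> {"Clay": 0.91, "Gravel": 0.01, ...}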
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def soil_recognizer_page(): |
|
|
st.header("πΌοΈ Soil Recognizer (ResNet18)") |
|
|
|
|
|
site = get_active_site() |
|
|
if site is None: |
|
|
st.warning("β οΈ No active site selected. Please add or select a site from the sidebar.") |
|
|
return |
|
|
|
|
|
uploaded = st.file_uploader("Upload soil image", type=["jpg", "jpeg", "png"]) |
|
|
if uploaded is not None: |
|
|
img = Image.open(uploaded) |
|
|
st.image(img, caption="Uploaded soil image", use_column_width=True) |
|
|
|
|
|
predicted_class, confidence_scores = predict_soil(img) |
|
|
st.success(f"β
Predicted: **{predicted_class}**") |
|
|
|
|
|
st.subheader("Confidence Scores") |
|
|
for cls, score in confidence_scores.items(): |
|
|
st.write(f"{cls}: {score:.2%}") |
|
|
|
|
|
if st.button("Save to site"): |
|
|
site["Soil Profile"] = predicted_class |
|
|
site["Soil Recognizer Confidence"] = confidence_scores[predicted_class] |
|
|
save_active_site(site) |
|
|
st.success("Saved prediction to active site memory.") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import re
import io
import datetime
|
|
import json |
|
|
from math import floor |
|
|
from typing import Dict, Any, Tuple |
|
|
from PIL import Image |
|
|
import pytesseract |
|
|
import requests |
|
|
import streamlit as st |
|
|
|
|
|
|
|
|
from reportlab.lib.pagesizes import A4 |
|
|
from reportlab.lib.units import mm |
|
|
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, Image as RLImage, PageBreak |
|
|
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle |
|
|
from reportlab.lib import colors |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_active_site(): |
|
|
idx = st.session_state.get("active_site_idx", 0) |
|
|
sites = st.session_state.get("sites", []) |
|
|
if 0 <= idx < len(sites): |
|
|
return sites[idx] |
|
|
|
|
|
if not sites:
|
|
st.session_state["sites"] = [{"Site Name": "Site 1"}] |
|
|
st.session_state["active_site_idx"] = 0 |
|
|
return st.session_state["sites"][0] |
|
|
return None |
|
|
|
|
|
def save_active_site(site: dict): |
|
|
idx = st.session_state.get("active_site_idx", 0) |
|
|
st.session_state["sites"][idx] = site |
|
|
st.session_state.modified = True |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _readf(inputs: Dict[str,Any], *keys, default: float = 0.0) -> float: |
|
|
for k in keys: |
|
|
if k in inputs and inputs[k] is not None and inputs[k] != "": |
|
|
try: |
|
|
return float(inputs[k]) |
|
|
except Exception: |
|
|
try: |
|
|
return float(str(inputs[k]).replace("%","").strip()) |
|
|
except Exception: |
|
|
pass |
|
|
return default |
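# e.g. _readf({"LL": "45 %"}, "LL") -> 45.0 (strips the % sign),
#      _readf({"P2": 60}, "P200", "P2") -> 60.0 (falls through the key aliases),
#      _readf({}, "LL") -> 0.0 (default)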
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def classify_aashto_verbatim(inputs: Dict[str,Any]) -> Tuple[str, str, int, str]: |
|
|
""" |
|
|
Returns (ResultCode_str, description_str, GI_int, decision_path_str) |
|
|
Inputs keys expected: |
|
|
- P200 or P2 : percent passing sieve no.200 |
|
|
- P4 : percent passing sieve no.40 (your script uses 'P4' labelled that way) |
|
|
- P10 or P1 : percent passing sieve no.10 (optional) |
|
|
- LL, PL |
|
|
""" |
|
|
P2 = _readf(inputs, "P200", "P2") |
|
|
P4 = _readf(inputs, "P40", "P4") |
|
|
LL = _readf(inputs, "LL") |
|
|
PL = _readf(inputs, "PL") |
|
|
PI = LL - PL |
|
|
decision = [] |
|
|
def note(s): decision.append(s) |
|
|
|
|
|
note(f"Input AASHTO: P2={P2}, P4={P4}, LL={LL}, PL={PL}, PI={PI}") |
|
|
|
|
|
Result = None |
|
|
desc = "" |
|
|
|
|
|
|
|
|
if P2 <= 35: |
|
|
note("P2 <= 35% β Granular branch") |
|
|
if (P2 <= 15) and (P4 <= 30) and (PI <= 6): |
|
|
note("Condition matched: P2<=15 and P4<=30 and PI<=6 β need P10 to decide A-1-a") |
|
|
P1 = _readf(inputs, "P10", "P1") |
|
|
if P1 == 0: |
|
|
|
|
|
note("P10 not provided; cannot fully decide A-1-a. Returning tentative 'A-1-a(?)'") |
|
|
return "A-1-a(?)", "Candidate A-1-a (P10 missing).", 0, " -> ".join(decision) |
|
|
else: |
|
|
if P1 <= 50: |
|
|
Result = "A-1-a" |
|
|
desc = "Granular material with very good quality (A-1-a)." |
|
|
note("P10 <= 50 -> A-1-a") |
|
|
else: |
|
|
note("P10 > 50 -> inconsistent for A-1-a -> input check required") |
|
|
return "ERROR", "Inconsistent inputs for A-1-a (P10 > 50).", 0, " -> ".join(decision) |
|
|
elif (P2 <= 25) and (P4 <= 50) and (PI <= 6): |
|
|
Result = "A-1-b" |
|
|
desc = "Granular material (A-1-b)." |
|
|
note("P2 <= 25 and P4 <= 50 and PI <= 6 -> A-1-b") |
|
|
elif (P2 <= 35) and (P4 > 0): |
|
|
note("P2 <= 35 and P4 > 0 -> A-2 family branch") |
|
|
if LL <= 40 and PI <= 10: |
|
|
Result = "A-2-4" |
|
|
desc = "A-2-4: granular material with silt-like fines." |
|
|
note("LL <= 40 and PI <= 10 -> A-2-4") |
|
|
elif LL >= 41 and PI <= 10: |
|
|
Result = "A-2-5" |
|
|
desc = "A-2-5: granular with higher LL fines." |
|
|
note("LL >= 41 and PI <= 10 -> A-2-5") |
|
|
elif LL <= 40 and PI >= 11: |
|
|
Result = "A-2-6" |
|
|
desc = "A-2-6: granular with clay-like fines." |
|
|
note("LL <= 40 and PI >= 11 -> A-2-6") |
|
|
elif LL >= 41 and PI >= 11: |
|
|
Result = "A-2-7" |
|
|
desc = "A-2-7: granular with high plasticity fines." |
|
|
note("LL >= 41 and PI >= 11 -> A-2-7") |
|
|
else: |
|
|
Result = "A-2-?" |
|
|
desc = "A-2 family ambiguous - needs more data." |
|
|
note("A-2 branch ambigous.") |
|
|
else: |
|
|
Result = "A-3" |
|
|
desc = "A-3: clean sand." |
|
|
note("Else -> A-3 (clean sands)") |
|
|
else: |
|
|
|
|
|
note("P2 > 35% -> Fine (silt/clay) branch") |
|
|
if LL <= 40 and PI <= 10: |
|
|
Result = "A-4" |
|
|
desc = "A-4: silt of low LL/PI." |
|
|
note("LL <= 40 and PI <= 10 -> A-4") |
|
|
elif LL >= 41 and PI <= 10: |
|
|
Result = "A-5" |
|
|
desc = "A-5: elastic silt (higher LL but low PI)." |
|
|
note("LL >= 41 and PI <= 10 -> A-5") |
|
|
elif LL <= 40 and PI >= 11: |
|
|
Result = "A-6" |
|
|
desc = "A-6: clay of low LL and higher PI." |
|
|
note("LL <= 40 and PI >= 11 -> A-6") |
|
|
else: |
|
|
|
|
|
if PI <= (LL - 30): |
|
|
Result = "A-7-5" |
|
|
desc = "A-7-5: clay of intermediate plasticity." |
|
|
note("PI <= (LL - 30) -> A-7-5") |
|
|
elif PI > (LL - 30): |
|
|
Result = "A-7-6" |
|
|
desc = "A-7-6: clay of relatively higher plasticity." |
|
|
note("PI > (LL - 30) -> A-7-6") |
|
|
else: |
|
|
Result = "ERROR" |
|
|
desc = "Ambiguous A-7 branch." |
|
|
note("AASHTO A-7 branch ambiguous") |
|
|
|
|
|
|
|
|
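# AASHTO Group Index (AASHTO M 145): GI = 0.2a + 0.005ac + 0.01bd, where
# a = P200 - 35 (clamped to 0..40), b = P200 - 15 (clamped to 0..40),
# c = LL - 40 (clamped to 0..20), d = PI - 10 (clamped to 0..20).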
a_val = min(max(P2 - 35, 0), 40)
b_val = min(max(P2 - 15, 0), 40)
c_val = min(max(LL - 40, 0), 20)
d_val = min(max(PI - 10, 0), 20)
|
|
|
|
|
GI = floor(0.2 * a_val + 0.005 * a_val * c_val + 0.01 * b_val * d_val) |
|
|
note(f"GI compute -> a={a_val}, b={b_val}, c={c_val}, d={d_val}, GI={GI}") |
|
|
|
|
|
decision_path = " -> ".join(decision) |
|
|
full_code = f"{Result} ({GI})" if Result not in [None, "ERROR", "A-1-a(?)"] else (Result if Result != "A-1-a(?)" else "A-1-a (?)") |
|
|
return full_code, desc, GI, decision_path |
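# Usage sketch (hypothetical lab values):
#   classify_aashto_verbatim({"P200": 60, "P40": 80, "LL": 45, "PL": 22})
#   -> ("A-7-6 (10)", "A-7-6: clay of relatively higher plasticity.", 10, "...")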
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def classify_uscs_verbatim(inputs: Dict[str,Any]) -> Tuple[str, str, str]: |
|
|
""" |
|
|
Returns (USCS_code_str, description_str, decision_path_str) |
|
|
Accepts inputs: |
|
|
- organic (bool or 'y'/'n') |
|
|
- P200 / P2 percent passing #200 |
|
|
- P4 : percent passing sieve no.4 (4.75 mm) |
|
|
- D60, D30, D10 (mm) |
|
|
- LL, PL |
|
|
- nDS, nDIL, nTG options for fines behaviour (integers) |
|
|
Implementation follows the original decision branches verbatim.
|
|
""" |
|
|
decision = [] |
|
|
def note(s): decision.append(s) |
|
|
|
|
|
organic = inputs.get("organic", False) |
|
|
if isinstance(organic, str): |
|
|
organic = organic.lower() in ("y","yes","true","1") |
|
|
|
|
|
if organic: |
|
|
note("Organic content indicated -> Pt") |
|
|
return "Pt", "Peat / Organic soil β compressible, poor engineering properties.", "Organic branch: Pt" |
|
|
|
|
|
P2 = _readf(inputs, "P200", "P2") |
|
|
note(f"P200 = {P2}%") |
|
|
|
|
|
if P2 <= 50: |
|
|
|
|
|
P4 = _readf(inputs, "P4", "P4_sieve", "P40") |
|
|
note(f"% passing #4 (P4) = {P4}%") |
|
|
op = inputs.get("d_values_provided", None) |
|
|
D60 = _readf(inputs, "D60") |
|
|
D30 = _readf(inputs, "D30") |
|
|
D10 = _readf(inputs, "D10") |
|
|
if D60 != 0 and D30 != 0 and D10 != 0: |
|
|
Cu = (D60 / D10) if D10 != 0 else 0 |
|
|
Cc = ((D30 ** 2) / (D10 * D60)) if (D10 * D60) != 0 else 0 |
|
|
note(f"D-values present -> D60={D60}, D30={D30}, D10={D10}, Cu={Cu}, Cc={Cc}") |
|
|
else: |
|
|
Cu = 0 |
|
|
Cc = 0 |
|
|
note("D-values missing or incomplete -> using Atterberg/fines-based branches") |
|
|
|
|
|
LL = _readf(inputs, "LL") |
|
|
PL = _readf(inputs, "PL") |
|
|
PI = LL - PL |
|
|
note(f"LL={LL}, PL={PL}, PI={PI}") |
|
|
|
|
|
|
|
|
if P4 <= 50: |
|
|
note("P4 <= 50 -> Gravel family") |
|
|
if (Cu != 0) and (Cc != 0): |
|
|
if (Cu >= 4) and (1 <= Cc <= 3): |
|
|
note("Cu >=4 and 1<=Cc<=3 -> GW") |
|
|
return "GW", "Well-graded gravel with excellent load-bearing capacity.", "GW via Cu/Cc" |
|
|
elif not ((Cu < 4) and (1 <= Cc <= 3)): |
|
|
note("Cu <4 or Cc out of 1..3 -> GP") |
|
|
return "GP", "Poorly-graded gravel.", "GP via Cu/Cc" |
|
|
else: |
|
|
|
|
|
if (PI < 4) or (PI < 0.73 * (LL - 20)): |
|
|
note("PI < 4 or PI < 0.73*(LL-20) -> GM") |
|
|
return "GM", "Silty gravel with moderate properties.", "GM via fines" |
|
|
elif (PI > 7) and (PI > 0.73 * (LL - 20)): |
|
|
note("PI > 7 and PI > 0.73*(LL-20) -> GC") |
|
|
return "GC", "Clayey gravel β reduced drainage.", "GC via fines" |
|
|
else: |
|
|
note("Intermediate fines -> GM-GC") |
|
|
return "GM-GC", "Mixed silt/clay in gravel β variable.", "GM-GC via fines" |
|
|
else: |
|
|
|
|
|
note("P4 > 50 -> Sand family") |
|
|
if (Cu != 0) and (Cc != 0): |
|
|
if (Cu >= 6) and (1 <= Cc <= 3): |
|
|
note("Cu >= 6 and 1 <= Cc <= 3 -> SW") |
|
|
return "SW", "Well-graded sand with good engineering behavior.", "SW via Cu/Cc" |
|
|
elif not ((Cu < 6) and (1 <= Cc <= 3)): |
|
|
note("Cu <6 or Cc out of 1..3 -> SP") |
|
|
return "SP", "Poorly-graded sand.", "SP via Cu/Cc" |
|
|
else: |
|
|
if (PI < 4) or (PI <= 0.73 * (LL - 20)): |
|
|
note("PI < 4 or PI <= 0.73*(LL-20) -> SM") |
|
|
return "SM", "Silty sand β moderate engineering quality.", "SM via fines" |
|
|
elif (PI > 7) and (PI > 0.73 * (LL - 20)): |
|
|
note("PI > 7 and PI > 0.73*(LL-20) -> SC") |
|
|
return "SC", "Clayey sand β reduced permeability and strength.", "SC via fines" |
|
|
else: |
|
|
note("Intermediate -> SM-SC") |
|
|
return "SM-SC", "Sand mixed with fines (silt/clay).", "SM-SC via fines" |
|
|
else: |
|
|
|
|
|
note("P200 > 50 -> Fine-grained path") |
|
|
LL = _readf(inputs, "LL") |
|
|
PL = _readf(inputs, "PL") |
|
|
PI = LL - PL |
|
|
note(f"LL={LL}, PL={PL}, PI={PI}") |
|
|
|
|
|
|
|
|
nDS = int(_readf(inputs, "nDS", default=0)) |
|
|
nDIL = int(_readf(inputs, "nDIL", default=0)) |
|
|
nTG = int(_readf(inputs, "nTG", default=0)) |
|
|
note(f"Behavior options (nDS,nDIL,nTG) = ({nDS},{nDIL},{nTG})") |
|
|
|
|
|
|
|
|
if LL < 50: |
|
|
note("LL < 50 -> low plasticity branch") |
|
|
if (20 <= LL < 50) and (PI <= 0.73 * (LL - 20)): |
|
|
note("20 <= LL < 50 and PI <= 0.73*(LL-20)") |
|
|
if (nDS == 1) or (nDIL == 3) or (nTG == 3): |
|
|
note("-> ML") |
|
|
return "ML", "Silt of low plasticity.", "ML via LL/PI/observations" |
|
|
elif (nDS == 3) or (nDIL == 3) or (nTG == 3): |
|
|
note("-> OL (organic silt)") |
|
|
return "OL", "Organic silt β compressible.", "OL via observations" |
|
|
else: |
|
|
note("-> ML-OL (ambiguous)") |
|
|
return "ML-OL", "Mixed silt/organic.", "ML-OL via ambiguity" |
|
|
elif (10 <= LL <= 30) and (4 <= PI <= 7) and (PI > 0.72 * (LL - 20)): |
|
|
note("10 <= LL <=30 and 4<=PI<=7 and PI > 0.72*(LL-20)") |
|
|
if (nDS == 1) or (nDIL == 1) or (nTG == 1): |
|
|
note("-> ML") |
|
|
return "ML", "Low plasticity silt", "ML via specific conditions" |
|
|
elif (nDS == 2) or (nDIL == 2) or (nTG == 2): |
|
|
note("-> CL") |
|
|
return "CL", "Low plasticity clay", "CL via specific conditions" |
|
|
else: |
|
|
note("-> ML-CL (ambiguous)") |
|
|
return "ML-CL", "Mixed ML/CL", "ML-CL via ambiguity" |
|
|
else: |
|
|
note("Default low-plasticity branch -> CL") |
|
|
return "CL", "Low plasticity clay", "CL default" |
|
|
else: |
|
|
|
|
|
note("LL >= 50 -> high plasticity branch") |
|
|
if PI < 0.73 * (LL - 20): |
|
|
note("PI < 0.73*(LL-20)") |
|
|
if (nDS == 3) or (nDIL == 4) or (nTG == 4): |
|
|
note("-> MH") |
|
|
return "MH", "Elastic silt (high LL)", "MH via observations" |
|
|
elif (nDS == 2) or (nDIL == 2) or (nTG == 4): |
|
|
note("-> OH") |
|
|
return "OH", "Organic high plasticity silt/clay", "OH via observations" |
|
|
else: |
|
|
note("-> MH-OH (ambiguous)") |
|
|
return "MH-OH", "Mixed MH/OH", "MH-OH via ambiguity" |
|
|
else: |
|
|
note("PI >= 0.73*(LL-20) -> CH") |
|
|
return "CH", "High plasticity clay β compressible, problematic for foundations.", "CH default high-PL" |
|
|
|
|
|
note("Fell through branches -> UNCLASSIFIED") |
|
|
return "UNCLASSIFIED", "Insufficient data for USCS classification.", "No valid decision path" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
ENGINEERING_TABLE = { |
|
|
"Gravel": { |
|
|
"Settlement": "None", |
|
|
"Quicksand": "Impossible", |
|
|
"Frost": "None", |
|
|
"Groundwater lowering": "Possible", |
|
|
"Cement grouting": "Possible", |
|
|
"Silicate/bitumen": "Unsuitable", |
|
|
"Compressed air": "Possible (loss of air, slow progress)" |
|
|
}, |
|
|
"Coarse sand": { |
|
|
"Settlement": "None", |
|
|
"Quicksand": "Impossible", |
|
|
"Frost": "None", |
|
|
"Groundwater lowering": "Suitable", |
|
|
"Cement grouting": "Possible only if very coarse", |
|
|
"Silicate/bitumen": "Suitable", |
|
|
"Compressed air": "Suitable" |
|
|
}, |
|
|
"Medium sand": { |
|
|
"Settlement": "None", |
|
|
"Quicksand": "Unlikely", |
|
|
"Frost": "None", |
|
|
"Groundwater lowering": "Suitable", |
|
|
"Cement grouting": "Impossible", |
|
|
"Silicate/bitumen": "Suitable", |
|
|
"Compressed air": "Suitable" |
|
|
}, |
|
|
"Fine sand": { |
|
|
"Settlement": "None", |
|
|
"Quicksand": "Liable", |
|
|
"Frost": "None", |
|
|
"Groundwater lowering": "Suitable", |
|
|
"Cement grouting": "Impossible", |
|
|
"Silicate/bitumen": "Not possible in very fine sands", |
|
|
"Compressed air": "Suitable" |
|
|
}, |
|
|
"Silt": { |
|
|
"Settlement": "Occurs", |
|
|
"Quicksand": "Liable (coarse silts / silty sands)", |
|
|
"Frost": "Occurs", |
|
|
"Groundwater lowering": "Impossible (except electro-osmosis)", |
|
|
"Cement grouting": "Impossible", |
|
|
"Silicate/bitumen": "Impossible", |
|
|
"Compressed air": "Suitable" |
|
|
}, |
|
|
"Clay": { |
|
|
"Settlement": "Occurs", |
|
|
"Quicksand": "Impossible", |
|
|
"Frost": "None", |
|
|
"Groundwater lowering": "Impossible", |
|
|
"Cement grouting": "Only in stiff, fissured clay", |
|
|
"Silicate/bitumen": "Impossible", |
|
|
"Compressed air": "Used for support only (Glossop & Skempton)" |
|
|
} |
|
|
} |
|
|
|
|
|
def engineering_characteristics_from_uscs(uscs_code: str) -> Dict[str,str]: |
|
|
|
|
|
if uscs_code.startswith("G"): |
|
|
return ENGINEERING_TABLE["Gravel"] |
|
|
if uscs_code.startswith("S"): |
|
|
|
|
|
return ENGINEERING_TABLE["Medium sand"] |
|
|
if uscs_code in ("ML","MH","OL","OH"): |
|
|
return ENGINEERING_TABLE["Silt"] |
|
|
if uscs_code.startswith("C") or uscs_code == "CL" or uscs_code == "CH": |
|
|
return ENGINEERING_TABLE["Clay"] |
|
|
|
|
|
return {"Settlement":"Varies", "Quicksand":"Varies", "Frost":"Varies"} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def classify_all(inputs: Dict[str,Any]) -> Dict[str,Any]: |
|
|
""" |
|
|
Run both AASHTO & USCS verbatim logic and return a dictionary with: |
|
|
- AASHTO_code, AASHTO_desc, GI, AASHTO_decision_path |
|
|
- USCS_code, USCS_desc, USCS_decision_path |
|
|
- engineering_characteristics (dict) |
|
|
- engineering_summary (short deterministic summary) |
|
|
""" |
|
|
aashto_code, aashto_desc, GI, aashto_path = classify_aashto_verbatim(inputs) |
|
|
uscs_code, uscs_desc, uscs_path = classify_uscs_verbatim(inputs) |
|
|
|
|
|
eng_chars = engineering_characteristics_from_uscs(uscs_code) |
|
|
|
|
|
|
|
|
summary_lines = [] |
|
|
summary_lines.append(f"USCS: {uscs_code} β {uscs_desc}") |
|
|
summary_lines.append(f"AASHTO: {aashto_code} β {aashto_desc}") |
|
|
summary_lines.append(f"Group Index: {GI}") |
|
|
|
|
|
if uscs_code.startswith("C") or uscs_code in ("CH","CL"): |
|
|
summary_lines.append("Clayey behavior: expect significant compressibility, low permeability, potential long-term settlement β advisable to assess consolidation & use deep foundations for heavy loads.") |
|
|
elif uscs_code.startswith("G") or uscs_code.startswith("S"): |
|
|
summary_lines.append("Granular behavior: good drainage and bearing; suitable for shallow foundations/pavements when properly compacted.") |
|
|
elif uscs_code in ("ML","MH","OL","OH"): |
|
|
summary_lines.append("Silty/organic behavior: moderate-to-high compressibility; frost-susceptible; avoid as direct support for heavy structures without treatment.") |
|
|
else: |
|
|
summary_lines.append("Mixed or unclear behavior; recommend targeted lab testing and conservative design assumptions.") |
|
|
|
|
|
out = { |
|
|
"AASHTO_code": aashto_code, |
|
|
"AASHTO_description": aashto_desc, |
|
|
"GI": GI, |
|
|
"AASHTO_decision_path": aashto_path, |
|
|
"USCS_code": uscs_code, |
|
|
"USCS_description": uscs_desc, |
|
|
"USCS_decision_path": uscs_path, |
|
|
"engineering_characteristics": eng_chars, |
|
|
"engineering_summary": "\n".join(summary_lines) |
|
|
} |
|
|
return out |
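# Combined usage sketch (same hypothetical inputs as above):
#   report = classify_all({"P200": 60, "P40": 80, "LL": 45, "PL": 22})
#   report["USCS_code"], report["AASHTO_code"], report["GI"]  # -> ("CL", "A-7-6 (10)", 10)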
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def call_groq_for_explanation(prompt: str, model_name: str = "meta-llama/llama-4-maverick-17b-128e-instruct", max_tokens: int = 800) -> str: |
|
|
""" |
|
|
Use Groq client via REST if GROQ_API_KEY in st.secrets |
|
|
(Note: adapt to your Groq client wrapper if you have it) |
|
|
""" |
|
|
key = None |
|
|
|
|
|
if "GROQ_API_KEY" in st.secrets: |
|
|
key = st.secrets["GROQ_API_KEY"] |
|
|
else: |
|
|
key = st.session_state.get("GROQ_API_KEY") or None |
|
|
|
|
|
if not key: |
|
|
return "Groq API key not found. LLM humanized explanation not available." |
|
|
|
|
|
url = "https://api.groq.com/v1/chat/completions" |
|
|
headers = {"Authorization": f"Bearer {key}", "Content-Type":"application/json"} |
|
|
payload = { |
|
|
"model": model_name, |
|
|
"messages": [ |
|
|
{"role":"system","content":"You are GeoMate, a professional geotechnical engineering assistant."}, |
|
|
{"role":"user","content": prompt} |
|
|
], |
|
|
"temperature": 0.2, |
|
|
"max_tokens": max_tokens |
|
|
} |
|
|
try: |
|
|
resp = requests.post(url, headers=headers, json=payload, timeout=60) |
|
|
resp.raise_for_status() |
|
|
data = resp.json() |
|
|
|
|
|
if "choices" in data and len(data["choices"])>0: |
|
|
content = data["choices"][0].get("message", {}).get("content") or data["choices"][0].get("text") or str(data["choices"][0]) |
|
|
return content |
|
|
return json.dumps(data) |
|
|
except Exception as e: |
|
|
return f"LLM call failed: {e}" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def build_classification_pdf_bytes(site: Dict[str,Any], classification: Dict[str,Any], explanation_text: str) -> bytes: |
|
|
buf = io.BytesIO() |
|
|
doc = SimpleDocTemplate(buf, pagesize=A4, leftMargin=18*mm, rightMargin=18*mm, topMargin=18*mm, bottomMargin=18*mm) |
|
|
styles = getSampleStyleSheet() |
|
|
title_style = ParagraphStyle("title", parent=styles["Title"], fontSize=18, textColor=colors.HexColor("#FF6600"), alignment=1) |
|
|
h1 = ParagraphStyle("h1", parent=styles["Heading1"], fontSize=12, textColor=colors.HexColor("#FF6600")) |
|
|
body = ParagraphStyle("body", parent=styles["BodyText"], fontSize=10) |
|
|
|
|
|
elems = [] |
|
|
elems.append(Paragraph("GeoMate V2 β Classification Report", title_style)) |
|
|
elems.append(Spacer(1,6)) |
|
|
elems.append(Paragraph(f"Site: {site.get('Site Name','Unnamed')}", h1)) |
|
|
elems.append(Paragraph(f"Date: {st.datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC')}", body)) |
|
|
elems.append(Spacer(1,8)) |
|
|
|
|
|
|
|
|
elems.append(Paragraph("Laboratory Inputs", h1)) |
|
|
inputs = site.get("classifier_inputs", {}) |
|
|
if inputs: |
|
|
data = [["Parameter","Value"]] |
|
|
for k,v in inputs.items(): |
|
|
data.append([str(k), str(v)]) |
|
|
table = Table(data, colWidths=[80*mm, 80*mm]) |
|
|
table.setStyle(TableStyle([("GRID",(0,0),(-1,-1),0.5,colors.grey), ("BACKGROUND",(0,0),(-1,0),colors.HexColor("#FF6600")), ("TEXTCOLOR",(0,0),(-1,0),colors.white)])) |
|
|
elems.append(table) |
|
|
else: |
|
|
elems.append(Paragraph("No lab inputs recorded.", body)) |
|
|
elems.append(Spacer(1,8)) |
|
|
|
|
|
|
|
|
elems.append(Paragraph("Deterministic Classification Results", h1)) |
|
|
elems.append(Paragraph(f"USCS: {classification.get('USCS_code','N/A')} β {classification.get('USCS_description','')}", body)) |
|
|
elems.append(Paragraph(f"AASHTO: {classification.get('AASHTO_code','N/A')} β {classification.get('AASHTO_description','')}", body)) |
|
|
elems.append(Paragraph(f"Group Index: {classification.get('GI','N/A')}", body)) |
|
|
elems.append(Spacer(1,6)) |
|
|
elems.append(Paragraph("USCS decision path (verbatim):", h1)) |
|
|
elems.append(Paragraph(classification.get("USCS_decision_path","Not recorded"), body)) |
|
|
elems.append(Spacer(1,6)) |
|
|
elems.append(Paragraph("AASHTO decision path (verbatim):", h1)) |
|
|
elems.append(Paragraph(classification.get("AASHTO_decision_path","Not recorded"), body)) |
|
|
elems.append(Spacer(1,8)) |
|
|
|
|
|
|
|
|
elems.append(Paragraph("Engineering Characteristics (from reference table)", h1)) |
|
|
eng = classification.get("engineering_characteristics", {}) |
|
|
if eng: |
|
|
eng_data = [["Property","Value"]] |
|
|
for k,v in eng.items(): |
|
|
eng_data.append([k, v]) |
|
|
t2 = Table(eng_data, colWidths=[60*mm, 100*mm]) |
|
|
t2.setStyle(TableStyle([("GRID",(0,0),(-1,-1),0.5,colors.grey), ("BACKGROUND",(0,0),(-1,0),colors.HexColor("#FF6600")), ("TEXTCOLOR",(0,0),(-1,0),colors.white)])) |
|
|
elems.append(t2) |
|
|
elems.append(Spacer(1,8)) |
|
|
|
|
|
|
|
|
elems.append(Paragraph("Humanized Engineering Explanation (LLM)", h1)) |
|
|
if explanation_text: |
|
|
|
|
|
for para in explanation_text.strip().split("\n\n"): |
|
|
elems.append(Paragraph(para.strip().replace("\n"," "), body)) |
|
|
elems.append(Spacer(1,4)) |
|
|
else: |
|
|
elems.append(Paragraph("No LLM explanation available.", body)) |
|
|
|
|
|
|
|
|
if "map_snapshot" in site and site["map_snapshot"]: |
|
|
snap = site["map_snapshot"] |
|
|
|
|
|
if isinstance(snap, str) and snap.lower().endswith((".png",".jpg",".jpeg")) and os.path.exists(snap): |
|
|
elems.append(PageBreak()) |
|
|
elems.append(Paragraph("Map Snapshot", h1)) |
|
|
elems.append(RLImage(snap, width=160*mm, height=90*mm)) |
|
|
|
|
|
doc.build(elems) |
|
|
pdf_bytes = buf.getvalue() |
|
|
buf.close() |
|
|
return pdf_bytes |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def soil_classifier_page(): |
|
|
st.header("π§ Soil Classifier β USCS & AASHTO (Verbatim)") |
|
|
|
|
|
site = get_active_site() |
|
|
if site is None: |
|
|
st.warning("No active site. Add a site first in the sidebar.") |
|
|
return |
|
|
|
|
|
|
|
|
site.setdefault("classifier_inputs", {}) |
|
|
|
|
|
col1, col2 = st.columns([2,1]) |
|
|
with col1: |
|
|
st.markdown("**Upload lab sheet (image) for OCR** β the extracted values will auto-fill classifier inputs.") |
|
|
uploaded = st.file_uploader("Upload image (png/jpg)", type=["png","jpg","jpeg"], key="clf_ocr_upload") |
|
|
if uploaded: |
|
|
img = Image.open(uploaded) |
|
|
st.image(img, caption="Uploaded lab sheet (OCR)", use_column_width=True) |
|
|
try: |
|
|
raw_text = pytesseract.image_to_string(img) |
|
|
st.text_area("OCR raw text (preview)", raw_text, height=180) |
|
|
|
|
|
|
|
|
def find_first(pattern): |
|
|
m = re.search(pattern, raw_text, re.IGNORECASE) |
|
|
return float(m.group(1)) if m else None |
|
|
|
|
|
possible = {} |
|
|
for pat_key, pats in { |
|
|
"LL": [r"LL\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)", r"Liquid\s*Limit\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)"], |
|
|
"PL": [r"PL\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)", r"Plastic\s*Limit\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)"], |
|
|
"P200":[r"%\s*Passing\s*#?200\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)", r"P200\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)", r"Passing\s*0\.075\s*mm\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)"], |
|
|
"P4":[r"%\s*Passing\s*#?4\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)", r"P4\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)"], |
|
|
"D60":[r"D60\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)", r"D_{60}\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)"], |
|
|
"D30":[r"D30\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)"], |
|
|
"D10":[r"D10\s*[:=]?\s*([0-9]+(?:\.[0-9]+)?)"] |
|
|
}.items(): |
|
|
for p in pats: |
|
|
v = find_first(p) |
|
|
if v is not None: |
|
|
possible[pat_key] = v |
|
|
break |
|
|
|
|
|
for k,v in possible.items(): |
|
|
site["classifier_inputs"][k] = v |
|
|
save_active_site(site) |
|
|
st.success(f"OCR auto-filled: {', '.join([f'{k}={v}' for k,v in possible.items()])}") |
|
|
except Exception as e: |
|
|
st.error(f"OCR parsing failed: {e}") |
|
|
|
|
|
st.markdown("**Or type soil parameters / paste lab line** (e.g. `LL=45 PL=22 P200=58 P4=12 D60=1.2 D30=0.45 D10=0.08`) β chat-style input below.") |
|
|
user_text = st.text_area("Enter parameters or notes", value="", key="clf_text_input", height=120) |
|
|
|
|
|
if st.button("Run Classification"): |
|
|
|
|
|
txt = user_text or "" |
|
|
|
|
|
kvs = dict(re.findall(r"([A-Za-z0-9_%]+)\s*[=:\-]\s*([0-9]+(?:\.[0-9]+)?)", txt)) |
|
|
|
|
|
norm = {} |
|
|
for k,v in kvs.items(): |
|
|
klow = k.strip().lower() |
|
|
if klow in ("ll","liquidlimit","liquid_limit","liquid"): |
|
|
norm["LL"] = float(v) |
|
|
elif klow in ("pl","plasticlimit","plastic_limit","plastic"): |
|
|
norm["PL"] = float(v) |
|
|
elif klow in ("pi","plasticityindex"): |
|
|
norm["PI"] = float(v) |
|
|
elif klow in ("p200","%200","p_200","passing200"): |
|
|
norm["P200"] = float(v) |
|
|
elif klow in ("p4","p_4","passing4"): |
|
|
norm["P4"] = float(v) |
|
|
elif klow in ("d60","d_60"): |
|
|
norm["D60"] = float(v) |
|
|
elif klow in ("d30","d_30"): |
|
|
norm["D30"] = float(v) |
|
|
elif klow in ("d10","d_10"): |
|
|
norm["D10"] = float(v) |
|
|
|
|
|
site["classifier_inputs"].update(norm) |
|
|
save_active_site(site) |
|
|
|
|
|
|
|
|
inputs_for_class = site["classifier_inputs"] |
|
|
|
|
|
result = classify_all(inputs_for_class) |
|
|
|
|
|
site["classification_report"] = result |
|
|
save_active_site(site) |
|
|
|
|
|
st.success("Deterministic classification complete.") |
|
|
st.markdown("**USCS result:** " + str(result.get("USCS_code"))) |
|
|
st.markdown("**AASHTO result:** " + str(result.get("AASHTO_code")) + f" (GI={result.get('GI')})") |
|
|
st.markdown("**Engineering summary (deterministic):**") |
|
|
st.info(result.get("engineering_summary")) |
|
|
|
|
|
|
|
|
prompt = f""" |
|
|
You are GeoMate, a professional geotechnical engineer assistant. |
|
|
Given the following laboratory inputs and deterministic classification, produce a clear, technical |
|
|
and human-friendly classification report, explaining what the soil is, how it behaves, engineering |
|
|
implications (bearing, settlement, stiffness), suitability for shallow foundations and road subgrades, |
|
|
and practical recommendations for site engineering. |
|
|
|
|
|
Site: {site.get('Site Name','Unnamed')} |
|
|
Inputs (as parsed): {json.dumps(site.get('classifier_inputs',{}), indent=2)} |
|
|
Deterministic classification results: |
|
|
USCS: {result.get('USCS_code')} |
|
|
USCS decision path: {result.get('USCS_decision_path')} |
|
|
AASHTO: {result.get('AASHTO_code')} |
|
|
AASHTO decision path: {result.get('AASHTO_decision_path')} |
|
|
Group Index: {result.get('GI')} |
|
|
Engineering characteristics reference table: {json.dumps(result.get('engineering_characteristics',{}), indent=2)} |
|
|
|
|
|
Provide: |
|
|
- Executive summary (3-5 sentences) |
|
|
- Engineering interpretation (detailed) |
|
|
- Specific recommendations (foundations, drainage, compaction, stabilization) |
|
|
- Short checklist of items for further testing. |
|
|
""" |
|
|
st.info("Generating humanized report via LLM (Groq) β this may take a few seconds.") |
|
|
explanation = call_groq_for_explanation(prompt) |
|
|
|
|
|
if explanation.startswith("LLM call failed") or explanation.startswith("Groq API key not found"): |
|
|
|
|
|
explanation = ("Humanized explanation not available via LLM. " |
|
|
"Deterministic summary: \n\n" + result.get("engineering_summary", "No summary.")) |
|
|
|
|
|
|
|
|
site.setdefault("reports", {}) |
|
|
site["reports"]["last_classification_explanation"] = explanation |
|
|
save_active_site(site) |
|
|
|
|
|
st.markdown("**Humanized Explanation (LLM or fallback):**") |
|
|
st.write(explanation) |
|
|
|
|
|
|
|
|
pdf_bytes = build_classification_pdf_bytes(site, result, explanation) |
|
|
st.download_button("Download Classification PDF", data=pdf_bytes, file_name=f"classification_{site.get('Site Name','site')}.pdf", mime="application/pdf") |
|
|
|
|
|
|
|
|
with col2: |
|
|
st.markdown("**Current parsed inputs**") |
|
|
st.json(site.get("classifier_inputs", {})) |
|
|
st.markdown("**Last deterministic classification (if any)**") |
|
|
st.json(site.get("classification_report", {})) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def locator_page(): |
|
|
st.header("π Locator (Earth Engine Powered)") |
|
|
|
|
|
pass |
|
|
|
|
|
|
|
|
|
|
|
def rag_chatbot_page(): |
|
|
st.header("π¬ Knowledge Assistant (RAG + Groq)") |
|
|
|
|
|
pass |
|
|
|
|
|
|
|
|
|
|
|
def report_page(): |
|
|
st.header("π Generate Report") |
|
|
|
|
|
pass |
|
|
|
|
|
|
|
|
|
|
|
def feedback_page(): |
|
|
st.header("π Feedback & Suggestions") |
|
|
|
|
|
pass |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
PAGES = { |
|
|
"Soil Recognizer": soil_recognizer_page, |
|
|
"Soil Classifier": soil_classifier_page, |
|
|
"Locator": locator_page, |
|
|
"Knowledge Assistant": rag_chatbot_page, |
|
|
"Report": report_page, |
|
|
"Feedback": feedback_page, |
|
|
} |
|
|
|
|
|
def main(): |
|
|
st.sidebar.title("π GeoMate V2") |
|
|
choice = st.sidebar.radio("Navigate", list(PAGES.keys())) |
|
|
|
|
|
|
|
|
if st.sidebar.button("β Add Site"): |
|
|
st.session_state["sites"].append({}) |
|
|
st.session_state["active_site_idx"] = len(st.session_state["sites"]) - 1 |
|
|
if st.session_state["sites"]: |
|
|
st.sidebar.write("Sites:") |
|
|
for i, s in enumerate(st.session_state["sites"]): |
|
|
label = f"Site {i+1}" |
|
|
if st.sidebar.button(label, key=f"site_{i}"): |
|
|
st.session_state["active_site_idx"] = i |
|
|
|
|
|
|
|
|
PAGES[choice]() |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |