Commit be3e7e8
p2002814 committed
Parent(s): d02e800

working gradual semantics!
Files changed:
- aba/{exemples → examples}/atomic.txt +0 -0
- aba/{exemples → examples}/circular.txt +0 -0
- aba/{exemples → examples}/exemple.txt +0 -0
- aba/{exemples → examples}/td4_1.txt +0 -0
- aba/{exemples → examples}/td4_2.txt +0 -0
- aba/{exemples → examples}/td4_2and.txt +0 -0
- aba/test.py +78 -0
- app.py +67 -6
- gradual/computations.py +21 -0
- gradual/examples/complex.json +4 -0
- gradual/examples/simple.json +4 -0
- gradual/h_categorizer.py +47 -0
- gradual/models.py +74 -0
- relations/{exemples → examples}/claims.py +0 -0
- relations/{exemples → examples}/samples/ai-benefits-humanity.csv +0 -0
- relations/{exemples → examples}/samples/big-exemple.csv +0 -0
- relations/{exemples → examples}/samples/universal-basic-income.csv +0 -0
aba/{exemples → examples}/atomic.txt
RENAMED
File without changes

aba/{exemples → examples}/circular.txt
RENAMED
File without changes

aba/{exemples → examples}/exemple.txt
RENAMED
File without changes

aba/{exemples → examples}/td4_1.txt
RENAMED
File without changes

aba/{exemples → examples}/td4_2.txt
RENAMED
File without changes

aba/{exemples → examples}/td4_2and.txt
RENAMED
File without changes
aba/test.py
ADDED
@@ -0,0 +1,78 @@
"""
Main entry point for generating and analyzing an ABA+ framework.

This script:
1. Builds an ABA framework from a text specification.
2. Prints the original (classical) ABA framework.
3. Prepares the framework for ABA+ (atomic transformation + argument/attack generation).
4. Generates ABA+ components (assumption combinations, normal/reverse attacks).
5. Prints the resulting ABA+ framework components.
6. Plots the ABA+ attack graph between sets of assumptions.
"""

from copy import deepcopy

from aba.aba_builder import build_aba_framework, prepare_aba_plus_framework
from aba.aba_utils import print_aba_plus_results
from aba.aba_framework import ABAFramework


def testABA(aba_framework: ABAFramework):
    copy_framework = deepcopy(aba_framework)

    transformed_framework: ABAFramework = copy_framework.transform_aba()
    print("\n ------- Transformed ABA framework -------\n ")
    print(transformed_framework)

    # Generate arguments
    transformed_framework.generate_arguments()
    gen_args = transformed_framework.arguments
    print("\n ------- Generated arguments -------\n ")
    print(gen_args)

    # Generate attacks
    transformed_framework.generate_attacks()
    attacks = transformed_framework.attacks
    print("\n ------- Generated attacks -------\n ")
    print(attacks, "\n")


def testABAPlus(aba_framework: ABAFramework):
    # === Step 2: Prepare the framework for ABA+ ===
    aba_framework = prepare_aba_plus_framework(aba_framework)

    # === Step 3: Generate ABA+ components ===
    print("\n" + "=" * 50)
    print("Generating ABA+ Components")
    print("=" * 50)
    aba_framework.make_aba_plus()

    # === Step 4: Print ABA+ results ===
    print_aba_plus_results(aba_framework)
    return aba_framework


def main():
    """Main function to generate and analyze an ABA+ framework."""
    # === Step 1: Build the ABA framework from input file ===
    print("\n" + "=" * 50)
    print("Building ABA+ Framework")
    print("=" * 50)

    # Build framework from the given input specification file.
    # Forward slashes keep the path portable, and the directory was renamed
    # from "exemples" to "examples" in this commit.
    aba_framework = build_aba_framework("aba/examples/exemple.txt")
    print(f"\n ------- Original ABA framework -------\n{aba_framework}")

    base_framework = deepcopy(aba_framework)
    testABA(base_framework)

    aba_for_plus = deepcopy(aba_framework)
    testABAPlus(aba_for_plus)


if __name__ == "__main__":
    main()
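A quick way to smoke-test this entry point, as a minimal sketch: it assumes only what this commit provides (the build_aba_framework import and the renamed aba/examples directory) and that you run from the repository root so the aba package is importable.

# Hypothetical smoke test for aba/test.py's build step, run from the repo root.
from aba.aba_builder import build_aba_framework

# exemple.txt lives under aba/examples/ after the exemples -> examples rename.
framework = build_aba_framework("aba/examples/exemple.txt")
print(framework)  # prints the parsed classical ABA framework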
app.py
CHANGED
@@ -11,18 +11,21 @@ from pathlib import Path
 
 import pandas as pd
 import torch
-from fastapi import FastAPI, UploadFile, File, Form
+from fastapi import FastAPI, UploadFile, File, Form, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import FileResponse, StreamingResponse
+from fastapi.responses import FileResponse, StreamingResponse, JSONResponse
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 
 from relations.predict_bert import predict_relation
 from aba.aba_builder import prepare_aba_plus_framework, build_aba_framework_from_text
+from gradual.computations import compute_gradual_semantics
+from gradual.models import GradualInput, GradualOutput
 
 # -------------------- Config -------------------- #
 
-
-SAMPLES_DIR = Path("./relations/exemples/samples")
+ABA_EXAMPLES_DIR = Path("./aba/examples")
+SAMPLES_DIR = Path("./relations/examples/samples")
+GRADUAL_EXAMPLES_DIR = Path("./gradual/examples")
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 model_name = "edgar-demeude/bert-argument"

@@ -48,6 +51,8 @@ def root():
     return {"message": "Argument Mining API is running..."}
 
 
+# --- Predictions --- #
+
 @app.post("/predict-text")
 def predict_text(arg1: str = Form(...), arg2: str = Form(...)):
     """Predict relation between two text arguments using BERT."""

@@ -101,6 +106,8 @@ def get_sample(filename: str):
     return FileResponse(file_path, media_type="text/csv")
 
 
+# --- ABA --- #
+
 @app.post("/aba-upload")
 async def aba_upload(file: UploadFile = File(...)):
     content = await file.read()

@@ -138,13 +145,67 @@ async def aba_upload(file: UploadFile = File(...)):
 
 @app.get("/aba-examples")
 def list_aba_examples():
-    examples = [f.name for f in …
+    examples = [f.name for f in ABA_EXAMPLES_DIR.glob("*.txt")]
     return {"examples": examples}
 
 
 @app.get("/aba-examples/{filename}")
 def get_aba_example(filename: str):
-    file_path = …
+    file_path = ABA_EXAMPLES_DIR / filename
     if not file_path.exists() or not file_path.is_file():
         return {"error": "File not found"}
     return FileResponse(file_path, media_type="text/plain", filename=filename)
+
+
+# --- Gradual semantics --- #
+
+@app.post("/gradual", response_model=GradualOutput)
+def compute_gradual(input_data: GradualInput):
+    """API endpoint to compute Weighted h-Categorizer samples and convex hull."""
+    return compute_gradual_semantics(
+        A=input_data.A,
+        R=input_data.R,
+        n_samples=input_data.n_samples,
+        max_iter=input_data.max_iter
+    )
+
+
+@app.get("/gradual-examples")
+def list_gradual_examples():
+    """
+    List all available gradual semantics example files.
+    Each example must be a JSON file with structure:
+    {
+        "args": ["A", "B", "C"],
+        "relations": [["A", "B"], ["B", "C"]]
+    }
+    """
+    if not GRADUAL_EXAMPLES_DIR.exists():
+        return {"examples": []}
+
+    examples = []
+    for file in GRADUAL_EXAMPLES_DIR.glob("*.json"):
+        examples.append({
+            "name": file.stem,
+            "path": file.name,
+            "content": None
+        })
+    return {"examples": examples}
+
+
+@app.get("/gradual-examples/{example_name}")
+def get_gradual_example(example_name: str):
+    """
+    Return the content of a specific gradual example.
+    Example: GET /gradual-examples/simple.json
+    """
+    file_path = GRADUAL_EXAMPLES_DIR / example_name
+    if not file_path.exists():
+        raise HTTPException(status_code=404, detail="Example not found")
+
+    try:
+        with open(file_path, "r", encoding="utf-8") as f:
+            content = json.load(f)
+        return JSONResponse(content=content)
+    except json.JSONDecodeError:
+        raise HTTPException(status_code=400, detail="Invalid JSON format in example file")
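The new /gradual endpoint can be exercised with any HTTP client. The sketch below mirrors the example body from GradualInput's docstring; the base URL is an assumption (this commit does not fix a host or port for the FastAPI app).

# Hedged sketch: POST a GradualInput body to the new /gradual route.
# http://localhost:8000 is assumed; adjust to wherever the app is served.
import requests

payload = {
    "A": ["A", "B", "C"],
    "R": [["A", "B"], ["B", "C"]],
    "n_samples": 500,
    "max_iter": 1000,
}
resp = requests.post("http://localhost:8000/gradual", json=payload)
resp.raise_for_status()
data = resp.json()  # matches the GradualOutput schema
print(data["num_args"], data["hull_volume"], data["hull_area"])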
gradual/computations.py
ADDED
@@ -0,0 +1,21 @@
from scipy.spatial import ConvexHull
import numpy as np
from .h_categorizer import sample_and_compute_X

def compute_gradual_semantics(A, R, n_samples=1000, max_iter=1000):
    """Compute samples and convex hull information for the given argumentation framework."""
    X_res = sample_and_compute_X(A, R, max_iter=max_iter, n_samples=n_samples)
    result = {"num_args": len(A)}

    if len(A) > 1:
        hull = ConvexHull(X_res)
        result["hull_volume"] = float(hull.volume)
        result["hull_area"] = float(hull.area)
        result["hull_points"] = hull.points[hull.vertices].tolist()
    else:
        result["hull_volume"] = None
        result["hull_area"] = None
        result["hull_points"] = X_res.tolist()

    result["samples"] = X_res.tolist()
    return result
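Called directly, outside the API, compute_gradual_semantics takes the same arguments as the endpoint. A small usage sketch reusing the framework from gradual/examples/simple.json:

# Hedged sketch: direct call with the simple.json framework.
from gradual.computations import compute_gradual_semantics

result = compute_gradual_semantics(
    A=["A", "B", "C"],
    R=[("A", "B"), ("B", "C")],
    n_samples=500,
    max_iter=1000,
)
print(result["num_args"])     # 3
print(result["hull_volume"])  # volume of the hull of the 500 sampled strength vectors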
gradual/examples/complex.json
ADDED
@@ -0,0 +1,4 @@
{
    "args": ["A", "B", "C", "D", "E"],
    "relations": [["A", "B"], ["B", "C"], ["C", "D"], ["D", "E"], ["E", "A"]]
}
gradual/examples/simple.json
ADDED
@@ -0,0 +1,4 @@
{
    "args": ["A", "B", "C"],
    "relations": [["A", "B"], ["B", "C"]]
}
gradual/h_categorizer.py
ADDED
@@ -0,0 +1,47 @@
import numpy as np

def build_att(A, R):
    """Builds a dictionary listing attackers for each argument."""
    att_list = {a: [] for a in A}
    for att, target in R:
        if target in att_list:
            att_list[target].append(att)
        else:
            att_list[target] = [att]
    return att_list


def h_categorizer(A, R, w, max_iter, epsi=1e-4):
    """Computes the h-Categorizer gradual semantics for a given framework (A, R) and weights."""
    attackers = build_att(A, R)
    hc = {a: w[a] for a in A}

    for _ in range(max_iter):
        new_hc = {}
        for a in A:
            sum_attackers = sum(hc[b] for b in attackers[a])
            new_hc[a] = w[a] / (1 + sum_attackers)
        diff = max(abs(new_hc[a] - hc[a]) for a in A)
        hc = new_hc
        if diff < epsi:
            break

    return hc


def dict_to_vector(A, d):
    """Converts a dictionary {arg: value} into a numpy vector following the order of A."""
    return np.array([d[a] for a in A], dtype=float)


def sample_and_compute_X(A, R, epsilon=1e-4, max_iter=1000, n_samples=10000, seed=42):
    """Generates n_samples random weight vectors and computes corresponding h-Categorizer results."""
    rng = np.random.default_rng(seed)
    X = np.zeros((n_samples, len(A)), dtype=float)

    for i in range(n_samples):
        w = dict(zip(A, rng.random(len(A))))
        HC = h_categorizer(A, R, w, max_iter, epsilon)
        X[i, :] = dict_to_vector(A, HC)

    return X
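The loop in h_categorizer repeatedly applies hc(a) = w(a) / (1 + sum of the attackers' current scores) until the largest per-argument change drops below epsi. For the unweighted chain A -> B -> C the fixed point can be computed by hand, which makes a handy correctness check:

# Worked example of the fixed point computed by h_categorizer above.
# With unit weights: hc(A) = 1 (no attackers), hc(B) = 1/(1+1) = 0.5,
# hc(C) = 1/(1+0.5) = 2/3.
from gradual.h_categorizer import h_categorizer

A = ["A", "B", "C"]
R = [("A", "B"), ("B", "C")]
w = {"A": 1.0, "B": 1.0, "C": 1.0}

print(h_categorizer(A, R, w, max_iter=1000))
# approximately {'A': 1.0, 'B': 0.5, 'C': 0.6667}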
gradual/models.py
ADDED
@@ -0,0 +1,74 @@
from pydantic import BaseModel
from typing import List, Tuple, Optional

class GradualInput(BaseModel):
    """
    Input model for the Weighted h-Categorizer API.

    Attributes
    ----------
    A : List[str]
        List of argument names.
    R : List[Tuple[str, str]]
        List of attack relations between arguments.
    n_samples : int
        Number of random weight samples to generate (default: 1000).
    max_iter : int
        Maximum number of iterations for convergence (default: 1000).

    Example
    -------
    # Example JSON body for POST /gradual
    {
        "A": ["A", "B", "C"],
        "R": [["A", "B"], ["B", "C"]],
        "n_samples": 500,
        "max_iter": 1000
    }
    """
    A: List[str]
    R: List[Tuple[str, str]]
    n_samples: int = 1000
    max_iter: int = 1000

class GradualOutput(BaseModel):
    """
    Output model for the Weighted h-Categorizer API.

    Attributes
    ----------
    num_args : int
        Number of arguments in the framework.
    hull_volume : Optional[float]
        Volume of the convex hull (None if |A| <= 1).
    hull_area : Optional[float]
        Surface area of the convex hull (None if |A| <= 1).
    hull_points : List[List[float]]
        Coordinates of the convex hull vertices.
    samples : List[List[float]]
        Sampled points (h-Categorizer outputs) used to compute the hull.

    Example
    -------
    # Example response JSON from POST /gradual
    {
        "num_args": 3,
        "hull_volume": 0.018,
        "hull_area": 0.143,
        "hull_points": [
            [0.83, 0.12, 0.45],
            [0.10, 0.54, 0.92],
            [0.44, 0.80, 0.33]
        ],
        "samples": [
            [0.2, 0.3, 0.7],
            [0.6, 0.4, 0.2],
            ...
        ]
    }
    """
    num_args: int
    hull_volume: Optional[float]
    hull_area: Optional[float]
    hull_points: List[List[float]]
    samples: List[List[float]]
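Since both schemas are plain pydantic BaseModels, they can be checked in isolation. A small sketch showing validation of a request body and the declared defaults:

# Minimal sketch: pydantic validation against GradualInput.
from gradual.models import GradualInput

body = {"A": ["A", "B", "C"], "R": [["A", "B"], ["B", "C"]]}
parsed = GradualInput(**body)
print(parsed.R)                           # [('A', 'B'), ('B', 'C')] - coerced to tuples
print(parsed.n_samples, parsed.max_iter)  # defaults: 1000 1000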
relations/{exemples → examples}/claims.py
RENAMED
File without changes

relations/{exemples → examples}/samples/ai-benefits-humanity.csv
RENAMED
File without changes

relations/{exemples → examples}/samples/big-exemple.csv
RENAMED
File without changes

relations/{exemples → examples}/samples/universal-basic-income.csv
RENAMED
File without changes