p2002814 committed on
Commit
23ecacd
·
1 Parent(s): 0674654
Files changed (2) hide show
  1. aba/models.py +29 -21
  2. app.py +287 -238
aba/models.py CHANGED
@@ -1,16 +1,13 @@
1
  from pydantic import BaseModel
2
  from typing import List, Optional, Dict, Tuple, Any
3
 
4
-
5
  # === Basic DTOs ===
6
-
7
  class RuleDTO(BaseModel):
8
  """Represents a single inference rule in the ABA framework."""
9
  id: str
10
  head: str
11
  body: List[str]
12
 
13
-
14
  class FrameworkSnapshot(BaseModel):
15
  """Snapshot of an ABA framework at a specific stage (original or transformed)."""
16
  language: List[str]
@@ -19,9 +16,7 @@ class FrameworkSnapshot(BaseModel):
19
  contraries: List[Tuple[str, str]]
20
  preferences: Optional[Dict[str, List[str]]] = None
21
 
22
-
23
  # === Transformation tracking ===
24
-
25
  class TransformationStep(BaseModel):
26
  """Represents one transformation step (non-circular, atomic, etc.)."""
27
  step: str # 'non_circular' | 'atomic' | 'none'
@@ -30,18 +25,25 @@ class TransformationStep(BaseModel):
30
  description: Optional[str] = None
31
  result_snapshot: Optional[FrameworkSnapshot] = None
32
 
33
-
34
  # === ABA+ details ===
 
 
 
 
 
 
 
 
 
 
 
35
 
36
  class ABAPlusDTO(BaseModel):
37
- """Results specific to ABA+ semantics (extended attacks between assumption sets)."""
38
- assumption_combinations: List[str]
39
- normal_attacks: List[str]
40
- reverse_attacks: List[str]
41
-
42
 
43
  # === Meta info ===
44
-
45
  class MetaInfo(BaseModel):
46
  """Metadata about the ABA computation process."""
47
  request_id: str
@@ -51,19 +53,25 @@ class MetaInfo(BaseModel):
51
  warnings: Optional[List[str]] = []
52
  errors: Optional[List[str]] = []
53
 
 
 
 
 
 
54
 
55
- # === Full API response ===
 
 
 
 
56
 
 
57
  class ABAApiResponseModel(BaseModel):
58
  """
59
  Represents the full backend response for an ABA/ABA+ computation request.
60
- Includes both the original and transformed frameworks, all transformation steps,
61
- and computed results (arguments, attacks, ABA+ extensions).
62
  """
63
  meta: MetaInfo
64
- original_framework: FrameworkSnapshot
65
- transformations: List[TransformationStep]
66
- final_framework: FrameworkSnapshot
67
- arguments: List[str]
68
- attacks: List[str]
69
- aba_plus: ABAPlusDTO
 
1
  from pydantic import BaseModel
2
  from typing import List, Optional, Dict, Tuple, Any
3
 
 
4
  # === Basic DTOs ===
 
5
class RuleDTO(BaseModel):
    """One inference rule of an ABA framework: an id, a head literal and body literals."""
    id: str
    head: str
    body: List[str]
10
 
 
11
  class FrameworkSnapshot(BaseModel):
12
  """Snapshot of an ABA framework at a specific stage (original or transformed)."""
13
  language: List[str]
 
16
  contraries: List[Tuple[str, str]]
17
  preferences: Optional[Dict[str, List[str]]] = None
18
 
 
19
  # === Transformation tracking ===
 
20
  class TransformationStep(BaseModel):
21
  """Represents one transformation step (non-circular, atomic, etc.)."""
22
  step: str # 'non_circular' | 'atomic' | 'none'
 
25
  description: Optional[str] = None
26
  result_snapshot: Optional[FrameworkSnapshot] = None
27
 
 
28
  # === ABA+ details ===
29
class ABAPlusAttacks(BaseModel):
    """ABA+ attack data, separating argument-level attacks from assumption-set attacks."""
    # Classical ABA attacks between individual arguments.
    argument_attacks: List[Tuple[str, str]]  # [(attacker_arg, attacked_arg), ...]
    # ABA+ attacks between assumption sets.
    assumption_set_attacks: List[Tuple[List[str], List[str]]]  # [(attacking_set, attacked_set), ...]
35
+
36
class ABAPlusFrameworkResults(BaseModel):
    """ABA+ results for one framework state (before or after transformation)."""
    # All assumption sets considered for this framework state.
    assumption_sets: List[List[str]]
    attacks: ABAPlusAttacks
40
 
41
class ABAPlusDTO(BaseModel):
    """ABA+ semantics results, split into before/after-transformation views."""
    before_transformation: ABAPlusFrameworkResults
    after_transformation: ABAPlusFrameworkResults
 
 
45
 
46
  # === Meta info ===
 
47
  class MetaInfo(BaseModel):
48
  """Metadata about the ABA computation process."""
49
  request_id: str
 
53
  warnings: Optional[List[str]] = []
54
  errors: Optional[List[str]] = []
55
 
56
class FrameworkWithArgumentsAndAttacks(BaseModel):
    """A framework snapshot bundled with its computed arguments and attacks."""
    framework: FrameworkSnapshot
    arguments: List[str]
    attacks: List[Tuple[str, str]]  # [(attacker, attacked), ...]
61
 
62
class TransformationResult(BaseModel):
    """Before/after framework states plus the transformation steps linking them."""
    before_transformation: FrameworkWithArgumentsAndAttacks
    after_transformation: FrameworkWithArgumentsAndAttacks
    transformations: List[TransformationStep]
67
 
68
+ # === Full API response ===
69
class ABAApiResponseModel(BaseModel):
    """
    Full backend response for an ABA/ABA+ computation request.

    Carries request metadata, the before/after transformation results, and —
    for ABA+ requests only — the ABA+ extension data.
    """
    meta: MetaInfo
    transformation: TransformationResult
    aba_plus: Optional[ABAPlusDTO] = None  # stays None for plain ABA requests
 
 
 
 
app.py CHANGED
@@ -1,15 +1,24 @@
1
- import io
2
- import json
3
- import asyncio
4
- from pathlib import Path
5
- import pandas as pd
6
- import torch
7
- from fastapi import FastAPI, UploadFile, File, Form, HTTPException
8
- from fastapi.middleware.cors import CORSMiddleware
9
- from fastapi.responses import FileResponse, StreamingResponse, JSONResponse
10
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
11
- from relations.predict_bert import predict_relation
12
  from aba.aba_builder import prepare_aba_plus_framework, build_aba_framework_from_text
 
 
 
 
 
 
 
 
 
 
 
13
  from aba.models import (
14
  RuleDTO,
15
  FrameworkSnapshot,
@@ -18,52 +27,12 @@ from aba.models import (
18
  ABAPlusDTO,
19
  MetaInfo,
20
  )
21
- from gradual.computations import compute_gradual_space
22
- from gradual.models import GradualInput, GradualOutput
23
- import os
24
  from copy import deepcopy
25
  from datetime import datetime
26
 
27
- cache_dir = "/tmp/hf_cache"
28
- os.environ["TRANSFORMERS_CACHE"] = cache_dir
29
- os.makedirs(cache_dir, exist_ok=True)
30
-
31
-
32
- def _make_snapshot(fw) -> FrameworkSnapshot:
33
- return FrameworkSnapshot(
34
- language=[str(l) for l in sorted(fw.language, key=str)],
35
- assumptions=[str(a) for a in sorted(fw.assumptions, key=str)],
36
- rules=[
37
- RuleDTO(
38
- id=r.rule_name,
39
- head=str(r.head),
40
- body=[str(b) for b in sorted(r.body, key=str)],
41
- )
42
- for r in sorted(fw.rules, key=lambda r: r.rule_name)
43
- ],
44
- contraries=[
45
- (str(c.contraried_literal), str(c.contrary_attacker))
46
- for c in sorted(fw.contraries, key=str)
47
- ],
48
- preferences={
49
- str(k): [str(v) for v in sorted(vals, key=str)]
50
- for k, vals in (fw.preferences or {}).items()
51
- } if getattr(fw, "preferences", None) else None,
52
- )
53
-
54
-
55
- def _format_set(s) -> str:
56
- # s may be a Python set/frozenset of Literal or strings.
57
- try:
58
- items = sorted([str(x) for x in s], key=str)
59
- except Exception:
60
- # fallback if s is already a string like "{a,b}"
61
- return str(s)
62
- return "{" + ",".join(items) + "}"
63
 
64
  # -------------------- Config -------------------- #
65
 
66
-
67
  ABA_EXAMPLES_DIR = Path("./aba/examples")
68
  SAMPLES_DIR = Path("./relations/examples/samples")
69
  GRADUAL_EXAMPLES_DIR = Path("./gradual/examples")
@@ -149,229 +118,298 @@ def get_sample(filename: str):
149
  return FileResponse(file_path, media_type="text/csv")
150
 
151
 
 
152
  # --- ABA --- #
153
 
154
- @app.post("/aba-upload")
155
- async def aba_upload(file: UploadFile = File(...)):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
  """
157
- Handle classical ABA generation.
 
 
 
 
 
158
  Returns:
159
- - original_framework: before transformations
160
- - final_framework: after transformations
161
- - transformations: steps applied (non-circular / atomic)
162
- - arguments, attacks
163
- - empty aba_plus section
164
  """
165
- content = await file.read()
166
- text = content.decode("utf-8")
167
-
168
- # === 1. Build original ABA framework ===
169
  base_framework = build_aba_framework_from_text(text)
 
 
170
  original_snapshot = _make_snapshot(base_framework)
171
 
172
- # === 2. Transform the framework ===
173
- copy_framework = deepcopy(base_framework)
174
- transformed_framework = copy_framework.transform_aba()
175
-
176
- was_circular = base_framework.is_aba_circular()
177
- was_atomic = base_framework.is_aba_atomic()
178
 
 
179
  transformed_framework = deepcopy(base_framework).transform_aba()
 
 
 
 
 
 
 
 
 
 
180
 
181
- # Detect transformation type
182
- transformations: list[TransformationStep] = []
183
- if transformed_framework.language != base_framework.language or transformed_framework.rules != base_framework.rules:
184
- # Some transformation happened
185
- if was_circular:
186
- transformations.append(
187
- TransformationStep(
188
- step="non_circular",
189
- applied=True,
190
- reason="The framework contained circular dependencies.",
191
- description="Transformed into a non-circular version.",
192
- result_snapshot=_make_snapshot(transformed_framework),
193
- )
194
- )
195
- elif not was_atomic:
196
- transformations.append(
197
- TransformationStep(
198
- step="atomic",
199
- applied=True,
200
- reason="The framework contained rules with non-assumption bodies.",
201
- description="Transformed into an atomic version.",
202
- result_snapshot=_make_snapshot(transformed_framework),
203
- )
204
  )
205
- else:
206
- # No transformation
207
- transformations.append(
208
- TransformationStep(
209
- step="none",
210
- applied=False,
211
- reason="The framework was already non-circular and atomic.",
212
- description="No transformation applied.",
213
- result_snapshot=None,
214
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
215
  )
216
 
217
- # === 3. Generate arguments and attacks on transformed ===
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218
  transformed_framework.generate_arguments()
219
  transformed_framework.generate_attacks()
220
 
 
 
 
 
 
 
 
 
221
  final_snapshot = _make_snapshot(transformed_framework)
222
 
223
- # === 4. Build response model ===
224
- response = ABAApiResponseModel(
225
- meta=MetaInfo(
226
- request_id=f"req-{datetime.utcnow().timestamp()}",
227
- timestamp=datetime.utcnow().isoformat(),
228
- transformed=any(t.applied for t in transformations),
229
- transformations_applied=[
230
- t.step for t in transformations if t.applied
231
  ],
232
- warnings=[],
233
- errors=[],
234
- ),
235
- original_framework=original_snapshot,
236
- transformations=transformations,
237
- final_framework=final_snapshot,
238
- arguments=[str(arg) for arg in sorted(
239
- transformed_framework.arguments, key=str)],
240
- attacks=[str(att)
241
- for att in sorted(transformed_framework.attacks, key=str)],
242
- aba_plus=ABAPlusDTO(
243
- assumption_combinations=[],
244
- normal_attacks=[],
245
- reverse_attacks=[],
246
- ),
247
- )
 
 
 
 
 
248
 
249
  return response
250
 
251
 
252
- @app.post("/aba-plus-upload", response_model=ABAApiResponseModel)
253
- async def aba_plus_upload(file: UploadFile = File(...)):
 
 
254
  """
255
- Handle ABA+ generation.
256
- Returns:
257
- - original_framework / final_framework with snapshots
258
- - transformations applied (non_circular / atomic)
259
- - arguments, classical attacks (from transformed framework)
260
- - aba_plus: assumption_combinations, normal_attacks, reverse_attacks (string lists)
261
  """
262
- content = await file.read()
263
- text = content.decode("utf-8")
264
-
265
- # 1) Build base framework + original snapshot
266
- base_fw = build_aba_framework_from_text(text)
267
- original_snapshot = _make_snapshot(base_fw)
268
-
269
- was_circular = base_fw.is_aba_circular()
270
- was_atomic = base_fw.is_aba_atomic()
271
-
272
- # 2) Transform (deepcopy → transform_aba)
273
- transformed = deepcopy(base_fw).transform_aba()
274
-
275
- # Track transformation step(s)
276
- transformations: list[TransformationStep] = []
277
- if transformed.language != base_fw.language or transformed.rules != base_fw.rules:
278
- if was_circular:
279
- transformations.append(
280
- TransformationStep(
281
- step="non_circular",
282
- applied=True,
283
- reason="The framework contained circular dependencies.",
284
- description="Transformed into a non-circular version.",
285
- result_snapshot=_make_snapshot(transformed),
286
- )
287
- )
288
- elif not was_atomic:
289
- transformations.append(
290
- TransformationStep(
291
- step="atomic",
292
- applied=True,
293
- reason="The framework contained non-atomic rules.",
294
- description="Transformed into an atomic version.",
295
- result_snapshot=_make_snapshot(transformed),
296
- )
297
- )
298
- else:
299
- transformations.append(
300
- TransformationStep(
301
- step="none",
302
- applied=False,
303
- reason="The framework was already non-circular and atomic.",
304
- description="No transformation applied.",
305
- result_snapshot=None,
306
- )
307
- )
308
 
309
- # 3) Prepare for ABA+ (on the transformed copy) and compute
310
- # generates arguments + classical attacks
311
- fw_plus = prepare_aba_plus_framework(transformed)
312
- fw_plus.make_aba_plus() # fills assumption_combinations, normal_attacks, reverse_attacks
313
 
 
 
 
 
314
  warnings = []
315
- if fw_plus.preferences:
316
- all_assumpptions = {str(a) for a in fw_plus.assumptions}
317
- pref_keys = {str(k) for k in fw_plus.preferences.keys()}
318
- if not pref_keys.issubset(all_assumpptions):
 
 
319
  warnings.append(
320
- "Incomplete preference relation detected: not all assumptions appear in the preference mapping."
321
  )
 
 
322
 
323
- # 4) Final snapshot
324
- final_snapshot = _make_snapshot(fw_plus)
325
 
326
- # 5) Serialize ABA+ pieces as strings
327
- assumption_sets = sorted(
328
- [_format_set(s)
329
- for s in getattr(fw_plus, "assumption_combinations", [])],
330
- key=lambda x: (len(x), x)
331
- )
332
 
333
- normal_str = [
334
- f"{_format_set(src)} → {_format_set(dst)}"
335
- for (src, dst) in sorted(
336
- getattr(fw_plus, "normal_attacks", []),
337
- key=lambda p: (str(p[0]), str(p[1])),
338
- )
339
- ]
340
 
341
- reverse_str = [
342
- f"{_format_set(src)} {_format_set(dst)}"
343
- for (src, dst) in sorted(
344
- getattr(fw_plus, "reverse_attacks", []),
345
- key=lambda p: (str(p[0]), str(p[1])),
346
- )
347
- ]
348
- # arguments/attacks from transformed framework (already prepared)
349
- arguments = [str(arg) for arg in sorted(fw_plus.arguments, key=str)]
350
- attacks = [str(att) for att in sorted(fw_plus.attacks, key=str)]
351
-
352
- # 6) Build response
353
- resp = ABAApiResponseModel(
354
- meta=MetaInfo(
355
- request_id=f"req-{datetime.utcnow().timestamp()}",
356
- timestamp=datetime.utcnow().isoformat(),
357
- transformed=any(t.applied for t in transformations),
358
- transformations_applied=[
359
- t.step for t in transformations if t.applied],
360
- warnings=warnings,
361
- errors=[],
362
- ),
363
- original_framework=original_snapshot,
364
- transformations=transformations,
365
- final_framework=final_snapshot,
366
- arguments=arguments,
367
- attacks=attacks,
368
- aba_plus=ABAPlusDTO(
369
- assumption_combinations=assumption_sets,
370
- normal_attacks=normal_str,
371
- reverse_attacks=reverse_str,
372
- ),
373
- )
374
- return resp
375
 
376
 
377
  @app.get("/aba-examples")
@@ -390,6 +428,16 @@ def get_aba_example(filename: str):
390
 
391
  # --- Gradual semantics --- #
392
 
 
 
 
 
 
 
 
 
 
 
393
  @app.post("/gradual", response_model=GradualOutput)
394
  def compute_gradual(input_data: GradualInput):
395
  """
@@ -456,3 +504,4 @@ def get_gradual_example(example_name: str):
456
  except json.JSONDecodeError:
457
  raise HTTPException(
458
  status_code=400, detail="Invalid JSON format in example file")
 
 
1
+ import os
2
+
3
+ cache_dir = "/tmp/hf_cache"
4
+ os.environ["TRANSFORMERS_CACHE"] = cache_dir
5
+ os.makedirs(cache_dir, exist_ok=True)
6
+
7
+ from gradual.models import GradualInput, GradualOutput
8
+ # from gradual.computations import compute_gradual_semantics
9
+ from gradual.computations import compute_gradual_space
 
 
10
  from aba.aba_builder import prepare_aba_plus_framework, build_aba_framework_from_text
11
+ from relations.predict_bert import predict_relation
12
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
13
+ from fastapi.responses import FileResponse, StreamingResponse, JSONResponse
14
+ from fastapi.middleware.cors import CORSMiddleware
15
+ from fastapi import FastAPI, UploadFile, File, Form, HTTPException
16
+ import torch
17
+ import pandas as pd
18
+ from pathlib import Path
19
+ import asyncio
20
+ import json
21
+ import io
22
  from aba.models import (
23
  RuleDTO,
24
  FrameworkSnapshot,
 
27
  ABAPlusDTO,
28
  MetaInfo,
29
  )
 
 
 
30
  from copy import deepcopy
31
  from datetime import datetime
32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  # -------------------- Config -------------------- #
35
 
 
36
  ABA_EXAMPLES_DIR = Path("./aba/examples")
37
  SAMPLES_DIR = Path("./relations/examples/samples")
38
  GRADUAL_EXAMPLES_DIR = Path("./gradual/examples")
 
118
  return FileResponse(file_path, media_type="text/csv")
119
 
120
 
121
+
122
  # --- ABA --- #
123
 
124
def _make_snapshot(fw) -> FrameworkSnapshot:
    """Serialize a framework object into a deterministic FrameworkSnapshot.

    Every collection is stringified and sorted so that equivalent frameworks
    always yield byte-identical snapshots.
    """
    rule_dtos = [
        RuleDTO(
            id=rule.rule_name,
            head=str(rule.head),
            body=sorted((str(b) for b in rule.body), key=str),
        )
        for rule in sorted(fw.rules, key=lambda r: r.rule_name)
    ]
    contrary_pairs = [
        (str(c.contraried_literal), str(c.contrary_attacker))
        for c in sorted(fw.contraries, key=str)
    ]
    prefs = None
    if getattr(fw, "preferences", None):
        prefs = {
            str(key): sorted((str(v) for v in values), key=str)
            for key, values in (fw.preferences or {}).items()
        }
    return FrameworkSnapshot(
        language=sorted((str(lit) for lit in fw.language), key=str),
        assumptions=sorted((str(a) for a in fw.assumptions), key=str),
        rules=rule_dtos,
        contraries=contrary_pairs,
        preferences=prefs,
    )
145
+
146
+
147
+ def _format_set(s) -> str:
148
+ # s may be a Python set/frozenset of Literal or strings.
149
+ try:
150
+ items = sorted([str(x) for x in s], key=str)
151
+ except Exception:
152
+ # fallback if s is already a string like "{a,b}"
153
+ return str(s)
154
+ return "{" + ",".join(items) + "}"
155
+
156
+
157
async def _process_aba_framework(
    text: str,
    enable_aba_plus: bool = False,
) -> dict:
    """
    Core processing logic for ABA frameworks.

    Builds the framework from *text*, applies the non-circular/atomic
    transformation, computes arguments and attacks for both the original and
    the transformed framework and, when *enable_aba_plus* is True, the ABA+
    assumption sets plus normal/reverse attacks for both states.

    Args:
        text: The uploaded file content as text.
        enable_aba_plus: If True, also compute ABA+ elements.

    Returns:
        dict: Complete response with before/after snapshots and all
        computations, ready for JSON serialization.
    """
    # === 1. Build original framework ===
    base_framework = build_aba_framework_from_text(text)
    base_framework.generate_arguments()
    base_framework.generate_attacks()

    # === 2. Transform framework ===
    transformed_framework = deepcopy(base_framework).transform_aba()
    # Normalize the steps to dicts once instead of re-converting per use.
    transformations = [
        _transform_to_dict(t)
        for t in _detect_transformations(base_framework, transformed_framework)
    ]

    # --- ABA+ containers (stay empty for classical ABA requests) ---
    original_assumption_sets: list = []
    final_assumption_sets: list = []
    original_aba_plus_attacks: list = []
    final_aba_plus_attacks: list = []
    original_reverse_attacks: list = []
    final_reverse_attacks: list = []

    # === 3. ABA+ computations ===
    if enable_aba_plus:
        # --- ABA+ on the original framework ---
        fw_plus_original = prepare_aba_plus_framework(deepcopy(base_framework))
        fw_plus_original.generate_arguments()
        fw_plus_original.generate_attacks()
        fw_plus_original.make_aba_plus()
        (
            original_assumption_sets,
            original_aba_plus_attacks,
            original_reverse_attacks,
        ) = _serialize_aba_plus(fw_plus_original)

        # --- Ensure transformed framework is consistent before ABA+ ---
        transformed_framework.generate_arguments()
        transformed_framework.generate_attacks()

        # --- ABA+ on the transformed framework ---
        fw_plus_transformed = prepare_aba_plus_framework(deepcopy(transformed_framework))
        fw_plus_transformed.generate_arguments()
        fw_plus_transformed.generate_attacks()
        fw_plus_transformed.make_aba_plus()
        (
            final_assumption_sets,
            final_aba_plus_attacks,
            final_reverse_attacks,
        ) = _serialize_aba_plus(fw_plus_transformed)

        warnings = _validate_aba_plus_framework(fw_plus_transformed)
    else:
        warnings = _validate_framework(transformed_framework)

    # === 4. Classical ABA computations (arguments + attacks) ===
    # NOTE(review): generate_* was already invoked on base_framework above;
    # the repeat call is kept from the original flow — confirm that
    # generate_arguments/generate_attacks are idempotent.
    base_framework.generate_arguments()
    base_framework.generate_attacks()
    transformed_framework.generate_arguments()
    transformed_framework.generate_attacks()

    original_arguments = [str(arg) for arg in sorted(base_framework.arguments, key=str)]
    original_arguments_attacks = [str(att) for att in sorted(base_framework.attacks, key=str)]
    final_arguments = [str(arg) for arg in sorted(transformed_framework.arguments, key=str)]
    final_arguments_attacks = [str(att) for att in sorted(transformed_framework.attacks, key=str)]

    # === 5. Snapshots ===
    original_snapshot = _make_snapshot(base_framework)
    final_snapshot = _make_snapshot(transformed_framework)

    # === 6. Build response ===
    return {
        "meta": {
            "request_id": f"req-{datetime.utcnow().timestamp()}",
            "timestamp": datetime.utcnow().isoformat(),
            "transformed": any(t["applied"] for t in transformations),
            "transformations_applied": [t["step"] for t in transformations if t["applied"]],
            "warnings": warnings,
            "errors": [],
        },
        "original_framework": {
            "framework": original_snapshot,
            "arguments": original_arguments,
            "arguments_attacks": original_arguments_attacks,
            "normal_attacks": original_aba_plus_attacks,
            "reverse_attacks": original_reverse_attacks,
            "assumption_sets": original_assumption_sets,
        },
        "transformations": transformations,
        "final_framework": {
            "framework": final_snapshot,
            "arguments": final_arguments,
            "arguments_attacks": final_arguments_attacks,
            "normal_attacks": final_aba_plus_attacks,
            "reverse_attacks": final_reverse_attacks,
            "assumption_sets": final_assumption_sets,
        },
    }


def _serialize_aba_plus(fw_plus) -> tuple:
    """Serialize one framework's ABA+ results into sorted display strings.

    Returns an ``(assumption_sets, normal_attacks, reverse_attacks)`` tuple.
    Both attack kinds use the same "X → Y" format (the previous inline code
    omitted the arrow for the original framework's normal attacks — fixed
    here for consistency). Debug prints from the inline version were removed.
    """
    assumption_sets = sorted(
        [_format_set(s) for s in getattr(fw_plus, "assumption_combinations", [])],
        key=lambda x: (len(x), x),
    )
    normal_attacks = [
        f"{_format_set(src)} → {_format_set(dst)}"
        for (src, dst) in sorted(
            getattr(fw_plus, "normal_attacks", []),
            key=lambda p: (str(p[0]), str(p[1])),
        )
    ]
    reverse_attacks = [
        f"{_format_set(src)} → {_format_set(dst)}"
        for (src, dst) in sorted(
            getattr(fw_plus, "reverse_attacks", []),
            key=lambda p: (str(p[0]), str(p[1])),
        )
    ]
    return assumption_sets, normal_attacks, reverse_attacks
312
 
313
 
314
+ def _detect_transformations(
315
+ base_framework,
316
+ transformed_framework,
317
+ ) -> list:
318
  """
319
+ Detect and describe which transformations were applied.
 
 
 
 
 
320
  """
321
+ transformations = []
322
+
323
+ if transformed_framework.language == base_framework.language and \
324
+ transformed_framework.rules == base_framework.rules:
325
+ # No transformation needed
326
+ transformations.append({
327
+ "step": "none",
328
+ "applied": False,
329
+ "reason": "The framework was already non-circular and atomic.",
330
+ "description": "No transformation applied.",
331
+ "result_snapshot": None,
332
+ })
333
+ return transformations
334
+
335
+ # Determine transformation type
336
+ was_circular = base_framework.is_aba_circular()
337
+ was_atomic = base_framework.is_aba_atomic()
338
+
339
+ step_name = "non_circular" if was_circular else "atomic"
340
+ reason = "circular dependencies" if was_circular else "non-atomic rules"
341
+
342
+ transformations.append({
343
+ "step": step_name,
344
+ "applied": True,
345
+ "reason": f"The framework contained {reason}.",
346
+ "description": f"Transformed into a {step_name.replace('_', '-')} version.",
347
+ "result_snapshot": _make_snapshot(transformed_framework),
348
+ })
349
+
350
+ return transformations
351
+
352
+
353
+ def _transform_to_dict(t):
354
+ """Convert TransformationStep to dict if needed."""
355
+ if isinstance(t, dict):
356
+ return t
357
+ return {
358
+ "step": t.step,
359
+ "applied": t.applied,
360
+ "reason": t.reason,
361
+ "description": t.description,
362
+ "result_snapshot": t.result_snapshot,
363
+ }
 
 
 
364
 
 
 
 
 
365
 
366
+ def _validate_framework(framework) -> list[str]:
367
+ """
368
+ Validate framework and return any warnings.
369
+ """
370
  warnings = []
371
+
372
+ if hasattr(framework, "preferences") and framework.preferences:
373
+ all_assumptions = {str(a) for a in framework.assumptions}
374
+ pref_keys = {str(k) for k in framework.preferences.keys()}
375
+
376
+ if not pref_keys.issubset(all_assumptions):
377
  warnings.append(
378
+ "Incomplete preference relation: not all assumptions appear in the preference mapping."
379
  )
380
+
381
+ return warnings
382
 
 
 
383
 
384
def _validate_aba_plus_framework(framework) -> list[str]:
    """Validate an ABA+ framework; the classical checks currently apply unchanged."""
    # ABA+-specific validations can be added here later.
    return _validate_framework(framework)
 
389
 
 
 
 
 
 
 
 
390
 
391
@app.post("/aba-upload")
async def aba_upload(file: UploadFile = File(...)):
    """
    Handle classical ABA framework generation.

    Returns: original & final frameworks with arguments and attacks (no ABA+ data)
    """
    raw = await file.read()
    return await _process_aba_framework(raw.decode("utf-8"), enable_aba_plus=False)
401
+
402
+
403
@app.post("/aba-plus-upload")
async def aba_plus_upload(file: UploadFile = File(...)):
    """
    Handle ABA+ framework generation.

    Returns: original & final frameworks with arguments, attacks, AND reverse_attacks for both
    """
    raw = await file.read()
    return await _process_aba_framework(raw.decode("utf-8"), enable_aba_plus=True)
 
 
 
 
 
 
 
 
 
 
 
 
413
 
414
 
415
  @app.get("/aba-examples")
 
428
 
429
  # --- Gradual semantics --- #
430
 
431
+ # @app.post("/gradual", response_model=GradualOutput)
432
+ # def compute_gradual(input_data: GradualInput):
433
+ # """API endpoint to compute Weighted h-Categorizer samples and convex hull."""
434
+ # return compute_gradual_semantics(
435
+ # A=input_data.A,
436
+ # R=input_data.R,
437
+ # n_samples=input_data.n_samples,
438
+ # max_iter=input_data.max_iter
439
+ # )
440
+
441
  @app.post("/gradual", response_model=GradualOutput)
442
  def compute_gradual(input_data: GradualInput):
443
  """
 
504
  except json.JSONDecodeError:
505
  raise HTTPException(
506
  status_code=400, detail="Invalid JSON format in example file")
507
+