Corin1998 committed on
Commit e1db6bc · verified · 1 Parent(s): 4b0ddf0

Update app.py

Files changed (1):
  1. app.py +54 -30
app.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import io
 import json
 import hashlib
 import gradio as gr
@@ -11,31 +12,26 @@ from pipelines.openai_ingest import (
 from pipelines.parsing import normalize_resume
 from pipelines.merge import merge_normalized_records
 from pipelines.skills import extract_skills
+from pipelines.anonymize import anonymize_text, render_anonymized_pdf
 from pipelines.scoring import compute_quality_score
+from pipelines.storage import persist_to_hf
 from pipelines.utils import detect_filetype, load_doc_text
 
-APP_TITLE = "候補者インテーク & レジュメ標準化(安定・軽量版)"
+APP_TITLE = "候補者インテーク & レジュメ標準化(OpenAI版)"
 
 
 def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
-    """
-    Lightweight stable version:
-    - OCR/extraction → structuring → normalization → merge → skill extraction → summary → score
-    - Heavy steps (anonymized PDF, HF persistence, etc.) are all OFF
-    Input: list of paths from gr.Files(type="filepath")
-    Output: JSON string / skills JSON string / score JSON string / summaries (3 kinds)
-    """
     if not filepaths:
         raise gr.Error("少なくとも1ファイルをアップロードしてください。")
 
     partial_records = []
-    merged_plain_texts = []
+    raw_texts = []
 
+    # gr.Files(type="filepath") hands us plain string paths
     for path in filepaths:
+        with open(path, "rb") as f:
+            raw_bytes = f.read()
         fname = os.path.basename(path)
-        with open(path, "rb") as rf:
-            raw_bytes = rf.read()
-
         filetype = detect_filetype(fname, raw_bytes)
 
         # 1) Text extraction
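For orientation, the two pipelines.utils helpers are only called in this diff, never defined. A minimal sketch of plausible implementations, assuming extension sniffing with a magic-byte fallback (everything beyond the call signatures above is an assumption):

    import os

    # Hypothetical sketch; the real pipelines/utils.py is not part of this commit.
    def detect_filetype(fname: str, raw_bytes: bytes) -> str:
        """Guess a coarse filetype from the extension, falling back to magic bytes."""
        ext = os.path.splitext(fname)[1].lower().lstrip(".")
        if ext in {"pdf", "png", "jpg", "jpeg", "tiff", "bmp", "docx", "txt"}:
            return ext
        if raw_bytes.startswith(b"%PDF"):
            return "pdf"
        return "txt"

    def load_doc_text(filetype: str, raw_bytes: bytes) -> str:
        """Decode plain text directly; binary formats would route to real parsers."""
        if filetype == "txt":
            return raw_bytes.decode("utf-8", errors="replace")
        raise NotImplementedError(f"no parser sketched for {filetype!r}")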
@@ -45,7 +41,7 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
         base_text = load_doc_text(filetype, raw_bytes)
         text = extract_text_with_openai(base_text.encode("utf-8"), filename=fname, filetype="txt")
 
-        merged_plain_texts.append(text)
+        raw_texts.append({"filename": fname, "text": text})
 
         # 2) Structuring → 3) Normalization
         structured = structure_with_openai(text)
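extract_text_with_openai is likewise imported from pipelines.openai_ingest without being shown; note the caller re-encodes already-extracted text to UTF-8 bytes and forces filetype="txt". A hedged sketch of that txt branch using the standard openai-python client (the model name and prompt are assumptions):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Hypothetical sketch of the txt branch only; pipelines/openai_ingest.py may differ.
    def extract_text_with_openai(data: bytes, filename: str, filetype: str) -> str:
        if filetype != "txt":
            raise ValueError("only the txt branch is sketched here")
        resp = client.chat.completions.create(
            model="gpt-4o-mini",  # assumed model
            messages=[
                {"role": "system", "content": "Clean up this resume text and return plain text only."},
                {"role": "user", "content": data.decode("utf-8", errors="replace")},
            ],
        )
        return resp.choices[0].message.content or ""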
@@ -55,7 +51,6 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
             "certifications": structured.get("certifications_raw", ""),
             "skills": ", ".join(structured.get("skills_list", [])),
         })
-
         partial_records.append({
             "source": fname,
             "text": text,
@@ -63,11 +58,11 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
             "normalized": normalized,
         })
 
-    # 4) Merge (multiple files → one candidate)
+    # 4) Merge
     merged = merge_normalized_records([r["normalized"] for r in partial_records])
 
-    # 5) Skill extraction (lightweight dictionary/regex)
-    merged_text = "\n\n".join(merged_plain_texts)
+    # 5) Skill extraction
+    merged_text = "\n\n".join([r["text"] for r in partial_records])
     skills = extract_skills(merged_text, {
         "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
         "education": merged.get("raw_sections", {}).get("education", ""),
@@ -75,43 +70,66 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
         "skills": ", ".join(merged.get("skills", [])),
     })
 
-    # 6) Quality score
+    # 6) Anonymization (lightweight regex-based)
+    anonymized_text, anon_map = anonymize_text(merged_text)
+    anon_pdf_bytes = render_anonymized_pdf(anonymized_text)
+
+    # 7) Quality score
     score = compute_quality_score(merged_text, merged)
 
-    # 7) Summaries (300 chars / 100 chars / one sentence)
+    # 8) Summaries
     summaries = summarize_with_openai(merged_text)
 
-    # 8) Structured output
+    # 9) Assemble output
+    cid = candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16]
     result_json = {
-        "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
+        "candidate_id": cid,
         "files": [os.path.basename(p) for p in filepaths],
         "merged": merged,
         "skills": skills,
         "quality_score": score,
         "summaries": summaries,
+        "anonymization_map": anon_map,
         "notes": additional_notes,
     }
 
+    # 10) Persist to HF Datasets (optional)
+    dataset_repo = os.environ.get("DATASET_REPO")
+    commit_info = None
+    if dataset_repo:
+        commit_info = persist_to_hf(
+            dataset_repo=dataset_repo,
+            record=result_json,
+            anon_pdf_bytes=anon_pdf_bytes,
+            parquet_path=f"candidates/{cid}.parquet",
+            json_path=f"candidates/{cid}.json",
+            pdf_path=f"candidates/{cid}.anon.pdf",
+        )
+
+    anon_pdf = (f"{cid}.anon.pdf", anon_pdf_bytes)
+
     return (
-        json.dumps(result_json, ensure_ascii=False, indent=2),  # out_json -> Code
-        json.dumps(skills, ensure_ascii=False, indent=2),       # out_skills -> Code
-        json.dumps(score, ensure_ascii=False, indent=2),        # out_score -> Code
+        json.dumps(result_json, ensure_ascii=False, indent=2),
+        json.dumps(skills, ensure_ascii=False, indent=2),  # JSON returned via Code components
+        json.dumps(score, ensure_ascii=False, indent=2),
         summaries.get("300chars", ""),
         summaries.get("100chars", ""),
         summaries.get("onesent", ""),
+        anon_pdf,
+        json.dumps(commit_info or {"status": "skipped (DATASET_REPO not set)"}, ensure_ascii=False, indent=2),
     )
 
 
 with gr.Blocks(title=APP_TITLE) as demo:
-    gr.Markdown(f"# {APP_TITLE}\nOpenAIでOCR/構造化/要約→統合→スコア(匿名化・保存なし)")
+    gr.Markdown(f"# {APP_TITLE}\n複数ファイルを統合→OpenAIで読み込み/構造化/要約→匿名化→Datasets保存")
 
     with gr.Row():
-        # ★ Use 'filepath' per the Gradio v4 API ('file' is not accepted)
+        # ★ Gradio v4 does not accept type="file"; use 'filepath'
         in_files = gr.Files(
             label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
             file_count="multiple",
             file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
-            type="filepath"
+            type="filepath",
         )
         candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
         notes = gr.Textbox(label="補足メモ(任意)", lines=3)
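anonymize_text is labeled regex-based in the new comment; a minimal sketch under that assumption (the pattern set and the token format are illustrative):

    import re

    # Hypothetical sketch of the text half of pipelines/anonymize.py.
    def anonymize_text(text: str) -> tuple[str, dict]:
        patterns = {
            "EMAIL": r"[\w.+-]+@[\w-]+\.[\w.-]+",
            "PHONE": r"0\d{1,4}-\d{1,4}-\d{3,4}",  # Japanese-style numbers
        }
        anon_map = {}
        for label, pat in patterns.items():
            for i, match in enumerate(re.findall(pat, text)):
                token = f"[{label}_{i}]"
                anon_map[token] = match
                text = text.replace(match, token, 1)
        return text, anon_map

One caveat: gr.File output components in Gradio 4 expect a file path, so if the (filename, bytes) tuple returned above does not render, writing the bytes to a temporary file and returning its path is the safer pattern:

    import tempfile

    # Safer return value for a gr.File output: a real path on disk.
    with tempfile.NamedTemporaryFile(suffix=".anon.pdf", delete=False) as tmp:
        tmp.write(anon_pdf_bytes)
    anon_pdf = tmp.name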
@@ -122,7 +140,7 @@ with gr.Blocks(title=APP_TITLE) as demo:
         out_json = gr.Code(label="統合出力 (JSON)")
 
     with gr.Tab("抽出スキル"):
-        # ★ Standardize on Code to avoid Gradio's JSON schema-inference bug
+        # ★ Avoid the JSON widget's type-inference bug; display safely via Code
         out_skills = gr.Code(label="スキル一覧 (JSON)")
 
     with gr.Tab("品質スコア"):
@@ -133,13 +151,19 @@ with gr.Blocks(title=APP_TITLE) as demo:
         out_sum_100 = gr.Textbox(label="100字要約")
         out_sum_1 = gr.Textbox(label="1文要約")
 
+    with gr.Tab("匿名PDF"):
+        out_pdf = gr.File(label="匿名PDFダウンロード")
+
+    with gr.Tab("Datasets 保存ログ"):
+        out_commit = gr.Code(label="コミット情報")
+
     run_btn.click(
         process_resumes,
         inputs=[in_files, candidate_id, notes],
-        outputs=[out_json, out_skills, out_score, out_sum_300, out_sum_100, out_sum_1],
+        outputs=[out_json, out_skills, out_score, out_sum_300, out_sum_100, out_sum_1, out_pdf, out_commit],
     )
 
 
 if __name__ == "__main__":
-    # Explicitly set share=True so the app works even where local access is unreachable
+    # Workaround for environments where local access is unreachable
     demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
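persist_to_hf is imported from pipelines.storage but never shown. A minimal sketch using huggingface_hub's upload API, where only the keyword arguments match the call in the diff; the Parquet step is omitted, and the returned dict (kept JSON-serializable, as the out_commit tab requires) is an assumption:

    import json
    from huggingface_hub import HfApi

    # Hypothetical sketch of pipelines/storage.py.
    def persist_to_hf(dataset_repo, record, anon_pdf_bytes,
                      parquet_path, json_path, pdf_path):
        api = HfApi()  # token read from HF_TOKEN
        api.upload_file(
            path_or_fileobj=json.dumps(record, ensure_ascii=False).encode("utf-8"),
            path_in_repo=json_path,
            repo_id=dataset_repo,
            repo_type="dataset",
        )
        info = api.upload_file(
            path_or_fileobj=anon_pdf_bytes,
            path_in_repo=pdf_path,
            repo_id=dataset_repo,
            repo_type="dataset",
        )
        # Writing parquet_path would need pandas/pyarrow; omitted in this sketch.
        return {"commit_url": info.commit_url, "oid": info.oid}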
 