Corin1998 committed on
Commit
44dd3d1
·
verified ·
1 Parent(s): 49c1832

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -61
app.py CHANGED
@@ -1,5 +1,4 @@
1
  import os
2
- import io
3
  import json
4
  import hashlib
5
  import gradio as gr
@@ -12,55 +11,64 @@ from pipelines.openai_ingest import (
12
  from pipelines.parsing import normalize_resume
13
  from pipelines.merge import merge_normalized_records
14
  from pipelines.skills import extract_skills
15
- from pipelines.anonymize import anonymize_text, render_anonymized_pdf
16
  from pipelines.scoring import compute_quality_score
17
- from pipelines.storage import persist_to_hf
18
  from pipelines.utils import detect_filetype, load_doc_text
19
 
20
- APP_TITLE = "候補者インテーク & レジュメ標準化(OpenAI版)"
21
 
22
 
23
- def process_resumes(files, candidate_id: str, additional_notes: str = ""):
24
- if not files:
 
 
 
 
 
 
 
 
 
25
  raise gr.Error("少なくとも1ファイルをアップロードしてください。")
26
 
27
  partial_records = []
28
  raw_texts = []
29
 
30
- for f in files:
31
- raw_bytes = f.read()
32
- filetype = detect_filetype(f.name, raw_bytes)
 
 
 
33
 
34
  # 1) テキスト抽出:画像/PDFはOpenAI Vision OCR、docx/txtは生文面+OpenAI整形
35
  if filetype in {"pdf", "image"}:
36
- text = extract_text_with_openai(raw_bytes, filename=f.name, filetype=filetype)
37
  else:
38
  base_text = load_doc_text(filetype, raw_bytes)
39
- # 生テキストをそのままOpenAIへ渡し、軽く整形した全文を返す
40
- text = extract_text_with_openai(base_text.encode("utf-8"), filename=f.name, filetype="txt")
41
 
42
- raw_texts.append({"filename": f.name, "text": text})
43
 
44
- # 2) OpenAIでセクション構造化
45
  structured = structure_with_openai(text)
46
- # 念のためルールベース正規化も適用(期間抽出など補助)
47
  normalized = normalize_resume({
48
  "work_experience": structured.get("work_experience_raw", ""),
49
  "education": structured.get("education_raw", ""),
50
  "certifications": structured.get("certifications_raw", ""),
51
  "skills": ", ".join(structured.get("skills_list", [])),
52
  })
 
53
  partial_records.append({
54
- "source": f.name,
55
  "text": text,
56
  "structured": structured,
57
  "normalized": normalized,
58
  })
59
 
60
- # 3) 統合(複数ファイル→1候補者)
61
  merged = merge_normalized_records([r["normalized"] for r in partial_records])
62
 
63
- # 4) スキル抽出(辞書/正規表現)
64
  merged_text = "\n\n".join([r["text"] for r in partial_records])
65
  skills = extract_skills(merged_text, {
66
  "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
@@ -69,65 +77,43 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
69
  "skills": ", ".join(merged.get("skills", [])),
70
  })
71
 
72
- # 5) 匿名化
73
- anonymized_text, anon_map = anonymize_text(merged_text)
74
- anon_pdf_bytes = render_anonymized_pdf(anonymized_text)
75
-
76
  # 6) 品質スコア
77
  score = compute_quality_score(merged_text, merged)
78
 
79
  # 7) 要約(300/100/1文)
80
  summaries = summarize_with_openai(merged_text)
81
 
82
- # 8) 構造化出力
83
  result_json = {
84
  "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
85
- "files": [f.name for f in files],
86
  "merged": merged,
87
  "skills": skills,
88
  "quality_score": score,
89
  "summaries": summaries,
90
- "anonymization_map": anon_map,
91
  "notes": additional_notes,
92
  }
93
 
94
- # 9) HF Datasets 保存
95
- dataset_repo = os.environ.get("DATASET_REPO")
96
- commit_info = None
97
- if dataset_repo:
98
- file_hash = result_json["candidate_id"]
99
- commit_info = persist_to_hf(
100
- dataset_repo=dataset_repo,
101
- record=result_json,
102
- anon_pdf_bytes=anon_pdf_bytes,
103
- parquet_path=f"candidates/{file_hash}.parquet",
104
- json_path=f"candidates/{file_hash}.json",
105
- pdf_path=f"candidates/{file_hash}.anon.pdf",
106
- )
107
-
108
- anon_pdf = (result_json["candidate_id"] + ".anon.pdf", anon_pdf_bytes)
109
-
110
  return (
111
- json.dumps(result_json, ensure_ascii=False, indent=2), # out_jsonCodeへ文字列)
112
- json.dumps(skills, ensure_ascii=False, indent=2), # JSON→Code: ここを文字列で返す
113
- json.dumps(score, ensure_ascii=False, indent=2), # out_scoreCodeへ文字列)
114
- summaries["300chars"],
115
- summaries["100chars"],
116
- summaries["onesent"],
117
- anon_pdf,
118
- json.dumps(commit_info or {"status": "skipped (DATASET_REPO not set)"}, ensure_ascii=False, indent=2),
119
  )
120
 
121
 
122
  with gr.Blocks(title=APP_TITLE) as demo:
123
- gr.Markdown(f"# {APP_TITLE}\n複数ファイルを統合→OpenAIで読み込み/構造化/要約→匿名化→Datasets保存")
124
 
125
  with gr.Row():
 
126
  in_files = gr.Files(
127
  label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
128
  file_count="multiple",
129
  file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
130
- type="file"
131
  )
132
  candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
133
  notes = gr.Textbox(label="補足メモ(任意)", lines=3)
@@ -138,30 +124,24 @@ with gr.Blocks(title=APP_TITLE) as demo:
138
  out_json = gr.Code(label="統合出力 (JSON)")
139
 
140
  with gr.Tab("抽出スキル"):
141
- # ★ GradioのJSONスキーマ推論バグ回避のため Code に変更
142
  out_skills = gr.Code(label="スキル一覧 (JSON)")
143
 
144
  with gr.Tab("品質スコア"):
145
- out_score = gr.Code(label="品質評価")
146
 
147
  with gr.Tab("要約 (300/100/1文)"):
148
  out_sum_300 = gr.Textbox(label="300字要約")
149
  out_sum_100 = gr.Textbox(label="100字要約")
150
  out_sum_1 = gr.Textbox(label="1文要約")
151
 
152
- with gr.Tab("匿名PDF"):
153
- out_pdf = gr.File(label="匿名PDFダウンロード")
154
-
155
- with gr.Tab("Datasets 保存ログ"):
156
- out_commit = gr.Code(label="コミット情報")
157
-
158
  run_btn.click(
159
  process_resumes,
160
  inputs=[in_files, candidate_id, notes],
161
- outputs=[out_json, out_skills, out_score, out_sum_300, out_sum_100, out_sum_1, out_pdf, out_commit],
162
  )
163
 
164
 
165
  if __name__ == "__main__":
166
- # ローカル未到達環境での ValueError 回避(Space でも安全)
167
  demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
 
1
  import os
 
2
  import json
3
  import hashlib
4
  import gradio as gr
 
11
  from pipelines.parsing import normalize_resume
12
  from pipelines.merge import merge_normalized_records
13
  from pipelines.skills import extract_skills
 
14
  from pipelines.scoring import compute_quality_score
 
15
  from pipelines.utils import detect_filetype, load_doc_text
16
 
17
+ APP_TITLE = "候補者インテーク & レジュメ標準化(安定版・最小機能)"
18
 
19
 
20
def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
    """
    Stable, minimal resume pipeline:
      OCR/extraction -> structuring -> normalization -> merge ->
      skill extraction -> quality score -> summaries.
    Anonymized-PDF generation and HF Datasets persistence are disabled
    in this build.

    Args:
        filepaths: list of file paths from gr.Files(type="filepath").
        candidate_id: optional candidate id; when empty, one is derived
            from the SHA-256 of the merged text.
        additional_notes: free-form notes copied into the output record.

    Returns:
        Tuple of (result JSON string, skills JSON string, score JSON string,
        300-char summary, 100-char summary, one-sentence summary) — JSON is
        pre-serialized because the outputs are gr.Code components.

    Raises:
        gr.Error: when no files were uploaded.
    """
    if not filepaths:
        raise gr.Error("少なくとも1ファイルをアップロードしてください。")

    partial_records = []

    for path in filepaths:
        # Read the raw bytes from the temp path Gradio hands us.
        with open(path, "rb") as rf:
            raw_bytes = rf.read()
        fname = os.path.basename(path)
        filetype = detect_filetype(fname, raw_bytes)

        # 1) Text extraction: PDFs/images go through OpenAI Vision OCR;
        #    docx/txt are loaded locally, then lightly cleaned by OpenAI.
        if filetype in {"pdf", "image"}:
            text = extract_text_with_openai(raw_bytes, filename=fname, filetype=filetype)
        else:
            base_text = load_doc_text(filetype, raw_bytes)
            text = extract_text_with_openai(base_text.encode("utf-8"), filename=fname, filetype="txt")

        # 2) Section structuring via OpenAI -> 3) rule-based normalization
        #    (period extraction etc.) as a safety net.
        structured = structure_with_openai(text)
        normalized = normalize_resume({
            "work_experience": structured.get("work_experience_raw", ""),
            "education": structured.get("education_raw", ""),
            "certifications": structured.get("certifications_raw", ""),
            "skills": ", ".join(structured.get("skills_list", [])),
        })

        partial_records.append({
            "source": fname,
            "text": text,
            "structured": structured,
            "normalized": normalized,
        })

    # 4) Merge multiple files into a single candidate record.
    merged = merge_normalized_records([r["normalized"] for r in partial_records])

    # 5) Skill extraction (dictionary / regex based).
    merged_text = "\n\n".join([r["text"] for r in partial_records])
    skills = extract_skills(merged_text, {
        "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
        # NOTE(review): the "education"/"certifications" entries were not
        # visible in the diff rendering; reconstructed by analogy with the
        # normalize_resume() payload above — confirm against file history.
        "education": merged.get("raw_sections", {}).get("education", ""),
        "certifications": merged.get("raw_sections", {}).get("certifications", ""),
        "skills": ", ".join(merged.get("skills", [])),
    })

    # 6) Quality score.
    score = compute_quality_score(merged_text, merged)

    # 7) Summaries (300 chars / 100 chars / one sentence).
    summaries = summarize_with_openai(merged_text)

    # 8) Structured output — serialized to strings so gr.Code renders safely.
    result_json = {
        "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
        "files": [os.path.basename(p) for p in filepaths],
        "merged": merged,
        "skills": skills,
        "quality_score": score,
        "summaries": summaries,
        "notes": additional_notes,
    }

    return (
        json.dumps(result_json, ensure_ascii=False, indent=2),  # out_json -> Code
        json.dumps(skills, ensure_ascii=False, indent=2),       # out_skills -> Code
        json.dumps(score, ensure_ascii=False, indent=2),        # out_score -> Code
        summaries.get("300chars", ""),
        summaries.get("100chars", ""),
        summaries.get("onesent", ""),
    )
105
 
106
 
107
  with gr.Blocks(title=APP_TITLE) as demo:
108
+ gr.Markdown(f"# {APP_TITLE}\nOpenAIでOCR/構造化/要約→統合→スコア(匿名化・HF保存なしの安定版)")
109
 
110
  with gr.Row():
111
+ # ★ Gradio v4仕様: Filesは type='filepath' or 'binary' のみ
112
  in_files = gr.Files(
113
  label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
114
  file_count="multiple",
115
  file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
116
+ type="filepath"
117
  )
118
  candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
119
  notes = gr.Textbox(label="補足メモ(任意)", lines=3)
 
124
  out_json = gr.Code(label="統合出力 (JSON)")
125
 
126
  with gr.Tab("抽出スキル"):
127
+ # ★ JSONスキーマ推論の例外回避のため Code を使用
128
  out_skills = gr.Code(label="スキル一覧 (JSON)")
129
 
130
  with gr.Tab("品質スコア"):
131
+ out_score = gr.Code(label="品質評価 (JSON)")
132
 
133
  with gr.Tab("要約 (300/100/1文)"):
134
  out_sum_300 = gr.Textbox(label="300字要約")
135
  out_sum_100 = gr.Textbox(label="100字要約")
136
  out_sum_1 = gr.Textbox(label="1文要約")
137
 
 
 
 
 
 
 
138
  run_btn.click(
139
  process_resumes,
140
  inputs=[in_files, candidate_id, notes],
141
+ outputs=[out_json, out_skills, out_score, out_sum_300, out_sum_100, out_sum_1],
142
  )
143
 
144
 
145
if __name__ == "__main__":
    # share=True keeps the UI reachable even when localhost is not exposed
    # (e.g. inside a Space/container); bind all interfaces on port 7860.
    demo.launch(share=True, server_name="0.0.0.0", server_port=7860)