Corin1998 committed on
Commit
efd9e64
·
verified ·
1 Parent(s): a7f8bc2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -22
app.py CHANGED
@@ -1,5 +1,4 @@
1
  import os
2
- import io
3
  import json
4
  import hashlib
5
  import gradio as gr
@@ -27,12 +26,15 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
27
  partial_records = []
28
  raw_texts = []
29
 
30
- for p in files: # gr.Files(type="filepath") でパスが来る
31
- raw_bytes = open(p, "rb").read()
32
  fname = os.path.basename(p)
 
 
 
33
  filetype = detect_filetype(fname, raw_bytes)
34
 
35
- # 1) テキスト抽出
36
  if filetype in {"pdf", "image"}:
37
  text = extract_text_with_openai(raw_bytes, filename=fname, filetype=filetype)
38
  else:
@@ -41,7 +43,7 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
41
 
42
  raw_texts.append({"filename": fname, "text": text})
43
 
44
- # 2) 構造化→正規化
45
  structured = structure_with_openai(text)
46
  normalized = normalize_resume({
47
  "work_experience": structured.get("work_experience_raw", ""),
@@ -56,10 +58,10 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
56
  "normalized": normalized,
57
  })
58
 
59
- # 3) 統合
60
  merged = merge_normalized_records([r["normalized"] for r in partial_records])
61
 
62
- # 4) スキル抽出
63
  merged_text = "\n\n".join([r["text"] for r in partial_records])
64
  skills = extract_skills(merged_text, {
65
  "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
@@ -75,13 +77,12 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
75
  # 6) 品質スコア
76
  score = compute_quality_score(merged_text, merged)
77
 
78
- # 7) 要約
79
  summaries = summarize_with_openai(merged_text)
80
 
81
  # 8) 構造化出力
82
- cid = candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16]
83
  result_json = {
84
- "candidate_id": cid,
85
  "files": [os.path.basename(p) for p in files],
86
  "merged": merged,
87
  "skills": skills,
@@ -91,22 +92,23 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
91
  "notes": additional_notes,
92
  }
93
 
94
- # 9) HF Datasets 保存(任意)
95
  dataset_repo = os.environ.get("DATASET_REPO")
96
  commit_info = None
97
  if dataset_repo:
 
98
  commit_info = persist_to_hf(
99
  dataset_repo=dataset_repo,
100
  record=result_json,
101
  anon_pdf_bytes=anon_pdf_bytes,
102
- parquet_path=f"candidates/{cid}.parquet",
103
- json_path=f"candidates/{cid}.json",
104
- pdf_path=f"candidates/{cid}.anon.pdf",
105
  )
106
 
107
- anon_pdf = (f"{cid}.anon.pdf", anon_pdf_bytes)
108
 
109
- # UI には全て文字列(JSONダンプ)で返す
110
  return (
111
  json.dumps(result_json, ensure_ascii=False, indent=2),
112
  json.dumps(skills, ensure_ascii=False, indent=2),
@@ -127,7 +129,7 @@ with gr.Blocks(title=APP_TITLE) as demo:
127
  label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
128
  file_count="multiple",
129
  file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
130
- type="filepath", # ←重要
131
  )
132
  candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
133
  notes = gr.Textbox(label="補足メモ(任意)", lines=3)
@@ -138,7 +140,7 @@ with gr.Blocks(title=APP_TITLE) as demo:
138
  out_json = gr.Code(label="統合出力 (JSON)")
139
 
140
  with gr.Tab("抽出スキル"):
141
- out_skills = gr.Code(label="スキル一覧 (JSON)") # ← gr.JSON を使わない
142
 
143
  with gr.Tab("品質スコア"):
144
  out_score = gr.Code(label="品質評価 (JSON)")
@@ -152,15 +154,14 @@ with gr.Blocks(title=APP_TITLE) as demo:
152
  out_pdf = gr.File(label="匿名PDFダウンロード")
153
 
154
  with gr.Tab("Datasets 保存ログ"):
155
- out_commit = gr.Code(label="コミット情報 (JSON)")
156
 
157
  run_btn.click(
158
- fn=process_resumes,
159
  inputs=[in_files, candidate_id, notes],
160
  outputs=[out_json, out_skills, out_score, out_sum_300, out_sum_100, out_sum_1, out_pdf, out_commit],
161
- api_name="run",
162
  )
163
 
164
 
165
  if __name__ == "__main__":
166
- demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
 
1
  import os
 
2
  import json
3
  import hashlib
4
  import gradio as gr
 
26
  partial_records = []
27
  raw_texts = []
28
 
29
+ # gr.Files(type="filepath") を前提に、パスで受け取り→自前で read
30
+ for p in files:
31
  fname = os.path.basename(p)
32
+ with open(p, "rb") as fh:
33
+ raw_bytes = fh.read()
34
+
35
  filetype = detect_filetype(fname, raw_bytes)
36
 
37
+ # 1) テキスト抽出:画像/PDFはOpenAI Vision OCR、docx/txtは生文面+OpenAI整形
38
  if filetype in {"pdf", "image"}:
39
  text = extract_text_with_openai(raw_bytes, filename=fname, filetype=filetype)
40
  else:
 
43
 
44
  raw_texts.append({"filename": fname, "text": text})
45
 
46
+ # 2) OpenAIでセクション構造化 → ルールベース正規化も適用
47
  structured = structure_with_openai(text)
48
  normalized = normalize_resume({
49
  "work_experience": structured.get("work_experience_raw", ""),
 
58
  "normalized": normalized,
59
  })
60
 
61
+ # 3) 統合(複数ファイル→1候補者)
62
  merged = merge_normalized_records([r["normalized"] for r in partial_records])
63
 
64
+ # 4) スキル抽出(辞書/正規表現)
65
  merged_text = "\n\n".join([r["text"] for r in partial_records])
66
  skills = extract_skills(merged_text, {
67
  "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
 
77
  # 6) 品質スコア
78
  score = compute_quality_score(merged_text, merged)
79
 
80
+ # 7) 要約(300/100/1文)
81
  summaries = summarize_with_openai(merged_text)
82
 
83
  # 8) 構造化出力
 
84
  result_json = {
85
+ "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
86
  "files": [os.path.basename(p) for p in files],
87
  "merged": merged,
88
  "skills": skills,
 
92
  "notes": additional_notes,
93
  }
94
 
95
+ # 9) HF Datasets 保存
96
  dataset_repo = os.environ.get("DATASET_REPO")
97
  commit_info = None
98
  if dataset_repo:
99
+ file_hash = result_json["candidate_id"]
100
  commit_info = persist_to_hf(
101
  dataset_repo=dataset_repo,
102
  record=result_json,
103
  anon_pdf_bytes=anon_pdf_bytes,
104
+ parquet_path=f"candidates/{file_hash}.parquet",
105
+ json_path=f"candidates/{file_hash}.json",
106
+ pdf_path=f"candidates/{file_hash}.anon.pdf",
107
  )
108
 
109
+ anon_pdf = (result_json["candidate_id"] + ".anon.pdf", anon_pdf_bytes)
110
 
111
+ # dict を gr.Code で安全表示するため、文字列化して返す
112
  return (
113
  json.dumps(result_json, ensure_ascii=False, indent=2),
114
  json.dumps(skills, ensure_ascii=False, indent=2),
 
129
  label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
130
  file_count="multiple",
131
  file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
132
+ type="filepath", # ← 重要:パスで受け取る
133
  )
134
  candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
135
  notes = gr.Textbox(label="補足メモ(任意)", lines=3)
 
140
  out_json = gr.Code(label="統合出力 (JSON)")
141
 
142
  with gr.Tab("抽出スキル"):
143
+ out_skills = gr.Code(label="スキル一覧 (JSON)") # ← gr.JSON をやめて文字列表示
144
 
145
  with gr.Tab("品質スコア"):
146
  out_score = gr.Code(label="品質評価 (JSON)")
 
154
  out_pdf = gr.File(label="匿名PDFダウンロード")
155
 
156
  with gr.Tab("Datasets 保存ログ"):
157
+ out_commit = gr.Code(label="コミット情報")
158
 
159
  run_btn.click(
160
+ process_resumes,
161
  inputs=[in_files, candidate_id, notes],
162
  outputs=[out_json, out_skills, out_score, out_sum_300, out_sum_100, out_sum_1, out_pdf, out_commit],
 
163
  )
164
 
165
 
166
  if __name__ == "__main__":
167
+ demo.launch()