import asyncio
import os
import time
from dataclasses import dataclass, field
from typing import Dict, Optional, cast

import gradio as gr

from graphgen.bases.base_storage import StorageNameSpace
from graphgen.bases.datatypes import Chunk
from graphgen.models import (
    JsonKVStorage,
    JsonListStorage,
    NetworkXStorage,
    OpenAIClient,
    Tokenizer,
)
from graphgen.operators import (
    build_mm_kg,
    build_text_kg,
    chunk_documents,
    generate_qas,
    judge_statement,
    partition_kg,
    quiz,
    read_files,
    search_all,
)
from graphgen.utils import async_to_sync_method, compute_mm_hash, logger

sys_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))


@dataclass
class GraphGen:
    # default_factory gives each instance a fresh timestamp; a plain default
    # would be evaluated once at class-definition time and shared by all instances
    unique_id: int = field(default_factory=lambda: int(time.time()))
    working_dir: str = os.path.join(sys_path, "cache")

    # llm
    tokenizer_instance: Optional[Tokenizer] = None
    synthesizer_llm_client: Optional[OpenAIClient] = None
    trainee_llm_client: Optional[OpenAIClient] = None

    # webui
    progress_bar: Optional[gr.Progress] = None

    def __post_init__(self):
        self.tokenizer_instance: Tokenizer = self.tokenizer_instance or Tokenizer(
            model_name=os.getenv("TOKENIZER_MODEL")
        )

        self.synthesizer_llm_client: OpenAIClient = (
            self.synthesizer_llm_client
            or OpenAIClient(
                model_name=os.getenv("SYNTHESIZER_MODEL"),
                api_key=os.getenv("SYNTHESIZER_API_KEY"),
                base_url=os.getenv("SYNTHESIZER_BASE_URL"),
                tokenizer=self.tokenizer_instance,
            )
        )

        self.trainee_llm_client: OpenAIClient = self.trainee_llm_client or OpenAIClient(
            model_name=os.getenv("TRAINEE_MODEL"),
            api_key=os.getenv("TRAINEE_API_KEY"),
            base_url=os.getenv("TRAINEE_BASE_URL"),
            tokenizer=self.tokenizer_instance,
        )

        self.full_docs_storage: JsonKVStorage = JsonKVStorage(
            self.working_dir, namespace="full_docs"
        )
        self.chunks_storage: JsonKVStorage = JsonKVStorage(
            self.working_dir, namespace="chunks"
        )
        self.graph_storage: NetworkXStorage = NetworkXStorage(
            self.working_dir, namespace="graph"
        )
        self.search_storage: JsonKVStorage = JsonKVStorage(
            self.working_dir, namespace="search"
        )
        self.rephrase_storage: JsonKVStorage = JsonKVStorage(
            self.working_dir, namespace="rephrase"
        )
        self.qa_storage: JsonListStorage = JsonListStorage(
            os.path.join(self.working_dir, "data", "graphgen", f"{self.unique_id}"),
            namespace="qa",
        )

    @async_to_sync_method
    async def insert(self, read_config: Dict, split_config: Dict):
        """
        insert chunks into the graph
        """
        # Step 1: Read files
        data = read_files(read_config["input_file"], self.working_dir)
        if len(data) == 0:
            logger.warning("No data to process")
            return

        assert isinstance(data, list) and isinstance(data[0], dict)

        # TODO: make coreference resolution configurable

        new_docs = {compute_mm_hash(doc, prefix="doc-"): doc for doc in data}
        _add_doc_keys = await self.full_docs_storage.filter_keys(list(new_docs.keys()))
        new_docs = {k: v for k, v in new_docs.items() if k in _add_doc_keys}
        new_text_docs = {k: v for k, v in new_docs.items() if v.get("type") == "text"}
        new_mm_docs = {k: v for k, v in new_docs.items() if v.get("type") != "text"}

        await self.full_docs_storage.upsert(new_docs)

        async def _insert_text_docs(text_docs):
            if len(text_docs) == 0:
                logger.warning("All text docs are already in the storage")
                return
            logger.info("[New Docs] inserting %d text docs", len(text_docs))
            # Step 2.1: Split chunks and filter existing ones
            inserting_chunks = await chunk_documents(
                text_docs,
                split_config["chunk_size"],
                split_config["chunk_overlap"],
                self.tokenizer_instance,
                self.progress_bar,
            )

            _add_chunk_keys = await self.chunks_storage.filter_keys(
                list(inserting_chunks.keys())
            )
            inserting_chunks = {
                k: v for k, v in inserting_chunks.items() if k in _add_chunk_keys
            }

            if len(inserting_chunks) == 0:
                logger.warning("All text chunks are already in the storage")
                return

            logger.info("[New Chunks] inserting %d text chunks", len(inserting_chunks))
            await self.chunks_storage.upsert(inserting_chunks)

            # Step 2.2: Extract entities and relations from text chunks
            logger.info("[Text Entity and Relation Extraction] processing ...")
            _add_entities_and_relations = await build_text_kg(
                llm_client=self.synthesizer_llm_client,
                kg_instance=self.graph_storage,
                chunks=[
                    Chunk(id=k, content=v["content"], type="text")
                    for k, v in inserting_chunks.items()
                ],
                progress_bar=self.progress_bar,
            )
            if not _add_entities_and_relations:
                logger.warning("No entities or relations extracted from text chunks")
                return

            await self._insert_done()
            return _add_entities_and_relations

        async def _insert_multi_modal_docs(mm_docs):
            if len(mm_docs) == 0:
                logger.warning("No multi-modal documents to insert")
                return

            logger.info("[New Docs] inserting %d multi-modal docs", len(mm_docs))

            # Step 3.1: Transform multi-modal documents into chunks and filter existing ones
            inserting_chunks = await chunk_documents(
                mm_docs,
                split_config["chunk_size"],
                split_config["chunk_overlap"],
                self.tokenizer_instance,
                self.progress_bar,
            )

            _add_chunk_keys = await self.chunks_storage.filter_keys(
                list(inserting_chunks.keys())
            )
            inserting_chunks = {
                k: v for k, v in inserting_chunks.items() if k in _add_chunk_keys
            }

            if len(inserting_chunks) == 0:
                logger.warning("All multi-modal chunks are already in the storage")
                return

            logger.info(
                "[New Chunks] inserting %d multimodal chunks", len(inserting_chunks)
            )
            await self.chunks_storage.upsert(inserting_chunks)

            # Step 3.2: Extract multi-modal entities and relations from chunks
            logger.info("[Multi-modal Entity and Relation Extraction] processing ...")
            _add_entities_and_relations = await build_mm_kg(
                llm_client=self.synthesizer_llm_client,
                kg_instance=self.graph_storage,
                chunks=[Chunk.from_dict(k, v) for k, v in inserting_chunks.items()],
                progress_bar=self.progress_bar,
            )
            if not _add_entities_and_relations:
                logger.warning(
                    "No entities or relations extracted from multi-modal chunks"
                )
                return
            await self._insert_done()
            return _add_entities_and_relations

        # Step 2: Insert text documents
        await _insert_text_docs(new_text_docs)
        # Step 3: Insert multi-modal documents
        await _insert_multi_modal_docs(new_mm_docs)

    async def _insert_done(self):
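        """Flush all storage backends after an insert pass completes."""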
        tasks = []
        for storage_instance in [
            self.full_docs_storage,
            self.chunks_storage,
            self.graph_storage,
            self.search_storage,
        ]:
            if storage_instance is None:
                continue
            tasks.append(cast(StorageNameSpace, storage_instance).index_done_callback())
        await asyncio.gather(*tasks)

    @async_to_sync_method
    async def search(self, search_config: Dict):
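        """Search configured external sources for graph entities and store the results."""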
        logger.info(
            "Search is %s", "enabled" if search_config["enabled"] else "disabled"
        )
        if search_config["enabled"]:
            logger.info("[Search] %s ...", ", ".join(search_config["search_types"]))
            all_nodes = await self.graph_storage.get_all_nodes()
            all_nodes_names = [node[0] for node in all_nodes]
            new_search_entities = await self.full_docs_storage.filter_keys(
                all_nodes_names
            )
            logger.info(
                "[Search] Found %d entities to search", len(new_search_entities)
            )
            _add_search_data = await search_all(
                search_types=search_config["search_types"],
                search_entities=new_search_entities,
            )
            if _add_search_data:
                await self.search_storage.upsert(_add_search_data)
                logger.info("[Search] %d entities searched", len(_add_search_data))

                # Format search results for inserting
                search_results = []
                for _, search_data in _add_search_data.items():
                    search_results.extend(
                        [
                            {"content": search_data[key]}
                            for key in list(search_data.keys())
                        ]
                    )
                # TODO: fix insert after search. `search_results` is built above
                # but never consumed, and insert() is called without its required
                # read_config / split_config arguments.
                await self.insert()

    @async_to_sync_method
    async def quiz_and_judge(self, quiz_and_judge_config: Dict):
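        """Quiz the graph with rephrased statements and judge them with the trainee LLM."""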
        if quiz_and_judge_config is None or not quiz_and_judge_config.get(
            "enabled", False
        ):
            logger.warning("Quiz and Judge is not used in this pipeline.")
            return
        max_samples = quiz_and_judge_config["quiz_samples"]
        await quiz(
            self.synthesizer_llm_client,
            self.graph_storage,
            self.rephrase_storage,
            max_samples,
        )

        # TODO: assert trainee_llm_client is valid before judge
        re_judge = quiz_and_judge_config["re_judge"]
        _update_relations = await judge_statement(
            self.trainee_llm_client,
            self.graph_storage,
            self.rephrase_storage,
            re_judge,
        )
        await self.rephrase_storage.index_done_callback()
        await _update_relations.index_done_callback()

    @async_to_sync_method
    async def generate(self, partition_config: Dict, generate_config: Dict):
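        """Partition the knowledge graph and generate QA pairs from the resulting batches."""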
        # Step 1: partition the graph
        batches = await partition_kg(
            self.graph_storage,
            self.chunks_storage,
            self.tokenizer_instance,
            partition_config,
        )

        # Step 2: generate QA pairs
        results = await generate_qas(
            self.synthesizer_llm_client,
            batches,
            generate_config,
            progress_bar=self.progress_bar,
        )

        if not results:
            logger.warning("No QA pairs generated")
            return

        # Step 3: store the generated QA pairs
        await self.qa_storage.upsert(results)
        await self.qa_storage.index_done_callback()

    @async_to_sync_method
    async def clear(self):
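        """Drop all storage caches."""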
        await self.full_docs_storage.drop()
        await self.chunks_storage.drop()
        await self.search_storage.drop()
        await self.graph_storage.clear()
        await self.rephrase_storage.drop()
        await self.qa_storage.drop()

        logger.info("All caches are cleared")
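

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original pipeline code: a
    # minimal end-to-end run under assumed configs. The keys shown for the
    # read/split and quiz-and-judge configs match the lookups in the methods
    # above; the partition and generate configs are opaque here, so the empty
    # dicts are placeholders only (see partition_kg and generate_qas for the
    # real keys). Requires the TOKENIZER_MODEL / SYNTHESIZER_* / TRAINEE_*
    # environment variables consumed in __post_init__. The methods are wrapped
    # by async_to_sync_method, so they are called synchronously.
    graph_gen = GraphGen()
    graph_gen.insert(
        read_config={"input_file": "data/docs.jsonl"},  # assumed sample path
        split_config={"chunk_size": 1024, "chunk_overlap": 100},
    )
    graph_gen.quiz_and_judge(
        {"enabled": True, "quiz_samples": 2, "re_judge": False}
    )
    graph_gen.generate(
        partition_config={},  # placeholder; real keys defined by partition_kg
        generate_config={},  # placeholder; real keys defined by generate_qas
    )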