Az-r-ow committed on
Commit
9f61aa9
·
1 Parent(s): fcb3586

WIP: CamemBERT fine-tuning

Browse files
Files changed (3) hide show
  1. camemBERT_finetuning.ipynb +378 -0
  2. deepl_ner.ipynb +0 -0
  3. requirements.txt +2 -1
camemBERT_finetuning.ipynb ADDED
@@ -0,0 +1,378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# CamemBERT fine-tuning\n",
8
+ "\n",
9
+ "Because of dependency conflicts, we will be fine-tuning the model here and then loading and evaluating it in [deepl_ner.ipynb](./deepl_ner.ipynb).\n"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": null,
15
+ "metadata": {},
16
+ "outputs": [
17
+ {
18
+ "name": "stdout",
19
+ "output_type": "stream",
20
+ "text": [
21
+ "Requirement already satisfied: transformers in ./venv/lib/python3.12/site-packages (4.46.3)\n",
22
+ "Requirement already satisfied: filelock in ./venv/lib/python3.12/site-packages (from transformers) (3.16.1)\n",
23
+ "Requirement already satisfied: huggingface-hub<1.0,>=0.23.2 in ./venv/lib/python3.12/site-packages (from transformers) (0.26.3)\n",
24
+ "Requirement already satisfied: numpy>=1.17 in ./venv/lib/python3.12/site-packages (from transformers) (1.26.4)\n",
25
+ "Requirement already satisfied: packaging>=20.0 in ./venv/lib/python3.12/site-packages (from transformers) (24.1)\n",
26
+ "Requirement already satisfied: pyyaml>=5.1 in ./venv/lib/python3.12/site-packages (from transformers) (6.0.2)\n",
27
+ "Requirement already satisfied: regex!=2019.12.17 in ./venv/lib/python3.12/site-packages (from transformers) (2024.9.11)\n",
28
+ "Requirement already satisfied: requests in ./venv/lib/python3.12/site-packages (from transformers) (2.32.3)\n",
29
+ "Requirement already satisfied: tokenizers<0.21,>=0.20 in ./venv/lib/python3.12/site-packages (from transformers) (0.20.3)\n",
30
+ "Requirement already satisfied: safetensors>=0.4.1 in ./venv/lib/python3.12/site-packages (from transformers) (0.4.5)\n",
31
+ "Requirement already satisfied: tqdm>=4.27 in ./venv/lib/python3.12/site-packages (from transformers) (4.66.5)\n",
32
+ "Requirement already satisfied: fsspec>=2023.5.0 in ./venv/lib/python3.12/site-packages (from huggingface-hub<1.0,>=0.23.2->transformers) (2024.10.0)\n",
33
+ "Requirement already satisfied: typing-extensions>=3.7.4.3 in ./venv/lib/python3.12/site-packages (from huggingface-hub<1.0,>=0.23.2->transformers) (4.12.2)\n",
34
+ "Requirement already satisfied: charset-normalizer<4,>=2 in ./venv/lib/python3.12/site-packages (from requests->transformers) (3.4.0)\n",
35
+ "Requirement already satisfied: idna<4,>=2.5 in ./venv/lib/python3.12/site-packages (from requests->transformers) (3.10)\n",
36
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in ./venv/lib/python3.12/site-packages (from requests->transformers) (2.2.3)\n",
37
+ "Requirement already satisfied: certifi>=2017.4.17 in ./venv/lib/python3.12/site-packages (from requests->transformers) (2024.8.30)\n",
38
+ "\n",
39
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.3.1\u001b[0m\n",
40
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
41
+ "Requirement already satisfied: tf_keras in ./venv/lib/python3.12/site-packages (2.18.0)\n",
42
+ "Requirement already satisfied: tensorflow<2.19,>=2.18 in ./venv/lib/python3.12/site-packages (from tf_keras) (2.18.0)\n",
43
+ "Requirement already satisfied: absl-py>=1.0.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (2.1.0)\n",
44
+ "Requirement already satisfied: astunparse>=1.6.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (1.6.3)\n",
45
+ "Requirement already satisfied: flatbuffers>=24.3.25 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (24.3.25)\n",
46
+ "Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (0.6.0)\n",
47
+ "Requirement already satisfied: google-pasta>=0.1.1 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (0.2.0)\n",
48
+ "Requirement already satisfied: libclang>=13.0.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (18.1.1)\n",
49
+ "Requirement already satisfied: opt-einsum>=2.3.2 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (3.4.0)\n",
50
+ "Requirement already satisfied: packaging in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (24.1)\n",
51
+ "Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev,>=3.20.3 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (4.25.5)\n",
52
+ "Requirement already satisfied: requests<3,>=2.21.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (2.32.3)\n",
53
+ "Requirement already satisfied: setuptools in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (75.2.0)\n",
54
+ "Requirement already satisfied: six>=1.12.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (1.16.0)\n",
55
+ "Requirement already satisfied: termcolor>=1.1.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (2.5.0)\n",
56
+ "Requirement already satisfied: typing-extensions>=3.6.6 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (4.12.2)\n",
57
+ "Requirement already satisfied: wrapt>=1.11.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (1.16.0)\n",
58
+ "Requirement already satisfied: grpcio<2.0,>=1.24.3 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (1.67.0)\n",
59
+ "Requirement already satisfied: tensorboard<2.19,>=2.18 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (2.18.0)\n",
60
+ "Requirement already satisfied: keras>=3.5.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (3.7.0)\n",
61
+ "Requirement already satisfied: numpy<2.1.0,>=1.26.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (1.26.4)\n",
62
+ "Requirement already satisfied: h5py>=3.11.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (3.12.1)\n",
63
+ "Requirement already satisfied: ml-dtypes<0.5.0,>=0.4.0 in ./venv/lib/python3.12/site-packages (from tensorflow<2.19,>=2.18->tf_keras) (0.4.1)\n",
64
+ "Requirement already satisfied: wheel<1.0,>=0.23.0 in ./venv/lib/python3.12/site-packages (from astunparse>=1.6.0->tensorflow<2.19,>=2.18->tf_keras) (0.44.0)\n",
65
+ "Requirement already satisfied: rich in ./venv/lib/python3.12/site-packages (from keras>=3.5.0->tensorflow<2.19,>=2.18->tf_keras) (13.9.2)\n",
66
+ "Requirement already satisfied: namex in ./venv/lib/python3.12/site-packages (from keras>=3.5.0->tensorflow<2.19,>=2.18->tf_keras) (0.0.8)\n",
67
+ "Requirement already satisfied: optree in ./venv/lib/python3.12/site-packages (from keras>=3.5.0->tensorflow<2.19,>=2.18->tf_keras) (0.13.0)\n",
68
+ "Requirement already satisfied: charset-normalizer<4,>=2 in ./venv/lib/python3.12/site-packages (from requests<3,>=2.21.0->tensorflow<2.19,>=2.18->tf_keras) (3.4.0)\n",
69
+ "Requirement already satisfied: idna<4,>=2.5 in ./venv/lib/python3.12/site-packages (from requests<3,>=2.21.0->tensorflow<2.19,>=2.18->tf_keras) (3.10)\n",
70
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in ./venv/lib/python3.12/site-packages (from requests<3,>=2.21.0->tensorflow<2.19,>=2.18->tf_keras) (2.2.3)\n",
71
+ "Requirement already satisfied: certifi>=2017.4.17 in ./venv/lib/python3.12/site-packages (from requests<3,>=2.21.0->tensorflow<2.19,>=2.18->tf_keras) (2024.8.30)\n",
72
+ "Requirement already satisfied: markdown>=2.6.8 in ./venv/lib/python3.12/site-packages (from tensorboard<2.19,>=2.18->tensorflow<2.19,>=2.18->tf_keras) (3.7)\n",
73
+ "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in ./venv/lib/python3.12/site-packages (from tensorboard<2.19,>=2.18->tensorflow<2.19,>=2.18->tf_keras) (0.7.2)\n",
74
+ "Requirement already satisfied: werkzeug>=1.0.1 in ./venv/lib/python3.12/site-packages (from tensorboard<2.19,>=2.18->tensorflow<2.19,>=2.18->tf_keras) (3.0.4)\n",
75
+ "Requirement already satisfied: MarkupSafe>=2.1.1 in ./venv/lib/python3.12/site-packages (from werkzeug>=1.0.1->tensorboard<2.19,>=2.18->tensorflow<2.19,>=2.18->tf_keras) (3.0.2)\n",
76
+ "Requirement already satisfied: markdown-it-py>=2.2.0 in ./venv/lib/python3.12/site-packages (from rich->keras>=3.5.0->tensorflow<2.19,>=2.18->tf_keras) (3.0.0)\n",
77
+ "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in ./venv/lib/python3.12/site-packages (from rich->keras>=3.5.0->tensorflow<2.19,>=2.18->tf_keras) (2.18.0)\n",
78
+ "Requirement already satisfied: mdurl~=0.1 in ./venv/lib/python3.12/site-packages (from markdown-it-py>=2.2.0->rich->keras>=3.5.0->tensorflow<2.19,>=2.18->tf_keras) (0.1.2)\n",
79
+ "\n",
80
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.3.1\u001b[0m\n",
81
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
82
+ ]
83
+ }
84
+ ],
85
+ "source": [
86
+ "!pip install --upgrade transformers tf-keras numpy sentencepiece"
87
+ ]
88
+ },
89
+ {
90
+ "cell_type": "code",
91
+ "execution_count": 2,
92
+ "metadata": {},
93
+ "outputs": [],
94
+ "source": [
95
+ "import os\n",
96
+ "\n",
97
+ "os.environ[\"TF_USE_LEGACY_KERAS\"] = \"1\""
98
+ ]
99
+ },
100
+ {
101
+ "cell_type": "code",
102
+ "execution_count": 3,
103
+ "metadata": {},
104
+ "outputs": [],
105
+ "source": [
106
+ "import tensorflow as tf"
107
+ ]
108
+ },
109
+ {
110
+ "cell_type": "code",
111
+ "execution_count": 4,
112
+ "metadata": {},
113
+ "outputs": [
114
+ {
115
+ "name": "stderr",
116
+ "output_type": "stream",
117
+ "text": [
118
+ "[nltk_data] Downloading package punkt_tab to /Users/az-r-\n",
119
+ "[nltk_data] ow/nltk_data...\n",
120
+ "[nltk_data] Package punkt_tab is already up-to-date!\n"
121
+ ]
122
+ }
123
+ ],
124
+ "source": [
125
+ "from app.travel_resolver.libs.nlp import data_processing as dp\n",
126
+ "\n",
127
+ "sentences, labels, vocab, unique_labels = dp.from_bio_file_to_examples(\n",
128
+ " \"./data/bio/fr.bio/10k_train_small_samples.bio\"\n",
129
+ ")\n",
130
+ "\n",
131
+ "lambda_sentences, lambda_labels, _, __ = dp.from_bio_file_to_examples(\n",
132
+ " \"./data/bio/fr.bio/1k_train_unlabeled_samples.bio\"\n",
133
+ ")\n",
134
+ "\n",
135
+ "large_sentences, large_labels, _, __ = dp.from_bio_file_to_examples(\n",
136
+ " \"./data/bio/fr.bio/1k_train_large_samples.bio\"\n",
137
+ ")\n",
138
+ "\n",
139
+ "sentences = sentences + lambda_sentences + large_sentences\n",
140
+ "labels = labels + lambda_labels + large_labels"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": 5,
146
+ "metadata": {},
147
+ "outputs": [],
148
+ "source": [
149
+ "import app.travel_resolver.libs.nlp.data_processing as dp\n",
150
+ "\n",
151
+ "processed_sentences, processed_labels = dp.process_sentences_and_labels(\n",
152
+ " sentences, labels, return_tokens=True, stemming=False\n",
153
+ ")"
154
+ ]
155
+ },
156
+ {
157
+ "cell_type": "code",
158
+ "execution_count": 6,
159
+ "metadata": {},
160
+ "outputs": [],
161
+ "source": [
162
+ "for i in range(len(processed_sentences)):\n",
163
+ " for j in range(len(processed_sentences[i])):\n",
164
+ " if processed_labels[i][j] > 0:\n",
165
+ " processed_sentences[i][j] = processed_sentences[i][j].title()"
166
+ ]
167
+ },
168
+ {
169
+ "cell_type": "code",
170
+ "execution_count": 7,
171
+ "metadata": {},
172
+ "outputs": [],
173
+ "source": [
174
+ "\"\"\"\n",
175
+ " This variable will control the maximum length of the sentence \n",
176
+ " as well as the embedding size\n",
177
+ "\"\"\"\n",
178
+ "\n",
179
+ "MAX_LEN = 100"
180
+ ]
181
+ },
182
+ {
183
+ "cell_type": "code",
184
+ "execution_count": 8,
185
+ "metadata": {},
186
+ "outputs": [],
187
+ "source": [
188
+ "padded_labels = tf.keras.preprocessing.sequence.pad_sequences(\n",
189
+ " processed_labels, maxlen=MAX_LEN, padding=\"post\"\n",
190
+ ")"
191
+ ]
192
+ },
193
+ {
194
+ "cell_type": "code",
195
+ "execution_count": null,
196
+ "metadata": {},
197
+ "outputs": [],
198
+ "source": [
199
+ "from transformers import TFAutoModelForTokenClassification, CamembertTokenizer\n",
200
+ "import numpy as np\n",
201
+ "\n",
202
+ "tokenizer = CamembertTokenizer.from_pretrained(\"camembert-base\")"
203
+ ]
204
+ },
205
+ {
206
+ "cell_type": "code",
207
+ "execution_count": 23,
208
+ "metadata": {},
209
+ "outputs": [],
210
+ "source": [
211
+ "tokenized_sentences = tokenizer(\n",
212
+ " processed_sentences,\n",
213
+ " is_split_into_words=True,\n",
214
+ " truncation=True,\n",
215
+ " padding=\"max_length\",\n",
216
+ " max_length=MAX_LEN,\n",
217
+ ")"
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "code",
222
+ "execution_count": 24,
223
+ "metadata": {},
224
+ "outputs": [],
225
+ "source": [
226
+ "from sklearn.model_selection import train_test_split\n",
227
+ "\n",
228
+ "(\n",
229
+ " train_input_ids,\n",
230
+ " test_input_ids,\n",
231
+ " train_attention_masks,\n",
232
+ " test_attention_masks,\n",
233
+ " train_labels,\n",
234
+ " test_labels,\n",
235
+ ") = train_test_split(\n",
236
+ " tokenized_sentences[\"input_ids\"],\n",
237
+ " tokenized_sentences[\"attention_mask\"],\n",
238
+ " padded_labels,\n",
239
+ " test_size=0.2,\n",
240
+ ")"
241
+ ]
242
+ },
243
+ {
244
+ "cell_type": "code",
245
+ "execution_count": 26,
246
+ "metadata": {},
247
+ "outputs": [],
248
+ "source": [
249
+ "train_dataset = tf.data.Dataset.from_tensor_slices(\n",
250
+ " (\n",
251
+ " {\n",
252
+ " \"input_ids\": train_input_ids,\n",
253
+ " \"attention_mask\": train_attention_masks,\n",
254
+ " },\n",
255
+ " train_labels,\n",
256
+ " )\n",
257
+ ")\n",
258
+ "\n",
259
+ "test_dataset = tf.data.Dataset.from_tensor_slices(\n",
260
+ " (\n",
261
+ " {\n",
262
+ " \"input_ids\": test_input_ids,\n",
263
+ " \"attention_mask\": test_attention_masks,\n",
264
+ " },\n",
265
+ " test_labels,\n",
266
+ " )\n",
267
+ ")"
268
+ ]
269
+ },
270
+ {
271
+ "cell_type": "code",
272
+ "execution_count": 32,
273
+ "metadata": {},
274
+ "outputs": [
275
+ {
276
+ "name": "stderr",
277
+ "output_type": "stream",
278
+ "text": [
279
+ "All PyTorch model weights were used when initializing TFCamembertForTokenClassification.\n",
280
+ "\n",
281
+ "Some weights or buffers of the TF 2.0 model TFCamembertForTokenClassification were not initialized from the PyTorch model and are newly initialized: ['classifier.weight', 'classifier.bias']\n",
282
+ "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
283
+ "WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy TF-Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.\n"
284
+ ]
285
+ }
286
+ ],
287
+ "source": [
288
+ "camembert = TFAutoModelForTokenClassification.from_pretrained(\n",
289
+ " \"camembert-base\", num_labels=len(unique_labels)\n",
290
+ ")\n",
291
+ "\n",
292
+ "camembert.compile(\n",
293
+ " optimizer=tf.keras.optimizers.Adam(5e-5),\n",
294
+ " loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
295
+ " metrics=[\"accuracy\"],\n",
296
+ ")"
297
+ ]
298
+ },
299
+ {
300
+ "cell_type": "code",
301
+ "execution_count": 33,
302
+ "metadata": {},
303
+ "outputs": [],
304
+ "source": [
305
+ "train_dataset = train_dataset.batch(64)\n",
306
+ "test_dataset = test_dataset.batch(64)"
307
+ ]
308
+ },
309
+ {
310
+ "cell_type": "code",
311
+ "execution_count": 34,
312
+ "metadata": {},
313
+ "outputs": [
314
+ {
315
+ "name": "stdout",
316
+ "output_type": "stream",
317
+ "text": [
318
+ "Epoch 1/3\n",
319
+ "148/148 [==============================] - 1725s 12s/step - loss: 0.2062 - accuracy: 0.9711 - val_loss: 0.0952 - val_accuracy: 0.9873\n",
320
+ "Epoch 2/3\n",
321
+ "148/148 [==============================] - 1782s 12s/step - loss: 0.0681 - accuracy: 0.9922 - val_loss: 0.0442 - val_accuracy: 0.9953\n",
322
+ "Epoch 3/3\n",
323
+ "148/148 [==============================] - 1749s 12s/step - loss: 0.0377 - accuracy: 0.9956 - val_loss: 0.0260 - val_accuracy: 0.9964\n"
324
+ ]
325
+ },
326
+ {
327
+ "data": {
328
+ "text/plain": [
329
+ "<tf_keras.src.callbacks.History at 0x295a015b0>"
330
+ ]
331
+ },
332
+ "execution_count": 34,
333
+ "metadata": {},
334
+ "output_type": "execute_result"
335
+ }
336
+ ],
337
+ "source": [
338
+ "callback = tf.keras.callbacks.EarlyStopping(\n",
339
+ " monitor=\"val_loss\", patience=0, restore_best_weights=True\n",
340
+ ")\n",
341
+ "\n",
342
+ "camembert.fit(\n",
343
+ " train_dataset, validation_data=test_dataset, epochs=3, callbacks=[callback]\n",
344
+ ")"
345
+ ]
346
+ },
347
+ {
348
+ "cell_type": "code",
349
+ "execution_count": null,
350
+ "metadata": {},
351
+ "outputs": [],
352
+ "source": [
353
+ "camembert.save_pretrained(\"./camembert\")"
354
+ ]
355
+ }
356
+ ],
357
+ "metadata": {
358
+ "kernelspec": {
359
+ "display_name": "venv",
360
+ "language": "python",
361
+ "name": "python3"
362
+ },
363
+ "language_info": {
364
+ "codemirror_mode": {
365
+ "name": "ipython",
366
+ "version": 3
367
+ },
368
+ "file_extension": ".py",
369
+ "mimetype": "text/x-python",
370
+ "name": "python",
371
+ "nbconvert_exporter": "python",
372
+ "pygments_lexer": "ipython3",
373
+ "version": "3.12.4"
374
+ }
375
+ },
376
+ "nbformat": 4,
377
+ "nbformat_minor": 2
378
+ }
deepl_ner.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
requirements.txt CHANGED
@@ -12,4 +12,5 @@ scikit-learn
12
  hmmlearn # for hidden markov models
13
  ipykernel
14
  tabulate
15
- transformers
 
 
12
  hmmlearn # for hidden markov models
13
  ipykernel
14
  tabulate
15
+ transformers
16
+ sentencepiece