karths committed
Commit e3b0c4a · verified · 1 Parent(s): 4e2e2a1

Upload folder using huggingface_hub

Files changed (44)
  1. .gitattributes +2 -0
  2. .ipynb_checkpoints/classification_report-checkpoint.json +1 -0
  3. .ipynb_checkpoints/config-checkpoint.json +28 -0
  4. .ipynb_checkpoints/confusion_matrix-checkpoint.png +0 -0
  5. .ipynb_checkpoints/detailed_confusion_matrix-checkpoint.png +0 -0
  6. .ipynb_checkpoints/fold_results-checkpoint.json +15 -0
  7. .ipynb_checkpoints/metrics-checkpoint.json +1 -0
  8. .ipynb_checkpoints/metrics_all_fold-checkpoint.json +44 -0
  9. .ipynb_checkpoints/metrics_ci_bounds-checkpoint.json +26 -0
  10. .ipynb_checkpoints/metrics_mean-checkpoint.json +8 -0
  11. .ipynb_checkpoints/metrics_std-checkpoint.json +8 -0
  12. .ipynb_checkpoints/metrics_visualisation-checkpoint.png +0 -0
  13. .ipynb_checkpoints/precision_recall_curve-checkpoint.png +0 -0
  14. .ipynb_checkpoints/roc_curve-checkpoint.png +0 -0
  15. .ipynb_checkpoints/test_top_repo_data-checkpoint.csv +0 -0
  16. .ipynb_checkpoints/top_repo_data-checkpoint.csv +0 -0
  17. .ipynb_checkpoints/tracker_carbon_statistics-checkpoint.json +1 -0
  18. checkpoint-5810/config.json +28 -0
  19. checkpoint-5810/model.safetensors +3 -0
  20. checkpoint-5810/optimizer.pt +3 -0
  21. checkpoint-5810/rng_state.pth +3 -0
  22. checkpoint-5810/scheduler.pt +3 -0
  23. checkpoint-5810/trainer_state.json +85 -0
  24. checkpoint-5810/training_args.bin +3 -0
  25. classification_report.json +1 -0
  26. config.json +28 -0
  27. confusion_matrix.png +0 -0
  28. detailed_confusion_matrix.png +0 -0
  29. fold_results.json +67 -0
  30. metrics.json +1 -0
  31. metrics_all_fold.json +44 -0
  32. metrics_ci_bounds.json +26 -0
  33. metrics_mean.json +8 -0
  34. metrics_std.json +8 -0
  35. metrics_visualisation.png +0 -0
  36. model.safetensors +3 -0
  37. precision_recall_curve.png +0 -0
  38. reduced_main_data.csv +3 -0
  39. roc_curve.png +0 -0
  40. test_data_for_future_evaluation.csv +3 -0
  41. test_top_repo_data.csv +0 -0
  42. top_repo_data.csv +0 -0
  43. tracker_carbon_statistics.json +33 -0
  44. training_args.bin +3 -0
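
The commit message ("Upload folder using huggingface_hub") indicates the 44 files above were pushed in one go with the `huggingface_hub` client. A minimal sketch of such an upload, assuming a placeholder repo id and local folder name that are not part of this commit:

```python
# Hedged sketch: push a local results folder to the Hub with huggingface_hub.
# "your-username/your-model-repo" and "./results" are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./results",                 # folder holding config.json, model.safetensors, metrics, CSVs, ...
    repo_id="your-username/your-model-repo",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```

The large CSVs and binary weights are kept out of regular git history by the LFS rules, which this commit extends in .gitattributes below.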
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ reduced_main_data.csv filter=lfs diff=lfs merge=lfs -text
+ test_data_for_future_evaluation.csv filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/classification_report-checkpoint.json ADDED
@@ -0,0 +1 @@
+ {"0": {"precision": 0.9124060839288987, "recall": 0.911737776963926, "f1-score": 0.9120718080234475, "support": 10922}, "1": {"precision": 0.8745118458734704, "recall": 0.8754235079489184, "f1-score": 0.8749674394373534, "support": 7674}, "accuracy": 0.896751989675199, "macro avg": {"precision": 0.8934589649011846, "recall": 0.8935806424564222, "f1-score": 0.8935196237304004, "support": 18596}, "weighted avg": {"precision": 0.8967682917780407, "recall": 0.896751989675199, "f1-score": 0.89675997082568, "support": 18596}}
.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "distilroberta-base",
+ "architectures": [
+ "RobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 6,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.35.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 50265
+ }
.ipynb_checkpoints/confusion_matrix-checkpoint.png ADDED
.ipynb_checkpoints/detailed_confusion_matrix-checkpoint.png ADDED
.ipynb_checkpoints/fold_results-checkpoint.json ADDED
@@ -0,0 +1,15 @@
+ [
+ {
+ "eval_loss": 0.37736985087394714,
+ "eval_precision": 0.8745118458734704,
+ "eval_recall": 0.8754235079489184,
+ "eval_acc": 0.896751989675199,
+ "eval_mcc": 0.7870395979518365,
+ "eval_f1": 0.8749674394373534,
+ "eval_auc": 0.9571703135608877,
+ "eval_runtime": 39.4159,
+ "eval_samples_per_second": 471.789,
+ "eval_steps_per_second": 7.383,
+ "epoch": 5.0
+ }
+ ]
.ipynb_checkpoints/metrics-checkpoint.json ADDED
@@ -0,0 +1 @@
+ {"precision": 0.8745118458734704, "recall": 0.8754235079489184, "acc": 0.896751989675199, "mcc": 0.7870395979518365, "f1": 0.8749674394373534, "auc": 0.9571703135608877}
.ipynb_checkpoints/metrics_all_fold-checkpoint.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "precision": [
+ 0.7672209026128266,
+ 0.7968970380818053,
+ 0.8137651821862348,
+ 0.8172959805115713,
+ 0.7978208232445521
+ ],
+ "recall": [
+ 0.8197969543147208,
+ 0.7356770833333334,
+ 0.7831168831168831,
+ 0.8569604086845466,
+ 0.8448717948717949
+ ],
+ "f1": [
+ 0.7926380368098158,
+ 0.7650643195666892,
+ 0.7981469225678359,
+ 0.8366583541147132,
+ 0.8206724782067248
+ ],
+ "auc": [
+ 0.8918816056140617,
+ 0.8943374875992063,
+ 0.9120660073871083,
+ 0.9301759416381771,
+ 0.9334253323836657
+ ],
+ "acc": [
+ 0.8182795698924731,
+ 0.8134408602150538,
+ 0.8360215053763441,
+ 0.8591397849462366,
+ 0.8451612903225807
+ ],
+ "mcc": [
+ 0.6323295820087347,
+ 0.6121117228192946,
+ 0.6605059312974649,
+ 0.7135805983763059,
+ 0.6855045974862294
+ ]
+ }
.ipynb_checkpoints/metrics_ci_bounds-checkpoint.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "precision": {
+ "ci_lower": 0.7740188215583432,
+ "ci_upper": 0.8231811490964529
+ },
+ "recall": {
+ "ci_lower": 0.7467963459284087,
+ "ci_upper": 0.8693729038001027
+ },
+ "f1": {
+ "ci_lower": 0.7685491375672109,
+ "ci_upper": 0.8367229069391007
+ },
+ "auc": {
+ "ci_lower": 0.8882891810953514,
+ "ci_upper": 0.9364653687535361
+ },
+ "acc": {
+ "ci_lower": 0.8109345424292921,
+ "ci_upper": 0.8578826618717831
+ },
+ "mcc": {
+ "ci_lower": 0.6104566361569744,
+ "ci_upper": 0.7111563366382375
+ }
+ }
.ipynb_checkpoints/metrics_mean-checkpoint.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "precision": 0.7985999853273981,
+ "recall": 0.8080846248642557,
+ "f1": 0.8026360222531558,
+ "auc": 0.9123772749244438,
+ "acc": 0.8344086021505376,
+ "mcc": 0.660806486397606
+ }
.ipynb_checkpoints/metrics_std-checkpoint.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "precision": 0.019796952963615604,
+ "recall": 0.04935979381258435,
+ "f1": 0.027452583577566983,
+ "auc": 0.019399848802847657,
+ "acc": 0.01890532362636263,
+ "mcc": 0.04055030210545639
+ }
.ipynb_checkpoints/metrics_visualisation-checkpoint.png ADDED
.ipynb_checkpoints/precision_recall_curve-checkpoint.png ADDED
.ipynb_checkpoints/roc_curve-checkpoint.png ADDED
.ipynb_checkpoints/test_top_repo_data-checkpoint.csv ADDED
The diff for this file is too large to render. See raw diff
 
.ipynb_checkpoints/top_repo_data-checkpoint.csv ADDED
The diff for this file is too large to render. See raw diff
 
.ipynb_checkpoints/tracker_carbon_statistics-checkpoint.json ADDED
@@ -0,0 +1 @@
+ "{\n \"cloud_provider\": \"\",\n \"cloud_region\": \"\",\n \"codecarbon_version\": \"2.3.4\",\n \"country_iso_code\": \"NOR\",\n \"country_name\": \"Norway\",\n \"cpu_count\": 192,\n \"cpu_energy\": 0.013719349511581939,\n \"cpu_model\": \"AMD EPYC 7642 48-Core Processor\",\n \"cpu_power\": 172.01081495134497,\n \"duration\": 286.1363821029663,\n \"emissions\": 0.0024595888544551997,\n \"emissions_rate\": 8.5958620025122e-06,\n \"energy_consumed\": 0.08927727239401813,\n \"gpu_count\": 4,\n \"gpu_energy\": 0.04554486032474789,\n \"gpu_model\": \"4 x NVIDIA GeForce RTX 3090\",\n \"gpu_power\": 573.9790979684847,\n \"latitude\": 59.955,\n \"longitude\": 10.859,\n \"on_cloud\": \"N\",\n \"os\": \"Linux-4.18.0-513.11.1.el8_9.x86_64-x86_64-with-glibc2.28\",\n \"project_name\": \"codecarbon\",\n \"pue\": 1.0,\n \"python_version\": \"3.10.8\",\n \"ram_energy\": 0.030013062557688305,\n \"ram_power\": 377.6938133239746,\n \"ram_total_size\": 1007.1835021972656,\n \"region\": \"oslo county\",\n \"run_id\": \"178e1f39-10b0-4b1c-bcc4-9ec8a6123a3b\",\n \"timestamp\": \"2024-02-26T12:41:37\",\n \"tracking_mode\": \"machine\"\n}"
checkpoint-5810/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "distilroberta-base",
+ "architectures": [
+ "RobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 6,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.35.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 50265
+ }
checkpoint-5810/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd02bdc975460faf678d88889d59c4c26d939d75537094da9902c7b0ad705bab
+ size 328492280
checkpoint-5810/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:371f130749c5cb8e77ab314939b61a3e364bb227f9e56de04e85b5d9691e0066
+ size 657047610
checkpoint-5810/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94d4848b2ac890cff04a20b22c86a283fd6c48cd3f584168346f67ef51b79780
+ size 14244
checkpoint-5810/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cb2113eead7342a675a6c16d9f52f3c1e7771465895f8c6da9a67da203d5fee
+ size 1064
checkpoint-5810/trainer_state.json ADDED
@@ -0,0 +1,85 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.997849462365592,
+ "eval_steps": 500,
+ "global_step": 5810,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.9800000000000004e-05,
+ "loss": 0.0405,
+ "step": 500
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.892270320001786e-05,
+ "loss": 0.0701,
+ "step": 1000
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 4.5767203472630947e-05,
+ "loss": 0.0582,
+ "step": 1500
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 4.080760885882176e-05,
+ "loss": 0.0539,
+ "step": 2000
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 3.447477078705983e-05,
+ "loss": 0.0445,
+ "step": 2500
+ },
+ {
+ "epoch": 2.58,
+ "learning_rate": 2.731883751379496e-05,
+ "loss": 0.0347,
+ "step": 3000
+ },
+ {
+ "epoch": 3.01,
+ "learning_rate": 1.9975949771515295e-05,
+ "loss": 0.0298,
+ "step": 3500
+ },
+ {
+ "epoch": 3.44,
+ "learning_rate": 1.3054786108686476e-05,
+ "loss": 0.0187,
+ "step": 4000
+ },
+ {
+ "epoch": 3.87,
+ "learning_rate": 7.171330740426066e-06,
+ "loss": 0.0163,
+ "step": 4500
+ },
+ {
+ "epoch": 4.3,
+ "learning_rate": 2.8366930107707358e-06,
+ "loss": 0.0119,
+ "step": 5000
+ },
+ {
+ "epoch": 4.73,
+ "learning_rate": 4.3016059323062464e-07,
+ "loss": 0.0088,
+ "step": 5500
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 5810,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "total_flos": 4.924819960273306e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
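
The trainer state above implies most of the run configuration: 5 epochs over 5810 steps, logging and saving every 500 steps, and a learning rate that starts near 5e-5 and decays towards zero (consistent with a cosine schedule after a short warmup). A hedged reconstruction of the corresponding `TrainingArguments`; the scheduler, warmup, batch size, and output path are assumptions, since training_args.bin is stored here as an opaque binary:

```python
# Hedged reconstruction, not the authors' actual arguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./results",              # hypothetical
    num_train_epochs=5,                  # matches trainer_state.json
    learning_rate=5e-5,                  # peak LR suggested by the log_history
    lr_scheduler_type="cosine",          # assumption; fits the decaying LR values
    warmup_steps=500,                    # assumption; matches the early LR values
    logging_steps=500,
    save_steps=500,
    per_device_train_batch_size=64,      # assumption, not recorded in trainer_state.json
)
```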
checkpoint-5810/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07eab3d0ad6c8087afd3b1c16b0379be36034e20421a8ab17a1f15ec2299d578
+ size 4664
classification_report.json ADDED
@@ -0,0 +1 @@
+ {"0": {"precision": 0.9954283624394258, "recall": 0.9968867319842505, "f1-score": 0.9961570134504529, "support": 10921}, "1": {"precision": 0.9955601984852442, "recall": 0.9934844930935627, "f1-score": 0.9945212627184973, "support": 7674}, "accuracy": 0.9954826566281259, "macro avg": {"precision": 0.995494280462335, "recall": 0.9951856125389066, "f1-score": 0.9953391380844752, "support": 18595}, "weighted avg": {"precision": 0.9954827700659713, "recall": 0.9954826566281259, "f1-score": 0.9954819528902471, "support": 18595}}
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "distilroberta-base",
+ "architectures": [
+ "RobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 6,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.35.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 50265
+ }
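
config.json describes a 6-layer `distilroberta-base` encoder fine-tuned as a binary `RobertaForSequenceClassification` head (`problem_type: single_label_classification`). A sketch of loading and querying it; the local path is a placeholder, and using the stock `distilroberta-base` tokenizer is an assumption, since no tokenizer files are part of this commit:

```python
# Hedged sketch; "path/to/this/repo" is a placeholder for a local clone or Hub repo id.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("path/to/this/repo")
tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")  # assumption: tokenizer not included here

inputs = tokenizer("example input text", return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()  # 0 or 1
```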
confusion_matrix.png ADDED
detailed_confusion_matrix.png ADDED
fold_results.json ADDED
@@ -0,0 +1,67 @@
+ {
+ "0": {
+ "eval_loss": 0.37736985087394714,
+ "eval_precision": 0.8745118458734704,
+ "eval_recall": 0.8754235079489184,
+ "eval_acc": 0.896751989675199,
+ "eval_mcc": 0.7870395979518365,
+ "eval_f1": 0.8749674394373534,
+ "eval_auc": 0.9571703135608877,
+ "eval_runtime": 39.8867,
+ "eval_samples_per_second": 466.22,
+ "eval_steps_per_second": 7.296,
+ "epoch": 5.0
+ },
+ "1": {
+ "eval_loss": 0.1398582011461258,
+ "eval_precision": 0.956964006259781,
+ "eval_recall": 0.9562157935887412,
+ "eval_acc": 0.9641858464185846,
+ "eval_mcc": 0.9261094175487641,
+ "eval_f1": 0.9565897536175206,
+ "eval_auc": 0.9927892451971969,
+ "eval_runtime": 40.87,
+ "eval_samples_per_second": 455.004,
+ "eval_steps_per_second": 7.12,
+ "epoch": 5.0
+ },
+ "2": {
+ "eval_loss": 0.04346846789121628,
+ "eval_precision": 0.9905524209421336,
+ "eval_recall": 0.9835830618892508,
+ "eval_acc": 0.9893525489352549,
+ "eval_mcc": 0.9780304673221634,
+ "eval_f1": 0.987055439330544,
+ "eval_auc": 0.9991525039020241,
+ "eval_runtime": 42.4017,
+ "eval_samples_per_second": 438.567,
+ "eval_steps_per_second": 6.863,
+ "epoch": 5.0
+ },
+ "3": {
+ "eval_loss": 0.027091512456536293,
+ "eval_precision": 0.9933402977278663,
+ "eval_recall": 0.9911400651465798,
+ "eval_acc": 0.9936007743600774,
+ "eval_mcc": 0.9867967470365173,
+ "eval_f1": 0.992238961716559,
+ "eval_auc": 0.9996318123616247,
+ "eval_runtime": 43.8711,
+ "eval_samples_per_second": 423.878,
+ "eval_steps_per_second": 6.633,
+ "epoch": 5.0
+ },
+ "4": {
+ "eval_loss": 0.021780893206596375,
+ "eval_precision": 0.9955601984852442,
+ "eval_recall": 0.9934844930935627,
+ "eval_acc": 0.9954826566281259,
+ "eval_mcc": 0.9906798449151293,
+ "eval_f1": 0.9945212627184973,
+ "eval_auc": 0.9997516578239288,
+ "eval_runtime": 45.3945,
+ "eval_samples_per_second": 409.631,
+ "eval_steps_per_second": 6.41,
+ "epoch": 5.0
+ }
+ }
metrics.json ADDED
@@ -0,0 +1 @@
+ {"precision": 0.9955601984852442, "recall": 0.9934844930935627, "acc": 0.9954826566281259, "mcc": 0.9906798449151293, "f1": 0.9945212627184973, "auc": 0.9997516578239288}
metrics_all_fold.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "precision": [
+ 0.8745118458734704,
+ 0.956964006259781,
+ 0.9905524209421336,
+ 0.9933402977278663,
+ 0.9955601984852442
+ ],
+ "recall": [
+ 0.8754235079489184,
+ 0.9562157935887412,
+ 0.9835830618892508,
+ 0.9911400651465798,
+ 0.9934844930935627
+ ],
+ "f1": [
+ 0.8749674394373534,
+ 0.9565897536175206,
+ 0.987055439330544,
+ 0.992238961716559,
+ 0.9945212627184973
+ ],
+ "auc": [
+ 0.9571703135608877,
+ 0.9927892451971969,
+ 0.9991525039020241,
+ 0.9996318123616247,
+ 0.9997516578239288
+ ],
+ "acc": [
+ 0.896751989675199,
+ 0.9641858464185846,
+ 0.9893525489352549,
+ 0.9936007743600774,
+ 0.9954826566281259
+ ],
+ "mcc": [
+ 0.7870395979518365,
+ 0.9261094175487641,
+ 0.9780304673221634,
+ 0.9867967470365173,
+ 0.9906798449151293
+ ]
+ }
metrics_ci_bounds.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "precision": {
+ "ci_lower": 0.8982578181747759,
+ "ci_upper": 1.0261136895406224
+ },
+ "recall": {
+ "ci_lower": 0.898464468781247,
+ "ci_upper": 1.0214742998855744
+ },
+ "f1": {
+ "ci_lower": 0.8983752799390791,
+ "ci_upper": 1.0237738627891109
+ },
+ "auc": {
+ "ci_lower": 0.9668313223329391,
+ "ci_upper": 1.0125668908053258
+ },
+ "acc": {
+ "ci_lower": 0.9160906167435001,
+ "ci_upper": 1.0196589096633968
+ },
+ "mcc": {
+ "ci_lower": 0.8269227697248197,
+ "ci_upper": 1.0405396601849441
+ }
+ }
metrics_mean.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "precision": 0.9621857538576991,
+ "recall": 0.9599693843334107,
+ "f1": 0.9610745713640949,
+ "auc": 0.9896991065691324,
+ "acc": 0.9678747632034485,
+ "mcc": 0.933731214954882
+ }
metrics_std.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "precision": 0.05148569643269319,
+ "recall": 0.04953426663021465,
+ "f1": 0.050496182152111053,
+ "auc": 0.018417047018577187,
+ "acc": 0.041705442482717556,
+ "mcc": 0.08602040921261711
+ }
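
metrics_mean.json, metrics_std.json, and metrics_ci_bounds.json are consistent with a per-fold mean, sample standard deviation, and a 95% t-interval (mean ± t·std/√n over the n = 5 folds in metrics_all_fold.json), which is also why some upper bounds exceed 1.0. A sketch of that aggregation, offered as a reconstruction rather than the authors' actual script:

```python
# Hedged reconstruction of the fold aggregation behind metrics_mean/std/ci_bounds.
import json
import numpy as np
from scipy import stats

with open("metrics_all_fold.json") as f:
    folds = json.load(f)

summary = {}
for metric, values in folds.items():
    v = np.asarray(values)
    n = len(v)
    mean, std = v.mean(), v.std(ddof=1)                 # sample std, matching metrics_std.json
    margin = stats.t.ppf(0.975, n - 1) * std / np.sqrt(n)
    summary[metric] = {"mean": mean, "std": std,
                       "ci_lower": mean - margin, "ci_upper": mean + margin}

print(json.dumps(summary, indent=2, default=float))
```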
metrics_visualisation.png ADDED
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd02bdc975460faf678d88889d59c4c26d939d75537094da9902c7b0ad705bab
+ size 328492280
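
model.safetensors is stored through Git LFS, so the three lines above are only the pointer (a SHA-256 and a ~328 MB size), not the weights themselves. One way to fetch and inspect the real file, with a placeholder repo id:

```python
# Hedged sketch; "your-username/your-model-repo" is a hypothetical placeholder.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

path = hf_hub_download(repo_id="your-username/your-model-repo", filename="model.safetensors")
state_dict = load_file(path)  # maps tensor name -> torch.Tensor
print(len(state_dict), "tensors,", sum(t.numel() for t in state_dict.values()), "parameters")
```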
precision_recall_curve.png ADDED
reduced_main_data.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac536bfffefd15feeeb999cfc144401d6c6c7d7e971b97a47cf35b0992c72eb1
+ size 87979025
roc_curve.png ADDED
test_data_for_future_evaluation.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6939b4a1df4b574395d37dce88b09b02d14162b0790175334232151393fdce39
+ size 13227360
test_top_repo_data.csv ADDED
The diff for this file is too large to render. See raw diff
 
top_repo_data.csv ADDED
The diff for this file is too large to render. See raw diff
 
tracker_carbon_statistics.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "cloud_provider": "",
+ "cloud_region": "",
+ "codecarbon_version": "2.3.4",
+ "country_iso_code": "NOR",
+ "country_name": "Norway",
+ "cpu_count": 192,
+ "cpu_energy": 0.2111595784428054,
+ "cpu_model": "AMD EPYC 7642 48-Core Processor",
+ "cpu_power": 48.10110546610765,
+ "duration": 10616.775909900665,
+ "emissions": 0.08403832397985361,
+ "emissions_rate": 7.91561625610688e-06,
+ "energy_consumed": 3.0503928849311652,
+ "gpu_count": 4,
+ "gpu_energy": 1.7256333418944583,
+ "gpu_model": "4 x NVIDIA GeForce RTX 3090",
+ "gpu_power": 619.2809614882538,
+ "latitude": 59.955,
+ "longitude": 10.859,
+ "on_cloud": "N",
+ "os": "Linux-4.18.0-513.11.1.el8_9.x86_64-x86_64-with-glibc2.28",
+ "project_name": "codecarbon",
+ "pue": 1.0,
+ "python_version": "3.10.8",
+ "ram_energy": 1.1135999645939019,
+ "ram_power": 377.6938133239746,
+ "ram_total_size": 1007.1835021972656,
+ "region": "oslo county",
+ "run_id": "96ec545c-3b42-4824-bcc8-97a6bdb13982",
+ "timestamp": "2024-02-28T17:52:44",
+ "tracking_mode": "machine"
+ }
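
tracker_carbon_statistics.json carries the fields that codecarbon 2.3.4 records (per-component energy, emissions, hardware, and location). A minimal sketch of wrapping a workload in an `EmissionsTracker` to produce such a report; the workload below is only a stand-in for the actual training run:

```python
# Hedged sketch; the tracked workload is a placeholder, not the repo's training code.
from codecarbon import EmissionsTracker

tracker = EmissionsTracker(project_name="codecarbon", tracking_mode="machine")
tracker.start()
try:
    # placeholder workload; in this repo it would be the fine-tuning run
    _ = sum(i * i for i in range(10_000_000))
finally:
    emissions_kg = tracker.stop()  # estimated kg CO2-eq, as summarised in the JSON above
    print(f"Estimated emissions: {emissions_kg:.6f} kg CO2eq")
```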
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07eab3d0ad6c8087afd3b1c16b0379be36034e20421a8ab17a1f15ec2299d578
+ size 4664