Adding datasets
Changed files:
- .gitignore             +2 -1
- hupd.py                +10 -9
- json-files-Jan2016.tar +0 -3
- tests/tests.py         +179 -0
.gitignore CHANGED
@@ -1 +1,2 @@
-tmp
+tmp
+*.pyc
hupd.py CHANGED
@@ -119,7 +119,7 @@ class PatentsConfig(datasets.BuilderConfig):
 class Patents(datasets.GeneratorBasedBuilder):
     _DESCRIPTION

-    VERSION = datasets.Version("1.0.
+    VERSION = datasets.Version("1.0.2")

     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -129,16 +129,16 @@ class Patents(datasets.GeneratorBasedBuilder):
         PatentsConfig(
             name="sample",
             description="Patent data from January 2016, for debugging",
-            metadata_url="https://huggingface.co/datasets/HUPD/hupd
-            data_url="https://huggingface.co/datasets/HUPD/hupd
-            data_dir="
+            metadata_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/hupd_metadata_jan16_2022-02-22.feather",
+            data_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/data/sample-jan-2016.tar.gz",
+            data_dir="data",  # this will unpack to data/sample/2016
         ),
         PatentsConfig(
             name="all",
-            description="Patent data from
-            metadata_url="https://
-            data_url="https://
-            data_dir="
+            description="Patent data from all years (2004-2018)",
+            metadata_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/hupd_metadata_2022-02-22.feather",
+            data_url="https://huggingface.co/datasets/HUPD/hupd/resolve/main/data/all-years.tar.gz",
+            data_dir="data",  # this will unpack to data/{year}
         ),
     ]

@@ -274,8 +274,9 @@ class Patents(datasets.GeneratorBasedBuilder):
         for id_, x in enumerate(df.itertuples()):

             # JSON files are named by application number (unique)
+            application_year = str(x.filing_date.year)
             application_number = x.application_number
-            filepath = os.path.join(json_dir, application_number + '.json')
+            filepath = os.path.join(json_dir, application_year, application_number + '.json')
             try:
                 with open(filepath, 'r') as f:
                     patent = json.load(f)
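The last hunk pairs with the new data_dir comments above: once the archive from data_url is unpacked, each application's JSON file sits under a per-year subdirectory, so the generator now joins the year from the metadata row into the path. Below is a minimal sketch (not part of the commit) of the resulting path construction, with hypothetical values for json_dir and the metadata row; in hupd.py the row comes from df.itertuples() over the metadata feather file.

import os
from collections import namedtuple
from datetime import date

# Hypothetical stand-in for one row of the metadata dataframe.
Row = namedtuple("Row", ["filing_date", "application_number"])
x = Row(filing_date=date(2016, 1, 15), application_number="14988000")

json_dir = "data/sample"  # hypothetical unpack location of data_url
application_year = str(x.filing_date.year)
filepath = os.path.join(json_dir, application_year, x.application_number + ".json")
print(filepath)  # data/sample/2016/14988000.json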
json-files-Jan2016.tar DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4a7d7923941e39255112d2b40a40e8dae8579d9150459c1f0599ffe8a4cfb5a5
-size 2024540160
tests/tests.py ADDED
@@ -0,0 +1,179 @@
+"""
+Dataset loading tests. Run with:
+    PYTHONPATH=. pytest tests/tests.py -vvrP
+
+Additional notes about pytest:
+- Skip a test with @pytest.mark.skip(reason='skipping')
+- Use `-vvrP` to print stdout
+"""
+import pdb
+import os
+from pathlib import Path
+from pprint import pprint
+
+import pytest
+import torch
+import torch.nn.functional as F
+import torch.utils.data
+from datasets import load_dataset
+
+
+def test_dataset_sample():
+    """Load the sample dataset"""
+    root = os.getcwd()
+    dataset_dict = load_dataset(
+        'hupd.py',
+        name='sample',
+        data_files=os.path.join(root, "hupd_metadata_jan16_2022-02-22.feather"),
+        data_dir=os.path.join(root, "data/sample"),
+        uniform_split=True
+    )
+    for name, dataset in dataset_dict.items():
+        print(f'Dataset {name}: {len(dataset)}')
+    import pdb; pdb.set_trace()
+
+
+if __name__ == '__main__':
+    test_dataset_sample()
+
+
+# # # ----- Data loading example 1 ------
+# # # To load a dataset from files directly, pass in the
+# # # data_files and data_dir parameters. For example:
+
+# # # ----- Data loading example 2 ------
+# # # It is simple to specify an IPCR or CPC label and
+# # # a date range for training/validation. For example:
+# # dataset_dict = load_dataset(
+# #     'patents.py',
+# #     data_files="/blob/uspto/data/codebooks/data_link_new.pkl",
+# #     data_dir="/blob/uspto/data/distilled",
+# #     ipcr_label='G01T', #'G06F',
+# #     cpc_label=None,
+# #     train_filing_start_date=None,
+# #     train_filing_end_date=None,
+# #     val_filing_start_date=None,
+# #     val_filing_end_date=None,
+# # )
+
+# # # ----- Data loading example 3 ------
+# # If you do not specify the data_files and data_dir parameters, the
+# # dataset will be downloaded automatically for you. For example:
+# dataset_dict = load_dataset(
+#     'patents.py',
+#     data_dir="/blob/uspto/data/distilled",
+#     cache_dir='/blob/data/patents/distilled/distilled/huggingface-dataset/cache',
+#     ipcr_label=None,  # 'G01T', #'G06F', # cpc_label='G01T',
+#     train_filing_start_date='2016-01-01',
+#     train_filing_end_date='2016-01-05',
+#     val_filing_start_date='2017-01-01',
+#     val_filing_end_date='2017-01-05',
+# )
+
+
+# def combine_two_sections(tokenizer, dataset, s1, s2, new_tokens):
+
+#     # Add the seperation token
+#     if tokenizer.sep_token != '[SEP]':
+#         tokenizer.add_tokens(['[SEP]'], special_tokens=True)
+#         tokenizer.sep_token = '[SEP]'
+
+#     print(f'[OLD] len(tokenizer.vocab) = {len(tokenizer)}')
+#     tokenizer.add_tokens(new_tokens + [s1.upper(), 'TITLE', 'YEAR', s2.upper()])
+#     print(f'[NEW] len(tokenizer.vocab) = {len(tokenizer)}')
+#     dataset = dataset.map(
+#         # lambda e: {f'{s1}_{s2}': f'[SEP] {s1.upper()} ' + e[s1 + '_label'][:4] + ' [SEP] ' + e[s2]})
+#         lambda e: {f'{s1}_{s2}': f'[SEP] TITLE ' + e['title'] + '. YEAR ' + e['filing_date'][:4] + f'. {s1.upper()} ' + e[s1 + '_label'][:4] + f' [SEP] {s2.upper()} ' + e[s2]})
+#     return tokenizer, dataset
+
+
+# def convert_ids_to_string(tokenizer, input):
+#     return ' '.join(tokenizer.convert_ids_to_tokens(input))
+
+
+# conditional = 'ipc'
+# section = 'abstract'
+
+# # Print some metadata
+# print('Dataset dictionary contents:')
+# pprint(dataset_dict)
+# print('Dataset dictionary cached to:')
+# pprint(dataset_dict.cache_files)
+# print(f'Train dataset size: {dataset_dict["train"].shape}')
+# print(f'Validation dataset size: {dataset_dict["validation"].shape}')
+
+# # Example: preprocess dataset "decision" feature for classification
+# decision_to_str = {
+#     'REJECTED': 0,
+#     'ACCEPTED': 1,
+#     'PENDING': 2,
+#     'CONT-REJECTED': 3,
+#     'CONT-ACCEPTED': 4,
+#     'CONT-PENDING': 5
+# }
+
+
+# def map_decision_to_string(example):
+#     # NOTE: returned dict updates the example
+#     return {'decision': decision_to_str[example['decision']]}
+
+
+# # Performing the remapping means iterating over the dataset
+# # NOTE: This stores the updated table in a cache file indexed
+# # by the current state and the mapping function
+# train_dataset = dataset_dict['train'].map(map_decision_to_string)
+# print('Processed train dataset cached to: ')
+# pprint(train_dataset.cache_files)
+
+# # Example: preprocess dataset "abstract" field using huggingface
+# # tokenizers for classification. We truncate at the max token length.
+# from transformers import AutoTokenizer
+# tokenizer = AutoTokenizer.from_pretrained('roberta-base')
+
+# # def map_cpc_label(example):
+# #     # NOTE: returned dict updates the example
+# #     # print(tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(example['cpc_label'][:4])))
+# #     return {'cpc_label': tokenizer.convert_tokens_to_ids(example['cpc_label'][:4])}
+# # train_dataset = train_dataset.map(map_cpc_label)
+
+# if conditional:
+#     f = open(f'{conditional}_labels.txt', 'r')
+#     new_tokens = f.read().split('\n')
+#     tokenizer, train_dataset = combine_two_sections(tokenizer, train_dataset, conditional, section, new_tokens)
+#     section = f'{conditional}_{section}'
+
+# # We tokenize in batches, so it is actually quite fast
+# print('Tokenizing')
+# train_dataset = train_dataset.map(
+#     lambda e: tokenizer((e[section]), truncation=True, padding='max_length'),
+#     batched=True)
+# print('Processed train dataset cached to: ')
+# pprint(train_dataset.cache_files)
+# print('Processed train dataset columns: ')
+# pprint(train_dataset.column_names)
+
+# # Convert to PyTorch Dataset
+# # NOTE: If you also want to return string columns (as a list), just
+# # pass `output_all_columns=True` to the dataset
+# train_dataset.set_format(type='torch',
+#                          columns=['input_ids', 'attention_mask', 'decision'])
+
+# # Standard PyTorch DataLoader
+# from torch.utils.data import DataLoader
+# train_dataloader = DataLoader(train_dataset, batch_size=16)
+# print('Shapes of items in batch from standard PyTorch DataLoader:')
+# pprint({k: v.shape for k, v in next(iter(train_dataloader)).items()})
+# print('Batch from standard PyTorch DataLoader:')
+# batch = next(iter(train_dataloader))
+# pprint(batch['input_ids'])
+# pprint(batch['decision'])
+
+# # Print examples
+# print(convert_ids_to_string(tokenizer, batch['input_ids'][0]))
+# pprint(batch['input_ids'][0][:20])
+# # vocab = batch['input_ids'][0][:20]
+# # for elt in vocab:
+# #     print(f'{elt}: {convert_ids_to_string(tokenizer, [elt])}')
+# print(tokenizer.decode(batch['input_ids'][0]))
+
+# print('All done')
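The new test exercises the loader against a local copy of the metadata feather file and the unpacked sample archive. Because the configs in hupd.py now point at files hosted under the HUPD/hupd repo, the same config should also be loadable without local files. The snippet below is a hedged sketch of that variant, assuming the loading script is published as HUPD/hupd on the Hub and accepts the same uniform_split keyword used in test_dataset_sample(); it is not part of the commit.

from datasets import load_dataset

dataset_dict = load_dataset(
    "HUPD/hupd",         # assumption: Hub repo hosting hupd.py and the data files
    name="sample",       # January 2016 debugging config added in this commit
    uniform_split=True,  # same kwarg as in test_dataset_sample()
)
for split_name, split in dataset_dict.items():
    print(f"{split_name}: {len(split)} examples")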