| | from datasets import Dataset, load_dataset |
| | from transformers import AutoTokenizer |
| |
|
# Tokenizer for the target model; used below to measure prompt length in tokens.
# NOTE(review): loads from a local relative path — assumes the model snapshot
# has already been downloaded into models/; confirm before running.
tokenizer = AutoTokenizer.from_pretrained('models/RedPajama-INCITE-Instruct-7B')
# Maximum sequence length (in tokens) a training example may occupy.
max_seq = 2048
| |
|
def make_prompt(code):
    """Wrap *code* in the Alpaca-style instruction/response prompt template."""
    header = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'
    return f'{header}\n\n### Instruction:\n{code}\n\n### Response:\n'
| |
|
| |
|
def is_not_too_long(data):
    """Return True when the rendered prompt for this record tokenizes to fewer than max_seq ids."""
    return len(tokenizer.encode(make_prompt(data['content']))) < max_seq
| |
|
def deduplicate_dicts(dicts):
    """Return *dicts* with duplicate 'content' values removed, keeping first occurrence.

    Records lacking a 'content' key all map to the sentinel None, so only the
    first such record survives (unchanged from the original behavior).
    """
    # A set is the idiomatic membership structure here; the original abused a
    # dict with throwaway True values.
    seen = set()
    result = []
    for record in dicts:
        content = record.get('content')
        if content not in seen:
            seen.add(content)
            result.append(record)
    return result
| |
|
# Load the raw code chunks emitted by the TypeScript parser (JSONL, one record
# per line); load_dataset places everything under the 'train' split.
dataset = load_dataset('json', data_files='ts_parser/ts-chunks.jsonl')

# Keep only records whose rendered prompt fits within the max_seq token budget.
data_short = dataset.filter(is_not_too_long)

# Collapse records that share the same 'content' value (first occurrence wins).
dedup = deduplicate_dicts(data_short['train'])

# Rebuild a Dataset object from the surviving plain-dict records.
data_short_dedup = Dataset.from_list(dedup)
print(data_short_dedup)

# Persist the filtered, deduplicated dataset as JSON lines for training.
data_short_dedup.to_json('typescript-chunks.json')
| |
|