| | """Tokenization classes for Moss""" |
| |
|
| | import json |
| | import os |
| | import numpy as np |
| | import regex as re |
| |
|
| | from functools import lru_cache |
| | from typing import TYPE_CHECKING, List, Optional, Tuple, Union |
| |
|
| | from transformers.utils import is_tf_available, is_torch_available, logging |
| | from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer |
| |
|
| |
|
| | if TYPE_CHECKING: |
| | if is_torch_available(): |
| | import torch |
| | if is_tf_available(): |
| | import tensorflow as tf |
| |
|
| |
|
| | logger = logging.get_logger(__name__) |

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "fnlp/moss-moon-003-base": "https://huggingface.co/fnlp/moss-moon-003-base/resolve/main/vocab.json",
        "fnlp/moss-moon-003-sft": "https://huggingface.co/fnlp/moss-moon-003-sft/resolve/main/vocab.json",
        "fnlp/moss-moon-003-sft-plugin": "https://huggingface.co/fnlp/moss-moon-003-sft-plugin/resolve/main/vocab.json",
    },
    "merges_file": {
        "fnlp/moss-moon-003-base": "https://huggingface.co/fnlp/moss-moon-003-base/resolve/main/merges.txt",
        "fnlp/moss-moon-003-sft": "https://huggingface.co/fnlp/moss-moon-003-sft/resolve/main/merges.txt",
        "fnlp/moss-moon-003-sft-plugin": "https://huggingface.co/fnlp/moss-moon-003-sft-plugin/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "fnlp/moss-moon-003-base": 2048,
    "fnlp/moss-moon-003-sft": 2048,
    "fnlp/moss-moon-003-sft-plugin": 2048,
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping between utf-8 bytes and unicode strings. We specifically avoid mapping to whitespace/control
    characters that the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your
    vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K
    for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want
    lookup tables between utf-8 bytes and unicode strings.
    """
    # Start from the printable byte ranges, then assign every remaining byte a stand-in
    # character above U+0100 so the mapping stays one-to-one and printable.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
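
# For illustration (not part of the original module): the table maps every possible byte
# to a printable character; the space byte (0x20) becomes "Ġ", which is why GPT-2-style
# merge files show "Ġ" in place of leading spaces. A quick sanity check:
#
#     table = bytes_to_unicode()
#     assert table[ord(" ")] == "Ġ"
#     assert len(table) == 256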


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
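
# Example: get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")} -- the adjacent
# symbol pairs that bpe() below ranks against self.bpe_ranks to pick the next merge.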


class MossTokenizer(PreTrainedTokenizer):
    """
    Construct a Moss tokenizer. Based on byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word
    will be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not.

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizer`], which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `<eom>`):
            The end of sequence token.
        pad_token (`str`, *optional*):
            The token used for padding, for example when batching sequences of different lengths.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word. (The Moss tokenizer detects the beginning of words by the preceding space.)
        add_bos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add a beginning of sequence token at the start of sequences.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<eom>",
        pad_token=None,
        add_prefix_space=False,
        add_bos_token=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            add_bos_token=add_bos_token,
            **kwargs,
        )
        self.add_bos_token = add_bos_token

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # Skip the "#version" header line and the trailing empty line.
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
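        # For illustration: the pre-tokenizer splits contractions, letter runs, digit runs,
        # punctuation runs, and trailing whitespace before BPE is applied, e.g.
        #
        #     self.pat.findall("I'm 20!")  # ["I", "'m", " 20", "!"]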

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked pair first; stop once no remaining pair is in the table.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
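
    # Worked example (hypothetical merge table): with bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1},
    # bpe("low") merges ("l", "o") -> "lo", then ("lo", "w") -> "low", and returns "low".
    # With an empty table it returns "l o w"; _tokenize() splits that back on spaces.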

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []

        output = bos_token_ids + token_ids_0

        if token_ids_1 is None:
            return output

        return output + bos_token_ids + token_ids_1
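
    # For illustration: with add_bos_token=True, token_ids_0=[5, 6] and token_ids_1=[7],
    # this returns [self.bos_token_id, 5, 6, self.bos_token_id, 7]; with the default
    # add_bos_token=False the ids pass through unchanged.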

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
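
    # Pipeline illustration (the final pieces are hypothetical -- they depend on the merges):
    #
    #     "Hi there" --regex--> ["Hi", " there"] --byte map--> ["Hi", "Ġthere"]
    #                --bpe--> e.g. ["Hi", "Ġthere"] if those merges exist, else smaller pieces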

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
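
    # E.g. convert_tokens_to_string(["Hi", "Ġthere"]) maps each character back through
    # byte_decoder and decodes the resulting bytes, yielding "Hi there".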

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
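
    # Hypothetical usage: tokenizer.save_vocabulary("./moss_tokenizer") writes
    # ./moss_tokenizer/vocab.json and ./moss_tokenizer/merges.txt; save_pretrained()
    # calls this method for you and from_pretrained() can load the result back.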

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if is_split_into_words or add_prefix_space:
            text = " " + text
        return (text, kwargs)
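
    # E.g. prepare_for_tokenization("Hello", add_prefix_space=True) returns (" Hello", {}),
    # so a leading "Hello" encodes the same way as a mid-sentence " Hello".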

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids to a string, using the tokenizer and vocabulary, with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
            truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
                A list of regular expression strings that will be used to truncate the returned string. This can be
                used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
                of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str`: The decoded sentence.
        """
        # Go through the public `decode` of the base class so tensor/array inputs are
        # converted to plain Python lists before `_decode` runs.
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text
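
    # For illustration: decode(ids, truncate_before_pattern=["^#", "\n\n\n"]) cuts the decoded
    # completion at the first line starting with "#" (patterns are compiled with re.MULTILINE)
    # or at the first triple newline, whichever comes first, via truncate() below.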

    def truncate(self, completion, truncate_before_pattern):
        """Cut `completion` before the earliest match of any pattern in `truncate_before_pattern`."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        # Keep at most one top-level `print` statement and one top-level function definition.
        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
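
    # Worked example (hypothetical completion string):
    #
    #     completion = "def f():\n    return 1\ndef g():\n    return 2\n"
    #     tokenizer.truncate(completion, [])  # -> "def f():\n    return 1\n"
    #
    # The second top-level "def" starts the cut even when no extra patterns are supplied.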