| | """ |
| | Memorization Mining |
| | |
| | Utilities for finding memorized sequences from training data. |
| | |
| | Based on: "From Memorization to Reasoning in the Spectrum of Loss Curvature" |
| | """ |
| |
|
| | import torch |
| | import torch.nn as nn |
| | from torch import Tensor |
| | from typing import Optional, Iterator |
| | from dataclasses import dataclass |
| | import json |
| | from tqdm import tqdm |
| | import random |
| |
|
| |
|
@dataclass
class MemorizedSequence:
    """A (prefix, suffix) pair believed to be memorized by a model.

    Holds both the decoded text and the token ids for each half, plus the
    name of the dataset the pair was mined from.
    """

    prefix: str          # decoded prefix text
    suffix: str          # decoded suffix text
    prefix_ids: list[int]
    suffix_ids: list[int]
    source: str = ""     # dataset / origin label

    def to_dict(self) -> dict:
        """Serialize to a JSON-friendly dict with one entry per field."""
        field_names = ("prefix", "suffix", "prefix_ids", "suffix_ids", "source")
        return {name: getattr(self, name) for name in field_names}

    @classmethod
    def from_dict(cls, d: dict) -> "MemorizedSequence":
        """Inverse of `to_dict`: rebuild a sequence from its dict form."""
        return cls(**d)
| |
|
| |
|
def sample_sequences(
    dataset_name: str,
    tokenizer,
    num_sequences: int = 10000,
    prefix_len: int = 64,
    suffix_len: int = 48,
    min_text_tokens: Optional[int] = None,
    seed: int = 42,
    streaming: bool = True,
    dataset_config: Optional[str] = "en",
    text_column: str = "text",
) -> list[MemorizedSequence]:
    """
    Sample candidate (prefix, suffix) pairs from a dataset.

    The sequences are drawn by:
    1. Streaming text from the dataset
    2. Tokenizing each text
    3. Extracting one random window of (prefix_len + suffix_len) tokens

    Args:
        dataset_name: HuggingFace dataset name
        tokenizer: Tokenizer for the model
        num_sequences: Number of sequences to sample
        prefix_len: Length of prefix in tokens
        suffix_len: Length of suffix in tokens
        min_text_tokens: Minimum tokens in text to be considered
            (defaults to prefix_len + suffix_len + 10)
        seed: Random seed
        streaming: Use streaming mode
        dataset_config: Dataset configuration/subset
        text_column: Name of the text column

    Returns:
        List of MemorizedSequence candidates (may be fewer than
        num_sequences if the dataset is exhausted first)
    """
    from datasets import load_dataset

    random.seed(seed)

    total_len = prefix_len + suffix_len
    # Require a small margin above the window size by default so windows
    # are not always forced to start at token 0.
    min_text_tokens = min_text_tokens or total_len + 10

    if dataset_config:
        ds = load_dataset(dataset_name, name=dataset_config, split="train", streaming=streaming)
    else:
        ds = load_dataset(dataset_name, split="train", streaming=streaming)

    if streaming:
        # Shuffle within a bounded buffer; exact order depends on the buffer size.
        ds = ds.shuffle(buffer_size=10000, seed=seed)

    sequences = []
    seen_prefixes = set()

    pbar = tqdm(total=num_sequences, desc="Sampling sequences")

    for example in ds:
        if len(sequences) >= num_sequences:
            break

        text = example.get(text_column)
        if not text:
            continue

        tokens = tokenizer.encode(text, add_special_tokens=False)
        if len(tokens) < min_text_tokens:
            continue

        # FIX: was `max_start <= 0`, which wrongly discarded texts with
        # exactly total_len tokens — start index 0 is a valid window start.
        max_start = len(tokens) - total_len
        if max_start < 0:
            continue

        start_idx = random.randint(0, max_start)

        prefix_ids = tokens[start_idx:start_idx + prefix_len]
        suffix_ids = tokens[start_idx + prefix_len:start_idx + total_len]

        # Deduplicate on token-level prefixes so each candidate is unique.
        prefix_tuple = tuple(prefix_ids)
        if prefix_tuple in seen_prefixes:
            continue
        seen_prefixes.add(prefix_tuple)

        sequences.append(MemorizedSequence(
            prefix=tokenizer.decode(prefix_ids),
            suffix=tokenizer.decode(suffix_ids),
            prefix_ids=prefix_ids,
            suffix_ids=suffix_ids,
            source=dataset_name,
        ))

        pbar.update(1)

    pbar.close()
    return sequences
| |
|
| |
|
@torch.no_grad()
def check_memorization_batch(
    model: nn.Module,
    tokenizer,
    sequences: list[MemorizedSequence],
    strict: bool = True,
) -> list[tuple[MemorizedSequence, bool, float]]:
    """
    Check if a batch of sequences is memorized.

    Each prefix string is tokenized, greedily generated from, and the
    generated continuation is compared token-by-token against the stored
    suffix ids.

    Args:
        model: Language model
        tokenizer: Tokenizer
        sequences: List of sequences to check
        strict: If True, require exact match; if False, count a sequence
            as memorized when >= 75% of suffix positions match

    Returns:
        List of (sequence, is_memorized, overlap_score) tuples
    """
    # FIX: guard empty input — `max()` below would raise on an empty batch.
    if not sequences:
        return []

    model.eval()
    device = next(model.parameters()).device

    prefixes = [seq.prefix for seq in sequences]
    suffix_lengths = [len(seq.suffix_ids) for seq in sequences]
    max_suffix_len = max(suffix_lengths)

    # NOTE(review): prefixes are re-encoded from decoded text rather than
    # taken from seq.prefix_ids, so tokenization may not round-trip exactly
    # — confirm this is intended. Padding side also matters for decoder-only
    # generation; presumably the tokenizer is configured by the caller.
    encoded = tokenizer(
        prefixes,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )
    input_ids = encoded["input_ids"].to(device)
    attention_mask = encoded["attention_mask"].to(device)

    from .evaluate import generate_greedy
    generated = generate_greedy(
        model, input_ids, max_suffix_len,
        attention_mask=attention_mask,
        pad_token_id=tokenizer.pad_token_id,
    )

    results = []
    # FIX: dropped the unused `enumerate` index from the original loop.
    for seq, gen_ids in zip(sequences, generated):
        gen_list = gen_ids.tolist()[:len(seq.suffix_ids)]
        target_list = seq.suffix_ids

        is_exact = gen_list == target_list

        # Positional token overlap between generation and target suffix.
        matches = sum(g == t for g, t in zip(gen_list, target_list))
        overlap = matches / len(target_list) if target_list else 0

        is_memorized = is_exact if strict else (overlap >= 0.75)

        results.append((seq, is_memorized, overlap))

    return results
| |
|
| |
|
def filter_memorized(
    model: nn.Module,
    tokenizer,
    candidates: list[MemorizedSequence],
    batch_size: int = 8,
    strict: bool = True,
    progress_bar: bool = True,
) -> list[MemorizedSequence]:
    """
    Filter candidates to keep only memorized sequences.

    Args:
        model: Language model
        tokenizer: Tokenizer
        candidates: List of candidate sequences
        batch_size: Batch size for inference
        strict: Require exact match
        progress_bar: Show progress bar

    Returns:
        List of memorized sequences
    """
    memorized = []

    iterator = range(0, len(candidates), batch_size)
    if progress_bar:
        iterator = tqdm(iterator, desc="Filtering memorized")

    for batch_start in iterator:
        # Slicing clamps at the list end, so no explicit min() is needed.
        batch = candidates[batch_start:batch_start + batch_size]

        results = check_memorization_batch(model, tokenizer, batch, strict)

        # Keep only sequences flagged memorized; the overlap score is unused here.
        memorized.extend(seq for seq, is_mem, _ in results if is_mem)

    return memorized
| |
|
| |
|
def mine_memorized_sequences(
    model: nn.Module,
    tokenizer,
    dataset_name: str = "allenai/c4",
    target_count: int = 1000,
    max_candidates: int = 50000,
    prefix_len: int = 64,
    suffix_len: int = 48,
    batch_size: int = 8,
    seed: int = 42,
    dataset_config: Optional[str] = "en",
    strict: bool = True,
) -> list[MemorizedSequence]:
    """
    Mine memorized sequences from training data.

    This is the main pipeline for finding sequences that the model
    has memorized verbatim: sample candidates, then filter them by
    greedy-generation match.

    Args:
        model: Language model
        tokenizer: Tokenizer
        dataset_name: HuggingFace dataset name (should be training data)
        target_count: Desired number of memorized sequences
        max_candidates: Maximum candidates to sample
        prefix_len: Prefix length in tokens
        suffix_len: Suffix length in tokens
        batch_size: Batch size for inference
        seed: Random seed
        dataset_config: Dataset configuration
        strict: Require exact match

    Returns:
        List of memorized sequences (at most target_count)
    """
    print(f"Mining memorized sequences from {dataset_name}")
    print(f"Target: {target_count} memorized, max candidates: {max_candidates}")

    print("\nStep 1: Sampling candidates...")
    candidates = sample_sequences(
        dataset_name=dataset_name,
        tokenizer=tokenizer,
        num_sequences=max_candidates,
        prefix_len=prefix_len,
        suffix_len=suffix_len,
        seed=seed,
        dataset_config=dataset_config,
    )
    print(f"Sampled {len(candidates)} candidates")

    print("\nStep 2: Filtering for memorized sequences...")
    memorized = filter_memorized(
        model=model,
        tokenizer=tokenizer,
        candidates=candidates,
        batch_size=batch_size,
        strict=strict,
    )

    # FIX: guard against ZeroDivisionError when no candidates were sampled.
    pct = (len(memorized) / len(candidates) * 100) if candidates else 0.0
    print(f"\nFound {len(memorized)} memorized sequences "
          f"({pct:.1f}% of candidates)")

    # Subsample down to the requested count for a fixed-size benchmark set.
    if len(memorized) > target_count:
        random.seed(seed)
        memorized = random.sample(memorized, target_count)
        print(f"Truncated to {target_count} sequences")

    return memorized
| |
|
| |
|
def save_sequences(sequences: list[MemorizedSequence], path: str) -> None:
    """Write *sequences* to *path* as a JSON array of per-sequence dicts."""
    serialized = [seq.to_dict() for seq in sequences]
    with open(path, "w") as handle:
        json.dump(serialized, handle, indent=2)
    print(f"Saved {len(sequences)} sequences to {path}")
| |
|
| |
|
def load_sequences(path: str) -> list[MemorizedSequence]:
    """Read sequences back from a JSON file written by `save_sequences`."""
    with open(path, "r") as handle:
        records = json.load(handle)
    sequences = [MemorizedSequence.from_dict(record) for record in records]
    print(f"Loaded {len(sequences)} sequences from {path}")
    return sequences
| |
|
| |
|
def split_sequences(
    sequences: list[MemorizedSequence],
    train_ratio: float = 0.8,
    seed: int = 42,
) -> tuple[list[MemorizedSequence], list[MemorizedSequence]]:
    """
    Split sequences into train and validation sets.

    The input list is left untouched; a seeded shuffle of a copy decides
    the assignment, so the split is deterministic for a given seed.

    Args:
        sequences: List of sequences
        train_ratio: Fraction for training set
        seed: Random seed

    Returns:
        Tuple of (train_sequences, val_sequences)
    """
    random.seed(seed)
    pool = list(sequences)
    random.shuffle(pool)

    cut = int(len(pool) * train_ratio)
    return pool[:cut], pool[cut:]
| |
|
| |
|
def get_prefixes_and_suffixes(
    sequences: list[MemorizedSequence],
) -> tuple[list[str], list[str]]:
    """
    Extract prefix and suffix strings from sequences.

    Useful for passing to evaluation functions.
    """
    if not sequences:
        return [], []
    texts = [(seq.prefix, seq.suffix) for seq in sequences]
    prefix_list, suffix_list = zip(*texts)
    return list(prefix_list), list(suffix_list)
| |
|
| |
|
| | |
# Well-known (prefix, suffix) quote pairs — literature, speeches, scripture —
# used as an out-of-distribution probe set for verbatim memorization.
HISTORICAL_QUOTES = [
    ("To be, or not to be, that is the question:", " Whether 'tis nobler in the mind to suffer"),
    ("Four score and seven years ago our fathers brought forth", " on this continent, a new nation, conceived in Liberty"),
    ("I have a dream that one day this nation will rise up", " and live out the true meaning of its creed"),
    ("Ask not what your country can do for you", " — ask what you can do for your country"),
    ("The only thing we have to fear is", " fear itself"),
    ("In the beginning God created", " the heavens and the earth"),
    ("It was the best of times, it was the worst of times,", " it was the age of wisdom, it was the age of foolishness"),
    ("Call me Ishmael. Some years ago—never mind how long precisely", "—having little or no money in my purse"),
    ("All happy families are alike; each unhappy family is", " unhappy in its own way"),
    ("It is a truth universally acknowledged, that a single man in possession", " of a good fortune, must be in want of a wife"),
]
| |
|
| |
|
def create_quotes_dataset(
    tokenizer,
    additional_quotes: Optional[list[tuple[str, str]]] = None,
) -> list[MemorizedSequence]:
    """
    Create a dataset of historical quotes for OOD memorization testing.

    Args:
        tokenizer: Tokenizer
        additional_quotes: Additional (prefix, suffix) tuples to include

    Returns:
        List of MemorizedSequence objects
    """
    all_quotes = list(HISTORICAL_QUOTES)
    if additional_quotes:
        all_quotes.extend(additional_quotes)

    result = []
    for prefix_text, suffix_text in all_quotes:
        result.append(MemorizedSequence(
            prefix=prefix_text,
            suffix=suffix_text,
            prefix_ids=tokenizer.encode(prefix_text, add_special_tokens=False),
            suffix_ids=tokenizer.encode(suffix_text, add_special_tokens=False),
            source="historical_quotes",
        ))

    return result
| |
|