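# NOTE: this snippet continues an earlier part of the script. It assumes that
# DEVICE, embedding_layer, transformer_encoder, pos_encoding, output_layer,
# criterion, vocab_size, loss_history, data, tokenizer, chunked_tokenizer,
# MAX_LEN, CHUNK_SIZE, BATCH_SIZE, EPOCHS and CHECKPOINT_PATH were defined
# above, and that os, torch and torch.utils.data (TensorDataset, DataLoader)
# were imported there.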
embedding_layer = embedding_layer.to(DEVICE)
transformer_encoder = transformer_encoder.to(DEVICE)
pos_encoding = pos_encoding.to(DEVICE)
output_layer = output_layer.to(DEVICE)

# -----------------------------
# Optimizer
# -----------------------------
optimizer = torch.optim.Adam(
    list(embedding_layer.parameters()) +
    list(transformer_encoder.parameters()) +
    list(pos_encoding.parameters()) +
    list(output_layer.parameters()),
    lr=1e-4
)
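
# A learning-rate scheduler could optionally be attached to this optimizer;
# a minimal sketch (not wired into the loop below), assuming simple
# exponential decay is wanted:
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
# scheduler.step() would then be called once per epoch, after the inner loops.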

# -----------------------------
# Load checkpoint
# -----------------------------
start_epoch = 0
if os.path.exists(CHECKPOINT_PATH):
    checkpoint = torch.load(CHECKPOINT_PATH, map_location=DEVICE)
    embedding_layer.load_state_dict(checkpoint['embedding_state'])
    pos_encoding.load_state_dict(checkpoint['pos_encoding_state'])
    transformer_encoder.load_state_dict(checkpoint['transformer_state'])
    output_layer.load_state_dict(checkpoint['output_state'])
    optimizer.load_state_dict(checkpoint['optimizer_state'])
    start_epoch = checkpoint['epoch'] + 1
    print(f"Модель загружена, продолжаем с эпохи {start_epoch}")
else:
    print("Чекпоинт не найден, начинаем обучение с нуля")

# -----------------------------
# Training loop with debug output
# -----------------------------
for epoch in range(start_epoch, start_epoch + EPOCHS):
    running_loss = 0.0
    print(f"\n=== Эпоха {epoch + 1}/{start_epoch + EPOCHS} ===")

    for chunk_idx, (input_ids_chunk, attention_mask_chunk, target_ids_chunk) in enumerate(
            chunked_tokenizer(data, tokenizer, max_len=MAX_LEN, chunk_size=CHUNK_SIZE)
    ):
        print(f"\n--- Чанк {chunk_idx + 1} / {len(data) // CHUNK_SIZE + 1} ---")
        dataset = TensorDataset(input_ids_chunk, attention_mask_chunk, target_ids_chunk)
        dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
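        # On a CUDA machine, DataLoader(..., num_workers=2, pin_memory=True)
        # can speed up host-to-device copies; defaults are kept here for
        # portability.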

        for batch_idx, batch in enumerate(dataloader):
            batch_input_ids, batch_attention_mask, batch_target_ids = [x.to(DEVICE) for x in batch]
            padding_mask = (batch_attention_mask == 0)
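            # src_key_padding_mask expects True at positions attention should
            # ignore, which is exactly where attention_mask == 0 (pad tokens)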

            optimizer.zero_grad()

            # Embeddings
            embedded = embedding_layer(batch_input_ids)
            print(f"[DEBUG] embedded shape: {embedded.shape}")  # batch, seq_len, embed_dim

            # Positional encoding
            embedded = embedded.transpose(0, 1)  # seq_len, batch, embed_dim
            embedded = pos_encoding(embedded)
            print(f"[DEBUG] embedded + pos_encoding shape: {embedded.shape}")

            # Transformer
            transformer_output = transformer_encoder(embedded, src_key_padding_mask=padding_mask)
            transformer_output = transformer_output.transpose(0, 1)  # batch, seq_len, embed_dim
            print(f"[DEBUG] transformer_output shape: {transformer_output.shape}")

            # Rough memory footprint of the transformer output (float32 = 4 bytes/element)
            batch_size, seq_len, emb_dim = transformer_output.shape
            mem_MB = batch_size * seq_len * emb_dim * 4 / 1024 ** 2
            print(f"[DEBUG] Output memory approx: {mem_MB:.2f} MB")

            # Output projection (linear layer)
            logits = output_layer(transformer_output)
            print(f"[DEBUG] logits shape: {logits.shape}")

            # Loss (.reshape instead of .view, in case a tensor is
            # non-contiguous after the transposes above)
            loss = criterion(logits.reshape(-1, vocab_size), batch_target_ids.reshape(-1))
            loss_history.append(loss.item())
            print(f"[DEBUG] batch {batch_idx + 1} loss: {loss.item():.6f}")

            # Backprop
            loss.backward()
            optimizer.step()
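            # Gradient clipping could be added just before optimizer.step() to
            # guard against exploding gradients; a minimal sketch (not enabled):
            # torch.nn.utils.clip_grad_norm_(
            #     list(embedding_layer.parameters()) +
            #     list(transformer_encoder.parameters()) +
            #     list(pos_encoding.parameters()) +
            #     list(output_layer.parameters()),
            #     max_norm=1.0,
            # )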

            running_loss += loss.item() * batch_input_ids.size(0)

            # Show sample predictions
            pred_tokens = torch.argmax(logits, dim=-1)
            sample_input = tokenizer.decode(batch_input_ids[0], skip_special_tokens=True)
            sample_pred = tokenizer.decode(pred_tokens[0], skip_special_tokens=True)
            sample_target = tokenizer.decode(batch_target_ids[0], skip_special_tokens=True)
            print(f"[DEBUG] Sample input:  {sample_input[:50]}...")
            print(f"[DEBUG] Sample target: {sample_target[:50]}...")
            print(f"[DEBUG] Sample pred:   {sample_pred[:50]}...")

            # Free memory
            del batch_input_ids, batch_attention_mask, batch_target_ids, embedded, transformer_output, logits
            torch.cuda.empty_cache()
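            # Calling torch.cuda.empty_cache() every batch is slow and mainly
            # useful while debugging memory; it can be dropped for real runs.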

    avg_loss = running_loss / len(data)
    print(f"\n=== Эпоха {epoch + 1} завершена — Avg Loss: {avg_loss:.6f} ===\n")

# -----------------------------
# Save checkpoint
# -----------------------------
torch.save({
    'embedding_state': embedding_layer.state_dict(),
    'pos_encoding_state': pos_encoding.state_dict(),
    'transformer_state': transformer_encoder.state_dict(),
    'output_state': output_layer.state_dict(),
    'optimizer_state': optimizer.state_dict(),
    'epoch': epoch
}, CHECKPOINT_PATH)
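
# Note: saving happens once, after all epochs complete. Moving this torch.save
# block inside the epoch loop would checkpoint after every epoch, which is
# safer for long runs.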