# distilgpt2-werther-finetuned / generate_werther_text.py
import os
from transformers import pipeline
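# The fine-tuned model is expected in a local directory, presumably produced via
# save_pretrained() (typically config.json, the tokenizer files, and the model weights).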
model_path = os.path.join(os.getcwd(), "fine_tuned_werther_model")
print(f"Loading fine-tuned model from: {model_path}...")
try:
    generator = pipeline("text-generation", model=model_path)
    print("Model loaded successfully!")

    print("\n--- Generating Text (Adjusted Parameters) ---")
    # Example 1: lower temperature for more focused output, shorter length
    prompt1 = "How happy I am that I am gone!"
    print(f"\nPrompt: '{prompt1}'")
    generated_text1 = generator(
        prompt1,
        max_new_tokens=60,   # shorter output
        num_return_sequences=1,
        do_sample=True,
        temperature=0.6,     # lower temperature
        top_k=50,
        top_p=0.9
    )
    print(f"Generated text: {generated_text1[0]['generated_text']}")
    # Example 2: try slightly different values
    prompt2 = "My soul yearns for"
    print(f"\nPrompt: '{prompt2}'")
    generated_text2 = generator(
        prompt2,
        max_new_tokens=70,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7,     # slightly higher than 0.6, lower than 0.9
        top_k=40,            # smaller top_k
        top_p=0.85           # slightly lower top_p
    )
    print(f"Generated text: {generated_text2[0]['generated_text']}")
    # Example 3: experiment with a lower temperature (more deterministic).
    # top_k and top_p are omitted here, so the library's generation defaults apply.
    prompt3 = "The world seemed to me"
    print(f"\nPrompt: '{prompt3}'")
    generated_text3 = generator(
        prompt3,
        max_new_tokens=80,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.5      # lowest temperature of the three examples
    )
    print(f"Generated text: {generated_text3[0]['generated_text']}")
except Exception as e:
print(f"\nAn error occurred during text generation: {e}")
print("Please ensure the 'fine_tuned_werther_model' directory exists and contains the model and tokenizer files.")