|
|
import os |
|
|
from transformers import pipeline |
|
|
|
|
|
# Path to the locally fine-tuned model directory (expected next to the script's CWD).
model_path = os.path.join(os.getcwd(), "fine_tuned_werther_model")

print(f"Loading fine-tuned model from: {model_path}...")


def _generate_and_print(generator, prompt, **sampling_kwargs):
    """Print *prompt*, run the text-generation *generator* on it, and print the result.

    Args:
        generator: A Hugging Face ``pipeline("text-generation", ...)`` callable.
        prompt: The seed text to continue.
        **sampling_kwargs: Per-prompt sampling options (``max_new_tokens``,
            ``temperature``, ``top_k``, ``top_p``, ...) forwarded to the pipeline.

    All prompts share ``num_return_sequences=1`` and ``do_sample=True`` so each
    call yields a single stochastic continuation.
    """
    print(f"\nPrompt: '{prompt}'")
    outputs = generator(
        prompt,
        num_return_sequences=1,
        do_sample=True,
        **sampling_kwargs,
    )
    # The pipeline returns a list of dicts; with one sequence requested,
    # the generated string is at outputs[0]["generated_text"].
    print(f"Generated text: {outputs[0]['generated_text']}")


try:
    generator = pipeline("text-generation", model=model_path)
    print("Model loaded successfully!")

    print("\n--- Generating Text (Adjusted Parameters) ---")

    # Three sample prompts in Werther's voice, each with its own sampling setup.
    _generate_and_print(
        generator,
        "How happy I am that I am gone!",
        max_new_tokens=60,
        temperature=0.6,
        top_k=50,
        top_p=0.9,
    )
    _generate_and_print(
        generator,
        "My soul yearns for",
        max_new_tokens=70,
        temperature=0.7,
        top_k=40,
        top_p=0.85,
    )
    _generate_and_print(
        generator,
        "The world seemed to me",
        max_new_tokens=80,
        temperature=0.5,
    )

except Exception as e:
    # Top-level boundary: report the failure (e.g. missing model directory)
    # instead of crashing with a traceback.
    print(f"\nAn error occurred during text generation: {e}")
    print("Please ensure the 'fine_tuned_werther_model' directory exists and contains the model and tokenizer files.")
|
|
|