File size: 1,884 Bytes
3dacdd0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import os
from transformers import pipeline

# Name of the directory (relative to the current working directory) that
# holds the fine-tuned model and tokenizer files.
MODEL_DIR_NAME = "fine_tuned_werther_model"


def _generate_and_print(generator, prompt, **sampling_kwargs):
    """Generate one sampled continuation of *prompt* and print it.

    Args:
        generator: A transformers text-generation pipeline.
        prompt: The seed text to continue.
        **sampling_kwargs: Forwarded to the pipeline call
            (e.g. max_new_tokens, temperature, top_k, top_p).
    """
    print(f"\nPrompt: '{prompt}'")
    # Every example in this script uses sampling with a single returned
    # sequence, so those two arguments are fixed here.
    outputs = generator(
        prompt,
        num_return_sequences=1,
        do_sample=True,
        **sampling_kwargs,
    )
    print(f"Generated text: {outputs[0]['generated_text']}")


def main():
    """Load the fine-tuned Werther model and run three sampling experiments."""
    model_path = os.path.join(os.getcwd(), MODEL_DIR_NAME)
    print(f"Loading fine-tuned model from: {model_path}...")

    try:
        generator = pipeline("text-generation", model=model_path)
        print("Model loaded successfully!")

        print("\n--- Generating Text (Adjusted Parameters) ---")

        # Example 1: Lower temperature for less repetition, shorter length
        _generate_and_print(
            generator,
            "How happy I am that I am gone!",
            max_new_tokens=60,  # Shorter output
            temperature=0.6,    # Lower temperature
            top_k=50,
            top_p=0.9,
        )

        # Example 2: Try slightly different values
        _generate_and_print(
            generator,
            "My soul yearns for",
            max_new_tokens=70,
            temperature=0.7,  # Slightly higher than 0.6, lower than 0.9
            top_k=40,         # Smaller top_k
            top_p=0.85,       # Slightly lower top_p
        )

        # Example 3: Experiment with a very low temperature (more deterministic)
        _generate_and_print(
            generator,
            "The world seemed to me",
            max_new_tokens=80,
            temperature=0.5,  # Very low temperature
        )

    except Exception as e:
        # Deliberately broad: this is the script's top-level boundary, and any
        # failure (missing model directory, corrupt weights, backend errors)
        # should produce the same friendly message instead of a raw traceback.
        print(f"\nAn error occurred during text generation: {e}")
        print("Please ensure the 'fine_tuned_werther_model' directory exists and contains the model and tokenizer files.")


if __name__ == "__main__":
    main()