mav23 commited on
Commit
1e94682
·
verified ·
1 Parent(s): 3bcfec3

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ distilgpt2-stable-diffusion-v2.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: creativeml-openrail-m
3
+ tags:
4
+ - stable-diffusion
5
+ - prompt-generator
6
+ - arxiv:2210.14140
7
+ widget:
8
+ - text: "amazing"
9
+ - text: "a photo of"
10
+ - text: "a sci-fi"
11
+ - text: "a portrait of"
12
+ - text: "a person standing"
13
+ - text: "a boy watching"
14
+ datasets:
15
+ - FredZhang7/stable-diffusion-prompts-2.47M
16
+ - poloclub/diffusiondb
17
+ - Gustavosta/Stable-Diffusion-Prompts
18
+ - bartman081523/stable-diffusion-discord-prompts
19
+ ---
20
+ # Fast GPT2 PromptGen
21
+
22
+ <style>
23
+ .container {
24
+ padding-left: 20px;
25
+ border-left: 5px solid gray;
26
+ }
27
+ </style>
28
+
29
+ <div class="container">
30
+ <p><strong><a href="https://huggingface.co/FredZhang7/anime-anything-promptgen-v2">Fast Anime PromptGen</a></strong> generates descriptive safebooru and danbooru tags for anime text-to-image models.</p>
31
+ </div>
32
+
33
+
34
+ This model was trained on 2,470,000 descriptive stable diffusion prompts on the [FredZhang7/distilgpt2-stable-diffusion](https://huggingface.co/FredZhang7/distilgpt2-stable-diffusion) checkpoint for another 4,270,000 steps.
35
+
36
+ Compared to other prompt generation models using GPT2, this one runs with 50% faster forward propagation and 40% less disk space & RAM.
37
+
38
+ Major improvements from v1 are:
39
+ - 25% more variations
40
+ - faster and more fluent prompt generation
41
+ - cleaned training data
42
+ * removed prompts that generate images with nsfw scores > 0.5
43
+ * removed duplicates, including prompts that differ by capitalization and punctuation
44
+ * removed punctuation at random places
45
+ * removed prompts shorter than 15 characters
46
+
47
+
48
+ ## Live WebUI Demo
49
+ See the Prompt Generator tab of [Paint Journey Demo](https://huggingface.co/spaces/FredZhang7/paint-journey-demo).
50
+
51
+
52
+ ## Contrastive Search
53
+
54
+ ```bash
55
+ pip install --upgrade transformers
56
+ ```
57
+
58
+ ```python
59
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
60
+ tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
61
+ tokenizer.add_special_tokens({'pad_token': '[PAD]'})
62
+ model = GPT2LMHeadModel.from_pretrained('FredZhang7/distilgpt2-stable-diffusion-v2')
63
+
64
+ prompt = r'a cat sitting' # the beginning of the prompt
65
+ temperature = 0.9 # a higher temperature will produce more diverse results, but with a higher risk of less coherent text
66
+ top_k = 8 # the number of tokens to sample from at each step
67
+ max_length = 80 # the maximum number of tokens for the output of the model
68
+ repetition_penalty = 1.2 # the penalty value for each repetition of a token
69
+ num_return_sequences=5 # the number of results to generate
70
+
71
+ # generate the result with contrastive search
72
+ input_ids = tokenizer(prompt, return_tensors='pt').input_ids
73
+ output = model.generate(input_ids, do_sample=True, temperature=temperature, top_k=top_k, max_length=max_length, num_return_sequences=num_return_sequences, repetition_penalty=repetition_penalty, penalty_alpha=0.6, no_repeat_ngram_size=1, early_stopping=True)
74
+
75
+ print('\nInput:\n' + 100 * '-')
76
+ print('\033[96m' + prompt + '\033[0m')
77
+ print('\nOutput:\n' + 100 * '-')
78
+ for i in range(len(output)):
79
+     print('\033[92m' + tokenizer.decode(output[i], skip_special_tokens=True) + '\033[0m\n')
80
+ ```
81
+
82
+ No comma style:
83
+ ![contrastive search](./constrastive_search.png)
84
+
85
+
86
+ To bring back the commas, assign output without `penalty_alpha` and `no_repeat_ngram_size`:
87
+ ```python
88
+ output = model.generate(input_ids, do_sample=True, temperature=temperature, top_k=top_k, max_length=max_length, num_return_sequences=num_return_sequences, repetition_penalty=repetition_penalty, early_stopping=True)
89
+ ```
90
+
91
+ ![contrastive search](./contrastive_comma_style.png)
distilgpt2-stable-diffusion-v2.Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dd77104266dfebe4e0dc5632348792c69c1de62c327fbe651a16ad7c6eb97ce
3
+ size 82424032