Spaces: Build error
Update app_dialogue.py
app_dialogue.py (CHANGED): +207 -201
@@ -561,7 +561,7 @@ textbox = gr.Textbox(
 
 css2 = """
 #chatbot {
-    background-image: url('https://huggingface.co/spaces/ysharma/
+    background-image: url('https://huggingface.co/spaces/ysharma/dummy_m4/resolve/main/idefics_transparent20.png');
     background-repeat: repeat;}
 """
 
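Note: the `#chatbot` CSS rule only takes effect because the chatbot component later in the file is created with elem_id="chatbot". A minimal, self-contained sketch of that pattern, assuming only that gradio is installed (the tile URL below is a placeholder, not the Space's asset):

import gradio as gr

# CSS passed to gr.Blocks can target a component by the elem_id it was given.
css = """
#chatbot {
    background-image: url('https://example.com/tile.png');  /* placeholder URL */
    background-repeat: repeat;}
"""

with gr.Blocks(css=css) as demo:
    chatbot = gr.Chatbot(elem_id="chatbot", label="ChatIDEFICS")

if __name__ == "__main__":
    demo.launch()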
@@ -580,194 +580,49 @@ with gr.Blocks(title="IDEFICS-Chat", theme=gr.themes.Base(), css=css2) as demo:
 """
 )
 
-    with gr.Row():
-
-
-
-
-
-
-
-
-
-
-
-
-        imagebox = gr.Image(type="filepath", label="Image input", visible=False)
-
-        with gr.Accordion("Advanced parameters", open=False, visible=True) as parameter_row:
-            max_new_tokens = gr.Slider(
-                minimum=0,
-                maximum=2048,
-                value=512,
-                step=1,
-                interactive=True,
-                label="Maximum number of new tokens to generate",
-            )
-            min_length = gr.Slider(
-                minimum=0,
-                maximum=50,
-                value=0,
-                step=1,
-                interactive=True,
-                label="Minimum number of new tokens to generate",
-            )
-            repetition_penalty = gr.Slider(
-                minimum=0.0,
-                maximum=5.0,
-                value=1.0,
-                step=0.1,
-                interactive=True,
-                label="Repetition penalty",
-                info="1.0 means no penalty",
-            )
-            no_repeat_ngram_size = gr.Slider(
-                minimum=0,
-                maximum=10,
-                value=0,
-                step=1,
-                interactive=True,
-                label="N-gram repetition threshold",
-                info="If set to int > 0, all ngrams of that size can only occur once.",
-            )
-            decoding_strategy = gr.Radio(
-                [
-                    "Greedy",
-                    # "beam_search",
-                    # "beam_sampling",
-                    # "sampling_top_k",
-                    "Top P Sampling",
-                ],
-                value="Top P Sampling",
-                label="Decoding strategy",
-                interactive=True,
-            )
-            temperature = gr.Slider(
-                minimum=0.0,
-                maximum=5.0,
-                value=1.2,
-                step=0.1,
-                interactive=True,
-                label="Sampling temperature",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider.update(
-                    visible=(
-                        selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
-                    )
-                ),
-                inputs=decoding_strategy,
-                outputs=temperature,
-            )
-            num_beams = gr.Slider(
-                minimum=0,
-                maximum=20,
-                value=3.0,
-                step=1.0,
-                interactive=True,
-                visible=False,
-                label="Number of beams",
-                info="Only used if `decoding_strategy` is `beam_search` or `beam_sampling`.",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider.update(visible=(selection in ["beam_search", "beam_sampling"])),
-                inputs=decoding_strategy,
-                outputs=num_beams,
-            )
-            top_p = gr.Slider(
-                minimum=0.0,
-                maximum=1.0,
-                value=0.8,
-                step=0.01,
-                interactive=True,
-                visible=True,
-                label="Top P",
-                info=(
-                    "If set to float < 1, only the smallest set of most probable tokens with probabilities that"
-                    " add up to top_p or higher are kept for generation."
-                ),
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider.update(visible=(selection in ["Top P Sampling"])),
-                inputs=decoding_strategy,
-                outputs=top_p,
-            )
-            top_k = gr.Slider(
-                minimum=0,
-                maximum=500,
-                value=50,
-                step=1,
-                interactive=True,
-                visible=False,
-                label="Top K",
-                info="The number of highest probability vocabulary tokens to keep for top-k-filtering.",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider.update(visible=(selection in ["sampling_top_k"])),
-                inputs=decoding_strategy,
-                outputs=top_k,
-            )
-            length_penalty = gr.Slider(
-                minimum=-1000.0,
-                maximum=1000.0,
-                value=1.0,
-                step=0.1,
-                interactive=True,
-                visible=False,
-                label="Length penalty",
-                info=(
-                    "length_penalty > 0.0 promotes longer sequences, while length_penalty < 0.0 encourages shorter"
-                    " sequences. Only used if `decoding_strategy` is `beam_search` or `beam_sampling`."
-                ),
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider.update(visible=(selection in ["beam_search", "beam_sampling"])),
-                inputs=decoding_strategy,
-                outputs=length_penalty,
-            )
-            penalty_alpha = gr.Slider(
-                minimum=0.0,
-                maximum=5.0,
-                value=0.95,
-                step=0.05,
-                interactive=True,
-                visible=False,
-                label="Penalty alpha",
-                info="Only used if `decoding_strategy` is `contrastive_sampling`.",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider.update(visible=(selection in ["contrastive_sampling"])),
-                inputs=decoding_strategy,
-                outputs=penalty_alpha,
-            )
-
-    with gr.Column(scale=6):
+    #with gr.Row():
+    #    with gr.Column(): #(scale=3):
+    with gr.Row(elem_id="model_selector_row"):
+        model_selector = gr.Dropdown(
+            choices=MODELS,
+            value="HuggingFaceM4/idefics-9b-instruct",
+            interactive=True,
+            show_label=False,
+            container=False,
+            label="Model"
+        )
+    processor, tokenizer, model = load_processor_tokenizer_model(model_selector.value)
 
-
-            """
-            Pre-fetch the images that are passed in the chatbot default history.
-            """
-            return user_prompt_list_to_markdown(handle_manual_images_in_user_prompt(user_prompt_str))
+    imagebox = gr.Image(type="filepath", label="Image input", visible=False)
 
-
-
-
-
-
-
-
-
-
-
+    with gr.Row():
+        #with gr.Column(scale=6):
+
+        def prefetch_images_in_history(user_prompt_str):
+            """
+            Pre-fetch the images that are passed in the chatbot default history.
+            """
+            return user_prompt_list_to_markdown(handle_manual_images_in_user_prompt(user_prompt_str))
+
+        chatbot = gr.Chatbot(
+            elem_id="chatbot",
+            label="ChatIDEFICS",
+            visible=True,
+            height=550,
+            value=[
+                [
+                    (
+                        prefetch_images_in_history(
+                            """Write a funny story including the following characters.
 There should be drama and most importantly romance.
 Character 1:<fake_token_around_image><image:https://m.media-amazon.com/images/M/MV5BODg1OTczMWEtNTU3MS00OTUzLThjODEtNDg1MWQwZmExYmFlXkEyXkFqcGdeQWFybm8@._V1_QL75_UX500_CR0,0,500,281_.jpg><fake_token_around_image>
 Character 2:<fake_token_around_image><image:https://static.wikia.nocookie.net/dreamworks/images/0/09/Shrek_dreamworks.png/revision/latest?cb=20170921225439><fake_token_around_image>
 Character 3:<fake_token_around_image><image:https://upload.wikimedia.org/wikipedia/en/e/ef/Marsupilami_1.jpg><fake_token_around_image>
 The plot should take place in the world pictured here:<fake_token_around_image><image:https://www.astronomy.com/wp-content/uploads/sites/2/2021/09/ON1.jpg><fake_token_around_image>"""
-
-
-
-
+                        )
+                    ),
+                    (
+                        """Once upon a time, in a world where the sky was a dome and the ground was covered in grass, there lived a beautiful princess named Mulan. She was kind, brave, and always ready to help others. One day, while walking through the forest, she stumbled upon a strange creature. It was a green ogre named Shrek, who was lost and looking for his way back home.
 
 Mulan, being the kind-hearted person she was, decided to help Shrek find his way back. As they traveled together, they began to develop a strong bond. Shrek was impressed by Mulan's bravery and determination, while Mulan admired Shrek's loyalty and sense of humor.
 
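The hunk above seeds gr.Chatbot with a default [user, assistant] exchange, running the user turn through prefetch_images_in_history so the custom <image:URL> markup renders as images. A runnable sketch of the same idea; the conversion function here is a simplified, hypothetical stand-in for the app's user_prompt_list_to_markdown / handle_manual_images_in_user_prompt helpers:

import re

import gradio as gr

def prefetch_images_in_history(user_prompt_str: str) -> str:
    """Hypothetical stand-in for the app's helpers: strip the
    <fake_token_around_image> markers and rewrite <image:URL> as
    markdown image tags that gr.Chatbot can render."""
    stripped = user_prompt_str.replace("<fake_token_around_image>", "")
    return re.sub(r"<image:([^>]+)>", r"![](\1)", stripped)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        elem_id="chatbot",
        label="ChatIDEFICS",
        height=550,
        # One [user, assistant] pair pre-populates the conversation.
        value=[
            [
                prefetch_images_in_history(
                    "Character 3:<fake_token_around_image>"
                    "<image:https://upload.wikimedia.org/wikipedia/en/e/ef/Marsupilami_1.jpg>"
                    "<fake_token_around_image>"
                ),
                "Once upon a time...",
            ]
        ],
    )

if __name__ == "__main__":
    demo.launch()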
@@ -780,27 +635,178 @@ Finally, they reached Shrek's home, and he was reunited with his family and frie
 Mulan was overjoyed, and they shared a passionate kiss. From that day on, they lived happily ever after, exploring the world together and facing any challenges that came their way.
 
 And so, the story of Mulan and Shrek's romance came to an end, leaving a lasting impression on all who heard it."""
-
-            ],
+                    ),
                 ],
-
-
-        with gr.Row():
-            with gr.Column(scale=7):
-                textbox.render()
-            with gr.Column(scale=1, min_width=80):
-                submit_btn = gr.Button(value="Submit", visible=True)
-            with gr.Column(scale=1, min_width=10):
-                clear_btn = gr.ClearButton([textbox, chatbot])
-            with gr.Column(scale=1, min_width=10):
-                upload_btn = gr.UploadButton("📁", file_types=["image"])
-        with gr.Group():
-            with gr.Row():
-                with gr.Column(scale=1, min_width=50):
-                    dope_bttn = gr.Button("Dope🔥")
-                with gr.Column(scale=1, min_width=50):
-                    problematic_bttn = gr.Button("Problematic😬")
+            ],
+        )
 
+        with gr.Row():
+            with gr.Group():
+                with gr.Row():
+                    with gr.Column(scale=7):
+                        textbox.render()
+                    with gr.Column(scale=1, min_width=80):
+                        submit_btn = gr.Button(value="Submit", visible=True)
+                    with gr.Column(scale=1, min_width=10):
+                        clear_btn = gr.ClearButton([textbox, chatbot])
+                    with gr.Column(scale=1, min_width=10):
+                        upload_btn = gr.UploadButton("📁", file_types=["image"])
+        with gr.Row():
+            with gr.Group():
+                with gr.Row():
+                    with gr.Column(scale=1, min_width=50):
+                        dope_bttn = gr.Button("Dope🔥")
+                    with gr.Column(scale=1, min_width=50):
+                        problematic_bttn = gr.Button("Problematic😬")
+
+        with gr.Row():
+            with gr.Accordion("Advanced parameters", open=False, visible=True) as parameter_row:
+                max_new_tokens = gr.Slider(
+                    minimum=0,
+                    maximum=2048,
+                    value=512,
+                    step=1,
+                    interactive=True,
+                    label="Maximum number of new tokens to generate",
+                )
+                min_length = gr.Slider(
+                    minimum=0,
+                    maximum=50,
+                    value=0,
+                    step=1,
+                    interactive=True,
+                    label="Minimum number of new tokens to generate",
+                )
+                repetition_penalty = gr.Slider(
+                    minimum=0.0,
+                    maximum=5.0,
+                    value=1.0,
+                    step=0.1,
+                    interactive=True,
+                    label="Repetition penalty",
+                    info="1.0 means no penalty",
+                )
+                no_repeat_ngram_size = gr.Slider(
+                    minimum=0,
+                    maximum=10,
+                    value=0,
+                    step=1,
+                    interactive=True,
+                    label="N-gram repetition threshold",
+                    info="If set to int > 0, all ngrams of that size can only occur once.",
+                )
+                decoding_strategy = gr.Radio(
+                    [
+                        "Greedy",
+                        # "beam_search",
+                        # "beam_sampling",
+                        # "sampling_top_k",
+                        "Top P Sampling",
+                    ],
+                    value="Top P Sampling",
+                    label="Decoding strategy",
+                    interactive=True,
+                )
+                temperature = gr.Slider(
+                    minimum=0.0,
+                    maximum=5.0,
+                    value=1.2,
+                    step=0.1,
+                    interactive=True,
+                    label="Sampling temperature",
+                )
+                decoding_strategy.change(
+                    fn=lambda selection: gr.Slider.update(
+                        visible=(
+                            selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
+                        )
+                    ),
+                    inputs=decoding_strategy,
+                    outputs=temperature,
+                )
+                num_beams = gr.Slider(
+                    minimum=0,
+                    maximum=20,
+                    value=3.0,
+                    step=1.0,
+                    interactive=True,
+                    visible=False,
+                    label="Number of beams",
+                    info="Only used if `decoding_strategy` is `beam_search` or `beam_sampling`.",
+                )
+                decoding_strategy.change(
+                    fn=lambda selection: gr.Slider.update(visible=(selection in ["beam_search", "beam_sampling"])),
+                    inputs=decoding_strategy,
+                    outputs=num_beams,
+                )
+                top_p = gr.Slider(
+                    minimum=0.0,
+                    maximum=1.0,
+                    value=0.8,
+                    step=0.01,
+                    interactive=True,
+                    visible=True,
+                    label="Top P",
+                    info=(
+                        "If set to float < 1, only the smallest set of most probable tokens with probabilities that"
+                        " add up to top_p or higher are kept for generation."
+                    ),
+                )
+                decoding_strategy.change(
+                    fn=lambda selection: gr.Slider.update(visible=(selection in ["Top P Sampling"])),
+                    inputs=decoding_strategy,
+                    outputs=top_p,
+                )
+                top_k = gr.Slider(
+                    minimum=0,
+                    maximum=500,
+                    value=50,
+                    step=1,
+                    interactive=True,
+                    visible=False,
+                    label="Top K",
+                    info="The number of highest probability vocabulary tokens to keep for top-k-filtering.",
+                )
+                decoding_strategy.change(
+                    fn=lambda selection: gr.Slider.update(visible=(selection in ["sampling_top_k"])),
+                    inputs=decoding_strategy,
+                    outputs=top_k,
+                )
+                length_penalty = gr.Slider(
+                    minimum=-1000.0,
+                    maximum=1000.0,
+                    value=1.0,
+                    step=0.1,
+                    interactive=True,
+                    visible=False,
+                    label="Length penalty",
+                    info=(
+                        "length_penalty > 0.0 promotes longer sequences, while length_penalty < 0.0 encourages shorter"
+                        " sequences. Only used if `decoding_strategy` is `beam_search` or `beam_sampling`."
+                    ),
+                )
+                decoding_strategy.change(
+                    fn=lambda selection: gr.Slider.update(visible=(selection in ["beam_search", "beam_sampling"])),
+                    inputs=decoding_strategy,
+                    outputs=length_penalty,
+                )
+                penalty_alpha = gr.Slider(
+                    minimum=0.0,
+                    maximum=5.0,
+                    value=0.95,
+                    step=0.05,
+                    interactive=True,
+                    visible=False,
+                    label="Penalty alpha",
+                    info="Only used if `decoding_strategy` is `contrastive_sampling`.",
+                )
+                decoding_strategy.change(
+                    fn=lambda selection: gr.Slider.update(visible=(selection in ["contrastive_sampling"])),
+                    inputs=decoding_strategy,
+                    outputs=penalty_alpha,
+                )
+
+
 def model_inference(
     user_prompt_str,
     chat_history,
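Throughout the accordion, each decoding_strategy.change handler shows or hides the sliders relevant to the selected strategy. A condensed, runnable sketch of that toggle pattern, using gr.update (the component-agnostic equivalent of the gr.Slider.update calls in the diff):

import gradio as gr

with gr.Blocks() as demo:
    decoding_strategy = gr.Radio(
        ["Greedy", "Top P Sampling"],
        value="Top P Sampling",
        label="Decoding strategy",
    )
    temperature = gr.Slider(0.0, 5.0, value=1.2, step=0.1, label="Sampling temperature")
    top_p = gr.Slider(0.0, 1.0, value=0.8, step=0.01, label="Top P")

    # Hide the sampling-only sliders whenever the user picks greedy decoding.
    def toggle(selection):
        sampling = selection == "Top P Sampling"
        return gr.update(visible=sampling), gr.update(visible=sampling)

    decoding_strategy.change(fn=toggle, inputs=decoding_strategy, outputs=[temperature, top_p])

if __name__ == "__main__":
    demo.launch()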