## Initial installations

Run the following in a notebook (prefix each command with `!`) or a terminal; `transformers` is pulled in by the upgrade line, so it is not installed twice:

```bash
pip uninstall -y tensorflow
pip install tensorflow==2.14
pip install --upgrade pip
pip install --upgrade transformers scipy
pip install pymupdf
```
## Summarization

```python
import gradio as gr
import fitz  # PyMuPDF
from transformers import BartTokenizer, BartForConditionalGeneration, pipeline
import scipy.io.wavfile
import numpy as np

# Load the BART summarization model and its tokenizer
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
```
Extract the abstract as the text between the "Abstract" and "Introduction" headings on the first page:

```python
def extract_abstract(pdf_path):
    doc = fitz.open(pdf_path)
    first_page = doc[0].get_text()
    start_idx = first_page.lower().find("abstract")
    end_idx = first_page.lower().find("introduction")
    if start_idx != -1 and end_idx != -1:
        return first_page[start_idx:end_idx].strip()
    else:
        return "Abstract or Introduction heading not found on the first page."

# Specify the path to your PDF file
pdf_path = "/content/article11.pdf"  # Update the path

# Extract and print the abstract
abstract_text = extract_abstract(pdf_path)
print("Extracted Abstract:")
print(abstract_text)
```
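The plain `find` calls above match the first occurrence of "abstract" and "introduction" anywhere on the page, keep the word "Abstract" in the returned slice, and can trip over a numbered "1 Introduction" heading. A slightly more defensive variant is sketched below; the regex and the function name `extract_abstract_re` are illustrative assumptions, not part of the original code:

```python
import re

def extract_abstract_re(page_text):
    # Assumes the common "Abstract ... 1 Introduction" layout; DOTALL lets
    # the capture span line breaks, and the heading words are stripped out.
    match = re.search(r'abstract\s*(.*?)\s*(?:1\s*\.?\s*)?introduction',
                      page_text, flags=re.IGNORECASE | re.DOTALL)
    return match.group(1).strip() if match else None
```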
Render the summary and the achieved reduction as styled HTML (`IPython.display` is the current import path; `IPython.core.display` is deprecated):

```python
from IPython.display import display, HTML

# Display the summary and reduction percentage aesthetically
def display_results(final_summary, original_text):
    reduction_percentage = 100 * (1 - len(final_summary) / len(original_text))
    html_content = f"""
    <div style='padding: 20px; background-color: #f3f3f3; border-radius: 10px;'>
        <h2 style='color: #2c3e50; text-align: center;'>Summary</h2>
        <p style='color: #34495e; font-size: 16px; text-align: justify;'>{final_summary}</p>
        <p style='color: #2c3e50;'><b>Reduction in Text:</b> {reduction_percentage:.2f}%</p>
    </div>
    """
    display(HTML(html_content))
```
Generate the summary with beam search, then clean it up heuristically: hyphenated fragments are re-joined, interior sentence breaks are replaced with "and", and the casing is normalized:

```python
# Summary generation and post-processing
inputs = tokenizer([abstract_text], max_length=1024, return_tensors='pt', truncation=True)
max_length_for_summary = 40
length_penalty_value = 2.0
summary_ids = model.generate(inputs['input_ids'],
                             num_beams=4,
                             max_length=max_length_for_summary,
                             min_length=10,
                             length_penalty=length_penalty_value,
                             early_stopping=True,
                             no_repeat_ngram_size=2)
summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
summary = ' '.join(summary.split())  # Remove extra spaces

# Handle truncated words and adjust periods
words = summary.split()
cleaned_summary = []
for i, word in enumerate(words):
    if '-' in word and i < len(words) - 1:
        # Re-join a word hyphenated across a break with the fragment that follows
        word = word.replace('-', '') + words[i + 1]
        words[i + 1] = ""
    if '.' in word and i != len(words) - 1:
        # Replace an interior sentence break with "and"
        word = word.replace('.', '')
        cleaned_summary.append(word + ' and')
    else:
        cleaned_summary.append(word)

# Lowercase every word, then capitalize the opening word
# (capitalizing before the lowercasing pass would be undone by it)
final_summary = ' '.join(cleaned_summary)
final_summary = ' '.join(w[0].lower() + w[1:] if w.lower() != 'and' else w for w in final_summary.split())
final_summary = final_summary[0].upper() + final_summary[1:]

# Display the results
display_results(final_summary, abstract_text)
```
## Text-to-Speech

```python
from IPython.display import Audio

# Initialize the Bark TTS pipeline
synthesiser = pipeline("text-to-speech", "suno/bark")
```
Synthesize the cleaned summary and save it as a WAV file:

```python
# Convert the summarized text to speech
speech = synthesiser(final_summary, forward_params={"do_sample": True})

# Scale the audio into the int16 range expected by WAV files
audio_data = speech["audio"].squeeze()
normalized_audio_data = np.int16(audio_data / np.max(np.abs(audio_data)) * 32767)

# Save the normalized audio data as a WAV file
output_file = "/content/bark_output.wav"
scipy.io.wavfile.write(output_file, rate=speech["sampling_rate"], data=normalized_audio_data)
print(f"Audio file saved as {output_file}")

# Display an audio player widget to play the generated speech
Audio(output_file)
```
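The Gradio interface below calls a `process_text` function that is never defined in the snippets above. A minimal sketch of what it could look like, chaining the summarization and Bark steps already shown; the function body, its generation settings, and the `(sampling_rate, audio)` return format for the audio output are assumptions, not the original code:

```python
def process_text(text):
    # Summarize the input with the BART model loaded above (assumed settings)
    inputs = tokenizer([text], max_length=1024, return_tensors='pt', truncation=True)
    summary_ids = model.generate(inputs['input_ids'], num_beams=4,
                                 max_length=40, min_length=10,
                                 length_penalty=2.0, early_stopping=True,
                                 no_repeat_ngram_size=2)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

    # Synthesize the summary with the Bark pipeline initialized above
    speech = synthesiser(summary, forward_params={"do_sample": True})
    audio = speech["audio"].squeeze()
    audio = np.int16(audio / np.max(np.abs(audio)) * 32767)

    # Gradio's "audio" output accepts a (sampling_rate, numpy_array) tuple
    return summary, (speech["sampling_rate"], audio)
```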
## Gradio Interface

```python
iface = gr.Interface(
    fn=process_text,
    inputs="text",
    outputs=["text", "audio"],
    title="Summarization and Text-to-Speech",
    description="Enter text to summarize and convert to speech."
)

if __name__ == "__main__":
    iface.launch()
```
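When running in a notebook such as Colab, `iface.launch(share=True)` additionally exposes a temporary public link; on a Hugging Face Space, the plain `iface.launch()` shown above is enough.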