from transformers import (
    BertTokenizerFast,
    TFBertForSequenceClassification,
    TextClassificationPipeline,
    AutoTokenizer,
    T5ForConditionalGeneration,
    pipeline,
    AutoModelForQuestionAnswering,
)
import numpy as np
import tensorflow as tf
import gradio as gr
import openai
import os
import torch


# Summarization fine-tuned model
def summarize_text(text, model_path="leadingbridge/summarization"):
    # Load the tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = T5ForConditionalGeneration.from_pretrained(model_path)
    # Tokenize the input text
    inputs = tokenizer.encode(text, return_tensors="pt")
    # Generate the summary
    summary_ids = model.generate(inputs)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary


# Sentiment analysis pre-trained model
def sentiment_analysis(text, model_path="leadingbridge/sentiment-analysis",
                       id2label={0: 'negative', 1: 'positive'}):
    tokenizer = BertTokenizerFast.from_pretrained(model_path)
    model = TFBertForSequenceClassification.from_pretrained(model_path, id2label=id2label)
    pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer)
    result = pipe(text)
    return result


# OpenAI models
openai.api_key = os.environ['openai_api']


def openai_chatbot(prompt):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a general chatbot that can answer anything"},
            {"role": "user", "content": prompt}
        ],
        temperature=0.8,
        max_tokens=3000,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6
    )
    return response.choices[0].message.content


def openai_translation_ec(prompt):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "As a professional translator, your task is to translate the following article to Chinese, ensuring that the original tone, meaning, and context are preserved. It's important to provide an accurate and culturally appropriate translation for the target audience."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.8,
        max_tokens=3000,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=1
    )
    return response.choices[0].message.content


def openai_translation_ce(prompt):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "As a professional translator, your task is to translate the following article to English, ensuring that the original tone, meaning, and context are preserved. It's important to provide an accurate and culturally appropriate translation for the target audience."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.8,
        max_tokens=3000,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=1
    )
    return response.choices[0].message.content


def chatgpt_clone(user_input, history):
    """Accumulate chat history so the chatbot keeps conversational context."""
    history = history or []
    # Flatten the list of (user, bot) tuples into a single prompt string
    s = list(sum(history, ()))
    s.append(user_input)
    inp = ' '.join(s)
    output = openai_chatbot(inp)
    history.append((user_input, output))
    return history, history


# Pre-trained Chinese extractive question-answering model
model = AutoModelForQuestionAnswering.from_pretrained('uer/roberta-base-chinese-extractive-qa')
tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-chinese-extractive-qa')
model.eval()  # inference mode: disables dropout
QA = pipeline('question-answering', model=model, tokenizer=tokenizer)


def cqa(question, context):
    """Run the Chinese QA model on a question/context pair."""
    QA_input = {'question': question, 'context': context}
    return QA(QA_input)


"""# **Gradio Model**"""

# Gradio output model
with gr.Blocks() as demo:
    gr.Markdown('Welcome to the Chinese NLP Demo! Please select a model tab to interact with:')
    with gr.Tab("🤖Chatbot"):
        gr.Markdown("""