import os

import dotenv
import gradio as gr
from math import floor, log10
from openai import OpenAI

# Load environment variables from a local .env file
dotenv.load_dotenv()

# Initialize the OpenAI client with the API key from the environment
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
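# The key is assumed to live in the environment or a local .env file,
# e.g. a single line such as:
#   OPENAI_API_KEY=sk-...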
def generate_together_tv_email_response(email_content, bullet_points, tone, length, model_choice):
    """
    Generates an email response based on the provided content, bullet points, tone, and length.
    Chooses between different AI models for response generation.

    Parameters:
    - email_content: The content of the email to respond to
    - bullet_points: Key points to be included in the response
    - tone: The desired tone of the response (optional)
    - length: Desired length of the email response
    - model_choice: AI model to be used for generation (GPT-3.5 or GPT-4)
    """
    # Determine the length of the response based on input parameters
    email_length = get_email_length(email_content, length)
    tone_statement = f"\n\nTone: {tone}" if tone else "\n\nTone: Warm and Kind"
    model_choice = model_choice if model_choice else "gpt-3.5-turbo"

    # Construct the prompt for the AI model
    prompt = (
        f"Email context: {email_content}\n\n"
        f"Bullet Points: {bullet_points}{tone_statement}\n\n"
        f"Response length: {email_length} words\n\n"
        "Draft an email response."
    )
    # Use the OpenAI API to generate the email response
    try:
        response = client.chat.completions.create(
            model=model_choice,
            messages=[
                {
                    "role": "system",
                    "content": (
                        "This task involves generating a response to an email. "
                        "You will receive the original email, along with specific bullet points that summarize the content the user wishes to address in their response. "
                        "Additionally, the desired tone for the response will be specified. "
                        "Your task is to draft an email response that addresses the bullet points, "
                        "aligns with the tone specified, and reflects the ethos and values of TogetherTV, a TV channel committed to social change and community engagement. "
                        "Grammar MUST BE in UK English. "
                        "Be polite and informal unless told otherwise. "
                        "End the response with a polite closing and a signature from the TogetherTV team."
                    )
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            temperature=0.7,
            max_tokens=500,
            top_p=1
        )
        complete_response = response.choices[0].message.content

        # Calculate the cost of token usage
        cost = calculate_token_usage_price(response, model_choice)

        # Return the generated response and the cost
        return complete_response, cost
    except Exception as e:
        print(f"An error occurred: {e}")
        # Return the error message in both output slots so the Gradio interface
        # still receives the two values it expects
        return f"An error occurred: {e}", "N/A"
def sig_figs(x: float, precision: int):
    """
    Rounds a number to a specified number of significant figures.

    Parameters:
    - x (float): The number to be rounded.
    - precision (int): The number of significant figures to round to.

    Returns:
    - A float rounded to the specified number of significant figures.
    """
    x = float(x)
    precision = int(precision)
    if x == 0:
        return 0.0  # log10 is undefined at zero, so short-circuit
    return round(x, -int(floor(log10(abs(x)))) + (precision - 1))
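# For example, sig_figs(0.012345, 2) keeps two significant figures and returns
# 0.012, which is how the cost estimate below is reported.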
def calculate_token_usage_price(response, model_choice):
    """
    Calculates the cost of token usage for a given response and model.

    Parameters:
    - response: The response object from the OpenAI API.
    - model_choice (str): The model used for the response (e.g., 'gpt-3.5-turbo').

    Returns:
    - The cost of token usage rounded to two significant figures.
    """
    tokens_input = response.usage.prompt_tokens
    tokens_output = response.usage.completion_tokens

    # Define the rate per token based on the model choice
    if model_choice == "gpt-3.5-turbo":
        rate_input = 0.0010 / 1000
        rate_output = 0.0020 / 1000
    else:  # Assuming gpt-4
        rate_input = 0.03 / 1000
        rate_output = 0.06 / 1000

    # Calculate the total cost
    cost = (tokens_input * rate_input) + (tokens_output * rate_output)
    return sig_figs(cost, 2)
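# Worked example: a gpt-4 call using 500 prompt tokens and 300 completion tokens
# costs 500 * 0.03/1000 + 300 * 0.06/1000 = 0.015 + 0.018 = 0.033 dollars.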
def word_count(word):
    """
    Counts the number of words in a given string.

    Parameters:
    - word (str): The string to count words in.

    Returns:
    - The number of words in the string.
    """
    return len(str(word).split())
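# Example: word_count("Thanks for getting in touch with us") -> 7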
def get_email_length(email_content, length):
    """
    Determines the email length based on the content and the user's choice.

    Parameters:
    - email_content (str): The content of the email.
    - length (str): The user's choice for the response length.

    Returns:
    - An integer representing the number of words for the email response.
    """
    # Mapping of length choices to their respective word counts
    email_length_options = {
        "Brief": 50,
        "Matching Client's Email Length": word_count(email_content),
        "Detailed": 150
    }
    # Return the length based on the user's choice, defaulting to 50 if not specified
    return email_length_options.get(length, 50)
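# For instance, get_email_length(email, "Brief") returns 50, and an unrecognized
# choice also falls back to the 50-word default.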
def gradio_interface():
    # Define the Gradio interface for the email response generator
    email_example = (
        "EXAMPLE:\n"
        "Hello,\n\n"
        "I recently watched your documentary 'A perfect 14' and found it extremely "
        "eye-opening regarding what plus-size models have to go through. I appreciate TogetherTV's effort to shed light "
        "on such critical topics. Do you have any recommendations for other documentaries I could watch?\n\n"
        "Best regards,\n"
        "Jordan Ellis"
    )
    email_response_bullet_points = (
        "EXAMPLE:\n"
        "Thanks!\n"
        "We agree with you.\n"
        "'Gamechangers: Vigilante' is a good one to watch"
    )
    interface = gr.Interface(
        fn=generate_together_tv_email_response,
        inputs=[
            gr.components.Textbox(placeholder=email_example, lines=8, label="Email content"),
            gr.components.Textbox(placeholder=email_response_bullet_points, lines=8, label="Bullet Points"),
            gr.components.Textbox(placeholder="E.g., polite, enthusiastic, warm", lines=1, label="Tone"),
            gr.Dropdown(["Brief", "Matching Client's Email Length", "Detailed"], label="Email Response Length"),
            gr.Dropdown(["gpt-3.5-turbo", "gpt-4"], label="Choose Model")
        ],
        outputs=[
            gr.components.Textbox(lines=8, label="Email Response"),
            gr.components.Textbox(label="Cost Estimate ($)")
        ],
        title="TTV Email Draft Generator",
        description=(
            "Craft AI-powered email responses effortlessly. Enter the email content, "
            "pinpoint key bullet points, choose the tone, and decide the response length. "
            "Opt for GPT-3.5 for solid performance or GPT-4 for enhanced quality."
        )
    )

    # Launch the Gradio interface
    interface.launch(share=True)
if __name__ == "__main__":
    gradio_interface()
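# A minimal sketch of exercising the generator directly (outside the Gradio UI),
# assuming OPENAI_API_KEY is set; the email text and bullet points here are
# purely illustrative:
#
#   draft, cost = generate_together_tv_email_response(
#       email_content="Hello, I loved 'A perfect 14'. Any similar documentaries?",
#       bullet_points="Thanks!\nTry 'Gamechangers: Vigilante'.",
#       tone="warm",
#       length="Brief",
#       model_choice="gpt-3.5-turbo",
#   )
#   print(draft)
#   print(f"Estimated cost: ${cost}")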