Spaces:
Sleeping
Sleeping
File size: 7,243 Bytes
60d4ea5 5001631 60d4ea5 5001631 71f483c 60d4ea5 1e6d097 60d4ea5 1e6d097 71f483c 60d4ea5 1e6d097 60d4ea5 71f483c 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 1e6d097 60d4ea5 71f483c 1e6d097 71f483c 1e6d097 71f483c 1e6d097 71f483c 1e6d097 d4fb664 60d4ea5 1e6d097 b1b0f5e d4fb664 b1b0f5e d4fb664 b1b0f5e 60d4ea5 b1b0f5e 71f483c 60d4ea5 b1b0f5e 60d4ea5 3ac33c9 b1b0f5e 60d4ea5 1e6d097 60d4ea5 d4fb664 60d4ea5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 |
from openai import OpenAI
import gradio as gr
from math import floor, log10
import dotenv
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file
# before `os.getenv` is called below.
dotenv.load_dotenv()
import os
# Initialize OpenAI API
# Module-level client shared by every request in this app; the key is read
# from the environment, so it is never hard-coded in source.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def generate_together_tv_email_response(email_content, bullet_points, tone, length, model_choice):
    """
    Generate a draft email response for TogetherTV via the OpenAI chat API.

    Parameters:
    - email_content (str): The original email to respond to.
    - bullet_points (str): Key points the response must address.
    - tone (str): Desired tone; falls back to "Warm and Kind" when empty.
    - length (str): Length choice, resolved to a word count by get_email_length.
    - model_choice (str): Model name; falls back to "gpt-3.5-turbo" when empty.

    Returns:
    - (str, float): the drafted response and its estimated cost in USD.
      On error, returns (error_message, 0.0) so both Gradio outputs are filled.
    """
    # Resolve the requested length choice to a concrete word count.
    email_length = get_email_length(email_content, length)
    tone_statement = f"\n\nTone: {tone}" if tone else "\n\nTone: Warm and Kind"
    model_choice = model_choice if model_choice else "gpt-3.5-turbo"

    # Construct the user prompt for the AI model.
    prompt = (
        f"Email context: {email_content}\n\n"
        f"Bullet Points: {bullet_points}{tone_statement}\n\n"
        f"Response length: {email_length} words\n\n"
        "Draft an email response."
    )

    # Use OpenAI API to generate the email response.
    try:
        response = client.chat.completions.create(
            model=model_choice,
            messages=[
                {
                    "role": "system",
                    "content": (
                        "This task involves generating a response to an email. "
                        "You will receive the original email, along with specific bullet points that summarize the content the user wishes to address in their response. "
                        "Additionally, the desired tone for the response will be specified. "
                        "Your task is to draft an email response that addresses the bullet points, "
                        "aligns with the tone specified, and reflects the ethos and values of TogetherTV, a TV channel committed to social change and community engagement. "
                        "Grammar MUST BE in UK English. "
                        "Be polite and informal unless told otherwise. "
                        "End the response with a polite closing and a signature from the TogetherTV team."
                    )
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            temperature=0.7,
            max_tokens=500,
            top_p=1
        )
        complete_response = response.choices[0].message.content
        # Estimate the dollar cost of this call from its token usage.
        cost = calculate_token_usage_price(response, model_choice)
        return complete_response, cost
    except Exception as e:
        # BUG FIX: this path previously returned a single string while the
        # success path returns a 2-tuple; the Gradio interface has two output
        # components, so return a (message, cost) pair here as well.
        print(f"An error occurred: {e}")
        return str(e), 0.0
def sig_figs(x: float, precision: int) -> float:
    """
    Round a number to a specified number of significant figures.

    Parameters:
    - x (float): The number to be rounded.
    - precision (int): The number of significant figures to round to (>= 1).

    Returns:
    - float: x rounded to `precision` significant figures.
    """
    x = float(x)
    precision = int(precision)
    # BUG FIX: log10(abs(0)) raises a math domain error; zero is already
    # exact at any precision, so return it directly.
    if x == 0:
        return 0.0
    return round(x, -int(floor(log10(abs(x)))) + (precision - 1))
def calculate_token_usage_price(response, model_choice):
    """
    Estimate the dollar cost of an OpenAI API call from its token usage.

    Parameters:
    - response: OpenAI API response object exposing `usage.prompt_tokens`
      and `usage.completion_tokens`.
    - model_choice (str): Model used for the call; "gpt-3.5-turbo" has its
      own rates, any other value is billed at gpt-4 rates.

    Returns:
    - float: the cost rounded to two significant figures.
    """
    usage = response.usage
    # Rates are quoted in USD per 1,000 tokens, so divide to get per-token.
    if model_choice == "gpt-3.5-turbo":
        input_rate, output_rate = 0.0010 / 1000, 0.0020 / 1000
    else:
        # Any model other than gpt-3.5-turbo is assumed to be gpt-4.
        input_rate, output_rate = 0.03 / 1000, 0.06 / 1000
    total = usage.prompt_tokens * input_rate + usage.completion_tokens * output_rate
    return sig_figs(total, 2)
def word_count(word):
    """
    Count the whitespace-separated words in a string.

    Parameters:
    - word (str): The text to count words in (coerced to str first).

    Returns:
    - int: The number of words in the string.
    """
    return len(str(word).split())
def get_email_length(email_content, length):
    """
    Determine the target word count for the response from the user's choice.

    Parameters:
    - email_content (str): The incoming email (only inspected when matching
      its length).
    - length (str): "Brief", "Matching Client's Email Length", "Detailed",
      or anything else (falls back to 50).

    Returns:
    - int: The number of words for the email response.
    """
    if length == "Brief":
        return 50
    if length == "Detailed":
        return 150
    if length == "Matching Client's Email Length":
        # word_count already returns an int, so the old round() wrapper was
        # redundant; computing it lazily here (instead of eagerly inside a
        # dict literal) avoids pointless work for the fixed-length choices.
        return word_count(email_content)
    # Default to 50 words when no (or an unknown) choice was made.
    return 50
def gradio_interface():
    """
    Build and launch the Gradio UI for the TogetherTV email draft generator.

    Wires generate_together_tv_email_response to text inputs for the email,
    bullet points and tone, plus dropdowns for length and model, with the
    drafted response and its cost estimate as the two outputs.
    """
    # Placeholder example shown in the "Email content" box.
    # BUG FIX: corrected the "reccomendations" typo in the visible example.
    email_example = (
        "EXAMPLE:\n"
        "Hello,\n\n"
        "I recently watched your documentary 'A perfect 14' and found it extremely "
        "eye-opening regarding what plus-models have to go through. I appreciate TogetherTV's effort to shed light "
        "on such critical topics. Do you have any recommendations for other documentaries I could watch?\n\n"
        "Best regards,\n"
        "Jordan Ellis"
    )
    # Placeholder example shown in the "Bullet Points" box.
    email_response_bullet_points = (
        "EXAMPLE:\n"
        "Thanks!\n"
        "We agree with you.\n"
        "'Gamechangers: Vigilante' is a good one to watch"
    )
    interface = gr.Interface(
        fn=generate_together_tv_email_response,
        inputs=[
            gr.components.Textbox(placeholder=email_example, lines=8, label="Email content"),
            gr.components.Textbox(placeholder=email_response_bullet_points, lines=8, label="Bullet Points"),
            gr.components.Textbox(placeholder="E.g., polite, enthusiastic, warm", lines=1, label="Tone"),
            gr.Dropdown(["Brief", "Matching Client's Email Length", "Detailed"], label="Email Response Length"),
            gr.Dropdown(["gpt-3.5-turbo", "gpt-4"], label="Choose Model")
        ],
        outputs=[
            gr.components.Textbox(lines=8, label="Email Response"),
            gr.components.Textbox(label="Cost Estimate ($)")
        ],
        title="TTV Email Draft Generator",
        description="Craft AI-powered email responses effortlessly. Enter the email content, pinpoint key bullet points, choose the tone, and decide the response length. Opt for GPT-3.5 for solid performance or GPT-4 for enhanced quality."
    )
    # share=True publishes a temporary public Gradio link as well as the
    # local server.
    interface.launch(share=True)
if __name__ == "__main__":
    # Launch the Gradio UI only when run as a script, not when imported.
    gradio_interface()
|