diff --git a/enterprise/app/services/enterprise/message_templates/response_bot_service.rb b/enterprise/app/services/enterprise/message_templates/response_bot_service.rb
index 154c92936..03fe34a44 100644
--- a/enterprise/app/services/enterprise/message_templates/response_bot_service.rb
+++ b/enterprise/app/services/enterprise/message_templates/response_bot_service.rb
@@ -4,13 +4,22 @@ class Enterprise::MessageTemplates::ResponseBotService
   def perform
     ActiveRecord::Base.transaction do
       response = get_response(conversation.messages.last.content)
-      process_response(conversation.messages.last, response)
+      process_response(conversation.messages.last, response['response'])
     end
   rescue StandardError => e
     ChatwootExceptionTracker.new(e, account: conversation.account).capture_exception
     true
   end
 
+  def response_sections(content)
+    sections = ''
+
+    inbox.get_responses(content).each do |response|
+      sections += "{context_id: #{response.id}, context: #{response.question} ? #{response.answer}},"
+    end
+    sections
+  end
+
   private
 
   delegate :contact, :account, :inbox, to: :conversation
@@ -34,15 +43,6 @@ class Enterprise::MessageTemplates::ResponseBotService
     message.message_type == 'incoming' ? 'user' : 'system'
   end
 
-  def response_sections(content)
-    sections = ''
-
-    inbox.get_responses(content).each do |response|
-      sections += "{context_id: #{response.id}, context: #{response.question} ? #{response.answer}}"
-    end
-    sections
-  end
-
   def process_response(message, response)
     if response == 'conversation_handoff'
       process_action(message, 'handoff')
diff --git a/enterprise/lib/chat_gpt.rb b/enterprise/lib/chat_gpt.rb
index 86c1b33bd..0ad4ac1b0 100644
--- a/enterprise/lib/chat_gpt.rb
+++ b/enterprise/lib/chat_gpt.rb
@@ -5,37 +5,56 @@ class ChatGpt
   def initialize(context_sections = '')
     @model = 'gpt-4'
-    system_message = { 'role': 'system',
-                       'content': 'You are a very enthusiastic customer support representative who loves ' \
-                                  'to help people! Given the following Context sections from the ' \
-                                  'documentation, continue the conversation with only that information, ' \
-                                  "outputed in markdown format along with context_ids in format 'response \n {context_ids: [values] }' " \
-                                  "\n If you are unsure and the answer is not explicitly written in the documentation, " \
-                                  "say 'Sorry, I don't know how to help with that. Do you want to chat with a human agent?' " \
-                                  "If they ask to Chat with human agent return text 'conversation_handoff'." \
-                                  "Context sections: \n" \
-                                  "\n\n #{context_sections}}" }
-
-    @messages = [
-      system_message
-    ]
+    @messages = [system_message(context_sections)]
   end
 
   def generate_response(input, previous_messages = [])
-    previous_messages.each do |message|
-      @messages << message
-    end
-
+    @messages += previous_messages
     @messages << { 'role': 'user', 'content': input } if input.present?
-    headers = { 'Content-Type' => 'application/json',
-                'Authorization' => "Bearer #{ENV.fetch('OPENAI_API_KEY')}" }
-    body = {
-      model: @model,
-      messages: @messages
-    }.to_json
+    response = request_gpt
+    JSON.parse(response['choices'][0]['message']['content'].strip)
+  end
+
+  private
+
+  def system_message(context_sections)
+    {
+      'role': 'system',
+      'content': system_content(context_sections)
+    }
+  end
+
+  def system_content(context_sections)
+    <<~SYSTEM_PROMPT_MESSAGE
+      You are a very enthusiastic customer support representative who loves to help people.
+      Your answers will always be formatted in valid JSON hash, as shown below. Never respond in non JSON format.
+
+      ```
+      {
+        response: '' ,
+        context_ids: [ids],
+      }
+      ```
+
+      response: will be the next response to the conversation
+
+      context_ids: will be an array of unique context IDs that were used to generate the answer. choose top 3.
+
+      The answers will be generated using the information provided at the end of the prompt under the context sections. You will not respond outside the context of the information provided in context sections.
+
+      If the answer is not provided in context sections, Respond to the customer and ask whether they want to talk to another support agent . If they ask to Chat with another agent, return `conversation_handoff' as the response in JSON response
+
+      ----------------------------------
+      Context sections:
+      #{context_sections}
+    SYSTEM_PROMPT_MESSAGE
+  end
+
+  def request_gpt
+    headers = { 'Content-Type' => 'application/json', 'Authorization' => "Bearer #{ENV.fetch('OPENAI_API_KEY')}" }
+    body = { model: @model, messages: @messages }.to_json
     response = HTTParty.post("#{self.class.base_uri}/v1/chat/completions", headers: headers, body: body)
-    response_body = JSON.parse(response.body)
-    response_body['choices'][0]['message']['content'].strip
+    JSON.parse(response.body)
   end
 end