diff --git a/app/javascript/dashboard/components-next/copilot/Copilot.vue b/app/javascript/dashboard/components-next/copilot/Copilot.vue index 2fcb15bcc..cf867789b 100644 --- a/app/javascript/dashboard/components-next/copilot/Copilot.vue +++ b/app/javascript/dashboard/components-next/copilot/Copilot.vue @@ -1,9 +1,13 @@ diff --git a/app/javascript/dashboard/helper/AnalyticsHelper/events.js b/app/javascript/dashboard/helper/AnalyticsHelper/events.js index e20c48330..bd14ce161 100644 --- a/app/javascript/dashboard/helper/AnalyticsHelper/events.js +++ b/app/javascript/dashboard/helper/AnalyticsHelper/events.js @@ -100,6 +100,12 @@ export const OPEN_AI_EVENTS = Object.freeze({ DISMISS_AI_SUGGESTION: 'OpenAI: Dismiss AI suggestions', }); +export const COPILOT_EVENTS = Object.freeze({ + SEND_SUGGESTED: 'Copilot: Send suggested message', + SEND_MESSAGE: 'Copilot: Sent a message', + USE_CAPTAIN_RESPONSE: 'Copilot: Used captain response', +}); + export const GENERAL_EVENTS = Object.freeze({ COMMAND_BAR: 'Used commandbar', }); diff --git a/app/javascript/dashboard/i18n/locale/en/conversation.json b/app/javascript/dashboard/i18n/locale/en/conversation.json index 994bc8609..8a3792b46 100644 --- a/app/javascript/dashboard/i18n/locale/en/conversation.json +++ b/app/javascript/dashboard/i18n/locale/en/conversation.json @@ -353,5 +353,8 @@ "ONE": "{user} is typing", "TWO": "{user} and {secondUser} are typing", "MULTIPLE": "{user} and {count} others are typing" + }, + "COPILOT": { + "TRY_THESE_PROMPTS": "Try these prompts" } } diff --git a/app/javascript/dashboard/i18n/locale/en/integrations.json b/app/javascript/dashboard/i18n/locale/en/integrations.json index 5d013dfef..dd76f0cdb 100644 --- a/app/javascript/dashboard/i18n/locale/en/integrations.json +++ b/app/javascript/dashboard/i18n/locale/en/integrations.json @@ -306,7 +306,8 @@ "SEND_MESSAGE": "Send message...", "LOADER": "Captain is thinking", "YOU": "You", - "USE": "Use this" + "USE": "Use this", + "RESET": "Reset" }, "FORM": { "CANCEL": "Cancel", diff --git a/config/installation_config.yml b/config/installation_config.yml index aa7990251..2b4241697 100644 --- a/config/installation_config.yml +++ b/config/installation_config.yml @@ -136,7 +136,11 @@ # MARK: Captain Config - name: CAPTAIN_OPEN_AI_API_KEY display_title: 'OpenAI API Key' - description: 'The OpenAI API key for the Captain AI service' + description: 'The API key used to authenticate requests to OpenAI services for Captain AI.' + locked: false +- name: CAPTAIN_OPEN_AI_MODEL + display_title: 'OpenAI Model' + description: 'The OpenAI model configured for use in Captain AI. 
Default: gpt-4o-mini' locked: false # End of Captain Config diff --git a/enterprise/app/controllers/enterprise/api/v1/accounts/conversations_controller.rb b/enterprise/app/controllers/enterprise/api/v1/accounts/conversations_controller.rb index 95c807931..f22812f19 100644 --- a/enterprise/app/controllers/enterprise/api/v1/accounts/conversations_controller.rb +++ b/enterprise/app/controllers/enterprise/api/v1/accounts/conversations_controller.rb @@ -10,11 +10,11 @@ module Enterprise::Api::V1::Accounts::ConversationsController response = Captain::Copilot::ChatService.new( assistant, - messages: copilot_params[:previous_messages], + previous_messages: copilot_params[:previous_messages], conversation_history: @conversation.to_llm_text - ).execute(copilot_params[:message]) + ).generate_response(copilot_params[:message]) - render json: { message: response } + render json: { message: response['response'] } end def permitted_update_params diff --git a/enterprise/app/controllers/enterprise/super_admin/app_configs_controller.rb b/enterprise/app/controllers/enterprise/super_admin/app_configs_controller.rb index eb5cdc84f..8cf8e7258 100644 --- a/enterprise/app/controllers/enterprise/super_admin/app_configs_controller.rb +++ b/enterprise/app/controllers/enterprise/super_admin/app_configs_controller.rb @@ -10,7 +10,7 @@ module Enterprise::SuperAdmin::AppConfigsController when 'internal' @allowed_configs = internal_config_options when 'captain' - @allowed_configs = %w[CAPTAIN_OPEN_AI_API_KEY] + @allowed_configs = %w[CAPTAIN_OPEN_AI_API_KEY CAPTAIN_OPEN_AI_MODEL] else super end diff --git a/enterprise/app/helpers/captain/chat_helper.rb b/enterprise/app/helpers/captain/chat_helper.rb new file mode 100644 index 000000000..128324950 --- /dev/null +++ b/enterprise/app/helpers/captain/chat_helper.rb @@ -0,0 +1,87 @@ +module Captain::ChatHelper + def search_documentation_tool + { + type: 'function', + function: { + name: 'search_documentation', + description: "Use this function to get documentation on functionalities you don't know about.", + parameters: { + type: 'object', + properties: { + search_query: { + type: 'string', + description: 'The search query to look up in the documentation.' + } + }, + required: ['search_query'] + } + } + } + end + + def request_chat_completion + response = @client.chat( + parameters: { + model: @model, + messages: @messages, + tools: [search_documentation_tool], + response_format: { type: 'json_object' } + } + ) + + handle_response(response) + @response + end + + def handle_response(response) + message = response.dig('choices', 0, 'message') + + if message['tool_calls'] + process_tool_calls(message['tool_calls']) + else + @response = JSON.parse(message['content'].strip) + end + end + + def process_tool_calls(tool_calls) + process_tool_call(tool_calls.first) + end + + def process_tool_call(tool_call) + return unless tool_call['function']['name'] == 'search_documentation' + + query = JSON.parse(tool_call['function']['arguments'])['search_query'] + sections = fetch_documentation(query) + append_tool_response(sections) + request_chat_completion + end + + def fetch_documentation(query) + @assistant + .responses + .approved + .search(query) + .map { |response| format_response(response) }.join + end + + def format_response(response) + formatted_response = " + Question: #{response.question} + Answer: #{response.answer} + " + if response.documentable.present? 
&& response.documentable.try(:external_link)
+      formatted_response += "
+        Source: #{response.documentable.external_link}
+      "
+    end
+
+    formatted_response
+  end
+
+  def append_tool_response(sections)
+    @messages << {
+      role: 'assistant',
+      content: "Found the following FAQs in the documentation:\n #{sections}"
+    }
+  end
+end
diff --git a/enterprise/app/services/captain/copilot/chat_service.rb b/enterprise/app/services/captain/copilot/chat_service.rb
index e652456ea..6bcda66fa 100644
--- a/enterprise/app/services/captain/copilot/chat_service.rb
+++ b/enterprise/app/services/captain/copilot/chat_service.rb
@@ -1,78 +1,39 @@
-class Captain::Copilot::ChatService
+require 'openai'
+
+class Captain::Copilot::ChatService < Captain::Llm::BaseOpenAiService
+  include Captain::ChatHelper
+
   def initialize(assistant, config)
+    super()
+
     @assistant = assistant
     @conversation_history = config[:conversation_history]
-    @previous_messages = config[:previous_messages]
-    build_agent
-    register_search_documentation
+    @previous_messages = config[:previous_messages] || []
+    @messages = [system_message, conversation_history_context] + @previous_messages
+    @response = ''
   end

-  def execute(input)
-    @agent.execute(input, conversation_history_context)
+  def generate_response(input)
+    @messages << { role: 'user', content: input } if input.present?
+    request_chat_completion
   end

   private

-  def build_agent
-    @agent = Captain::Agent.new(
-      name: 'Support Copilot',
-      config: {
-        description: 'an AI assistant helping support agents',
-        messages: @previous_messages,
-        persona: 'You are an AI copilot for customer support agents',
-        goal: "
-          Your goal is help the support agents with meaningful responses based on the knowledge you have
-          and you can gather using tools provided about the product or service.
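Reviewer note on the new call shape: the service now takes `previous_messages:` as a keyword, flattens everything into an OpenAI messages array, and `generate_response` returns the parsed JSON hash, which is why the controller above renders `response['response']`. A hypothetical console sketch of the flow; the ids, message content, and the `reasoning`/`response` keys (taken from the copilot prompt's schema) are stand-ins, not fixtures from this PR:

```ruby
# Hypothetical console session; ids and strings are illustrative only.
assistant = Captain::Assistant.find(1)
conversation = Conversation.find(42)

service = Captain::Copilot::ChatService.new(
  assistant,
  previous_messages: [{ role: 'user', content: 'Earlier copilot question' }],
  conversation_history: conversation.to_llm_text
)

# @messages is now [system prompt, conversation history, *previous_messages];
# generate_response appends the user turn and runs the completion loop.
reply = service.generate_response('Summarise the customer issue')

reply['reasoning'] # => why the answer was chosen
reply['response']  # => the Markdown answer the controller returns as `message`
```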
- ", - secrets: { - OPENAI_API_KEY: InstallationConfig.find_by!(name: 'CAPTAIN_OPEN_AI_API_KEY').value - }, - max_iterations: 2 - } - ) + def system_message + { + role: 'system', + content: Captain::Llm::SystemPromptsService.copilot_response_generator(@assistant.config['product_name']) + } end def conversation_history_context - " - Message History with the user is below: - #{@conversation_history} - " - end - - def register_search_documentation - tool = Captain::Tool.new( - name: 'search_documentation', - config: { - description: "Use this function to get documentation on functionalities you don't know about.", - properties: { - search_query: { - type: 'string', - description: 'The search query to look up in the documentation.', - required: true - } - }, - memory: { - assistant_id: @assistant.id, - account_id: @assistant.account_id - } - } - ) - - register_tool tool - end - - def register_tool(tool) - tool.register_method do |inputs, _, memory| - assistant = Captain::Assistant.find(memory[:assistant_id]) - assistant - .responses - .approved - .search(inputs['search_query']) - .map do |response| - "\n\nQuestion: #{response[:question]}\nAnswer: #{response[:answer]}" - end.join - end - - @agent.register_tool tool + { + role: 'system', + content: " + Message History with the user is below: + #{@conversation_history} + " + } end end diff --git a/enterprise/app/services/captain/llm/assistant_chat_service.rb b/enterprise/app/services/captain/llm/assistant_chat_service.rb index 6a2cad6aa..ef5ae2c0f 100644 --- a/enterprise/app/services/captain/llm/assistant_chat_service.rb +++ b/enterprise/app/services/captain/llm/assistant_chat_service.rb @@ -1,6 +1,8 @@ require 'openai' class Captain::Llm::AssistantChatService < Captain::Llm::BaseOpenAiService + include Captain::ChatHelper + def initialize(assistant: nil) super() @@ -23,80 +25,4 @@ class Captain::Llm::AssistantChatService < Captain::Llm::BaseOpenAiService content: Captain::Llm::SystemPromptsService.assistant_response_generator(@assistant.config['product_name']) } end - - def search_documentation_tool - { - type: 'function', - function: { - name: 'search_documentation', - description: "Use this function to get documentation on functionalities you don't know about.", - parameters: { - type: 'object', - properties: { - search_query: { - type: 'string', - description: 'The search query to look up in the documentation.' 
- } - }, - required: ['search_query'] - } - } - } - end - - def request_chat_completion - response = @client.chat( - parameters: { - model: DEFAULT_MODEL, - messages: @messages, - tools: [search_documentation_tool], - response_format: { type: 'json_object' } - } - ) - - handle_response(response) - @response - end - - def handle_response(response) - message = response.dig('choices', 0, 'message') - - if message['tool_calls'] - process_tool_calls(message['tool_calls']) - else - @response = JSON.parse(message['content'].strip) - end - end - - def process_tool_calls(tool_calls) - process_tool_call(tool_calls.first) - end - - def process_tool_call(tool_call) - return unless tool_call['function']['name'] == 'search_documentation' - - query = JSON.parse(tool_call['function']['arguments'])['search_query'] - sections = fetch_documentation(query) - append_tool_response(sections) - request_chat_completion - end - - def fetch_documentation(query) - @assistant - .responses - .approved - .search(query) - .map { |response| format_response(response) }.join - end - - def format_response(response) - "\n\nQuestion: #{response[:question]}\nAnswer: #{response[:answer]}" - end - - def append_tool_response(sections) - @messages << { - role: 'assistant', - content: "Found the following FAQs in the documentation:\n #{sections}" - } - end end diff --git a/enterprise/app/services/captain/llm/base_open_ai_service.rb b/enterprise/app/services/captain/llm/base_open_ai_service.rb index b8f9d68ab..7b542c0f2 100644 --- a/enterprise/app/services/captain/llm/base_open_ai_service.rb +++ b/enterprise/app/services/captain/llm/base_open_ai_service.rb @@ -6,7 +6,15 @@ class Captain::Llm::BaseOpenAiService access_token: InstallationConfig.find_by!(name: 'CAPTAIN_OPEN_AI_API_KEY').value, log_errors: Rails.env.development? 
) + setup_model rescue StandardError => e raise "Failed to initialize OpenAI client: #{e.message}" end + + private + + def setup_model + config_value = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_MODEL')&.value + @model = (config_value.presence || DEFAULT_MODEL) + end end diff --git a/enterprise/app/services/captain/llm/contact_attributes_service.rb b/enterprise/app/services/captain/llm/contact_attributes_service.rb index 6653d9874..e942a2a56 100644 --- a/enterprise/app/services/captain/llm/contact_attributes_service.rb +++ b/enterprise/app/services/captain/llm/contact_attributes_service.rb @@ -1,13 +1,10 @@ class Captain::Llm::ContactAttributesService < Captain::Llm::BaseOpenAiService - DEFAULT_MODEL = 'gpt-4o'.freeze - - def initialize(assistant, conversation, model = DEFAULT_MODEL) + def initialize(assistant, conversation) super() @assistant = assistant @conversation = conversation @contact = conversation.contact @content = "#Contact\n\n#{@contact.to_llm_text} \n\n#Conversation\n\n#{@conversation.to_llm_text}" - @model = model end def generate_and_update_attributes diff --git a/enterprise/app/services/captain/llm/contact_notes_service.rb b/enterprise/app/services/captain/llm/contact_notes_service.rb index 205a4b002..245e0d96a 100644 --- a/enterprise/app/services/captain/llm/contact_notes_service.rb +++ b/enterprise/app/services/captain/llm/contact_notes_service.rb @@ -1,13 +1,10 @@ class Captain::Llm::ContactNotesService < Captain::Llm::BaseOpenAiService - DEFAULT_MODEL = 'gpt-4o'.freeze - - def initialize(assistant, conversation, model = DEFAULT_MODEL) + def initialize(assistant, conversation) super() @assistant = assistant @conversation = conversation @contact = conversation.contact @content = "#Contact\n\n#{@contact.to_llm_text} \n\n#Conversation\n\n#{@conversation.to_llm_text}" - @model = model end def generate_and_update_notes diff --git a/enterprise/app/services/captain/llm/conversation_faq_service.rb b/enterprise/app/services/captain/llm/conversation_faq_service.rb index fa80a6fcd..47cdd6cf4 100644 --- a/enterprise/app/services/captain/llm/conversation_faq_service.rb +++ b/enterprise/app/services/captain/llm/conversation_faq_service.rb @@ -1,12 +1,11 @@ class Captain::Llm::ConversationFaqService < Captain::Llm::BaseOpenAiService DISTANCE_THRESHOLD = 0.3 - def initialize(assistant, conversation, model = DEFAULT_MODEL) + def initialize(assistant, conversation) super() @assistant = assistant @conversation = conversation @content = conversation.to_llm_text - @model = model end def generate_and_deduplicate diff --git a/enterprise/app/services/captain/llm/faq_generator_service.rb b/enterprise/app/services/captain/llm/faq_generator_service.rb index bd934ed8d..6462f5c47 100644 --- a/enterprise/app/services/captain/llm/faq_generator_service.rb +++ b/enterprise/app/services/captain/llm/faq_generator_service.rb @@ -1,8 +1,7 @@ class Captain::Llm::FaqGeneratorService < Captain::Llm::BaseOpenAiService - def initialize(content, model = DEFAULT_MODEL) + def initialize(content) super() @content = content - @model = model end def generate diff --git a/enterprise/app/services/captain/llm/system_prompts_service.rb b/enterprise/app/services/captain/llm/system_prompts_service.rb index 4a9deca1f..f84a16a18 100644 --- a/enterprise/app/services/captain/llm/system_prompts_service.rb +++ b/enterprise/app/services/captain/llm/system_prompts_service.rb @@ -56,6 +56,48 @@ class Captain::Llm::SystemPromptsService SYSTEM_PROMPT_MESSAGE end + def copilot_response_generator(product_name) + 
<<~SYSTEM_PROMPT_MESSAGE
+      [Identity]
+      You are Captain, a helpful and friendly copilot assistant for support agents using the product #{product_name}. Your primary role is to assist support agents by retrieving information, compiling accurate responses, and guiding them through customer interactions.
+      You should only provide information related to #{product_name} and must not address queries about other products or external events.
+
+      [Context]
+      You will be provided with the message history between the support agent and the customer. Use this context to understand the conversation flow, identify unresolved queries, and ensure responses are relevant and consistent with previous interactions. Always maintain a coherent and professional tone throughout the conversation.
+
+      [Response Guidelines]
+      - Use natural, polite, and conversational language that is clear and easy to follow. Keep sentences short and use simple words.
+      - Provide brief and relevant responses—typically one or two sentences unless a more detailed explanation is necessary.
+      - Do not use your own training data or assumptions to answer queries. Base responses strictly on the provided information.
+      - If the query is unclear, ask concise clarifying questions instead of making assumptions.
+      - Do not try to end the conversation explicitly (e.g., avoid phrases like "Talk soon!" or "Let me know if you need anything else").
+      - Engage naturally and ask relevant follow-up questions when appropriate.
+      - Do not suggest contacting the support team, since the person talking to you is the support agent.
+
+      [Task Instructions]
+      When responding to a query, follow these steps:
+      1. Review the provided conversation to ensure responses align with previous context and avoid repetition.
+      2. If the answer is available, list the steps required to complete the action.
+      3. Share only the details relevant to #{product_name}, and avoid unrelated topics.
+      4. Offer an explanation of how the response was derived based on the given context.
+      5. Always return responses in valid JSON format, as shown in the example below.
+      6. Never suggest contacting support, as you are assisting the support agent directly.
+      7. Write the response in multiple paragraphs and in Markdown format.
+      8. DO NOT use headings in Markdown.
+      9. Cite the sources if you used a tool to find the response.
+
+      ```json
+      {
+        "reasoning": "Explain why the response was chosen based on the provided information.",
+        "response": "Provide the answer only in Markdown format for readability."
+      }
+      ```
+
+      [Error Handling]
+      - If the required information is not found in the provided context, respond with an appropriate message indicating that no relevant data is available.
+      - Avoid speculating or providing unverified information.
+    SYSTEM_PROMPT_MESSAGE
+  end
+
   def assistant_response_generator(product_name)
     <<~SYSTEM_PROMPT_MESSAGE
       [Identity]
diff --git a/enterprise/lib/captain/agent.rb b/enterprise/lib/captain/agent.rb
index 6a490aa7c..f0b511115 100644
--- a/enterprise/lib/captain/agent.rb
+++ b/enterprise/lib/captain/agent.rb
@@ -66,28 +66,33 @@ class Captain::Agent
   def construct_prompt(config)
     return config[:prompt] if config[:prompt]

-    "
+    <<~PROMPT
       Persona: #{config[:persona]}
       Objective: #{config[:goal]}

       Guidelines:
-      - Work diligently until the stated objective is achieved.
-      - Utilize only the provided tools for solving the task. Do not make up names of the functions
-      - Set 'stop: true' when the objective is complete.
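Because both this prompt and the helper's completion request pin `response_format: { type: 'json_object' }`, `handle_response` can safely `JSON.parse` the message content. A small sketch of that contract with a fabricated payload (the keys come from the prompt above):

```ruby
require 'json'

# Fabricated model output following the prompt's schema.
content = '{"reasoning": "The FAQ covers password resets.", ' \
          '"response": "Go to **Settings > Security** and choose *Reset password*."}'

parsed = JSON.parse(content.strip) # what handle_response stores in @response
parsed['response']                 # the Markdown string the copilot endpoint returns
```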
-      - DO NOT provide tool_call as final answer
-      - If you have enough information to provide the details to the user, prepare a final result collecting all the information you have.
+      - Persistently work towards achieving the stated objective without deviation.
+      - Use only the provided tools to complete the task. Avoid inventing or assuming function names.
+      - Set `'stop': true` once the objective is fully achieved.
+      - DO NOT return tool usage as the final result.
+      - If sufficient information is available to deliver a result, compile and present it to the user.
+      - Always return a final result and ENSURE the final result is formatted in Markdown.

       Output Structure:
-      If you find a function, that can be used, directly call the function.
+      1. **Tool Usage:**
+         - If a relevant function is identified, call it directly without unnecessary explanations.

-      When providing the final answer, use the JSON format:
-      {
-        'thought_process': 'Describe the reasoning and steps that led to the final result.',
-        'result': 'The complete answer in text form.',
-        'stop': true
-      }
-    "
+      2. **Final Answer:**
+         When ready to provide a complete response, follow this JSON format:
+
+         ```json
+         {
+           "thought_process": "Explain the reasoning and steps taken to arrive at the final result.",
+           "result": "Provide the complete response in clear, structured text.",
+           "stop": true
+         }
+         ```
+    PROMPT
   end

   def prepare_tools(tools = [])
@@ -126,7 +131,7 @@ class Captain::Agent
   end

   def push_to_messages(message)
-    @logger.info("Message: #{message}")
+    @logger.info("\n\n\nMessage: #{message}\n\n\n")
     @messages << message
   end
 end
diff --git a/enterprise/lib/captain/llm_service.rb b/enterprise/lib/captain/llm_service.rb
index 2099fb83c..f0faa1002 100644
--- a/enterprise/lib/captain/llm_service.rb
+++ b/enterprise/lib/captain/llm_service.rb
@@ -2,15 +2,16 @@ require 'openai'

 class Captain::LlmService
   def initialize(config)
-    @client = OpenAI::Client.new(access_token: config[:api_key]) do |f|
-      f.response :logger, Logger.new($stdout), bodies: true
-    end
+    @client = OpenAI::Client.new(
+      access_token: config[:api_key],
+      log_errors: Rails.env.development?
+    )
     @logger = Rails.logger
   end

   def call(messages, functions = [])
     openai_params = {
-      model: 'gpt-4o-mini',
+      model: 'gpt-4o',
       response_format: { type: 'json_object' },
       messages: messages
     }
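A closing note for testers: the gpt-4o bump here only affects Captain::LlmService, while every Captain::Llm::* service now inherits its model from BaseOpenAiService#setup_model, which prefers the CAPTAIN_OPEN_AI_MODEL installation config. A minimal sketch of that resolution order, assuming DEFAULT_MODEL is a constant on the base class:

```ruby
# Sketch of the model resolution introduced by setup_model; DEFAULT_MODEL
# is assumed to be defined on BaseOpenAiService (e.g. 'gpt-4o-mini').
def setup_model
  configured = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_MODEL')&.value

  # presence maps '' to nil, so a blank super-admin entry still falls
  # back to DEFAULT_MODEL instead of sending an empty model name.
  @model = configured.presence || DEFAULT_MODEL
end
```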