feat: search documentation tool for reply suggestions (#13340)
Co-authored-by: Shivam Mishra <scm.mymail@gmail.com>
This commit is contained in:
@@ -1,4 +1,6 @@
|
||||
class Captain::Tools::BaseTool < RubyLLM::Tool
|
||||
prepend Captain::Tools::Instrumentation
|
||||
|
||||
attr_accessor :assistant
|
||||
|
||||
def initialize(assistant, user: nil)
|
||||
|
||||
10
enterprise/app/services/captain/tools/instrumentation.rb
Normal file
10
enterprise/app/services/captain/tools/instrumentation.rb
Normal file
@@ -0,0 +1,10 @@
|
||||
# Wraps tool execution with LLM instrumentation so every tool invocation is
# traced via Integrations::LlmInstrumentation. Meant to be `prepend`ed into
# RubyLLM::Tool subclasses, so `super` dispatches to the tool's own #execute.
module Captain::Tools::Instrumentation
  extend ActiveSupport::Concern
  include Integrations::LlmInstrumentation

  # Intercepts #execute, records the call under the tool's #name with its
  # arguments, then delegates to the underlying implementation.
  def execute(**args)
    instrument_tool_call(name, args) { super }
  end
end
|
||||
@@ -0,0 +1,42 @@
|
||||
# RubyLLM tool that answers model queries by searching the account's
# approved FAQ/documentation responses and returning a plain-text digest.
class Captain::Tools::SearchReplyDocumentationService < RubyLLM::Tool
  prepend Captain::Tools::Instrumentation

  description 'Search and retrieve documentation/FAQs from knowledge base'
  param :query, desc: 'Search Query', required: true

  # @param account [Account] account whose knowledge base is searched
  # @param assistant [Captain::Assistant, nil] optional assistant used to
  #   narrow the search; falls back to all account responses when absent
  def initialize(account:, assistant: nil)
    @account = account
    @assistant = assistant
    super()
  end

  # Tool identifier surfaced to the LLM (also used as the instrumentation span name).
  def name
    'search_documentation'
  end

  # Runs the search and returns formatted Question/Answer text for each match,
  # or a fallback message when nothing matches.
  def execute(query:)
    Rails.logger.info { "#{self.class.name}: #{query}" }

    responses = search_responses(query)
    return 'No FAQs found for the given query' if responses.empty?

    responses.map { |response| format_response(response) }.join
  end

  private

  # Scopes the search to the assistant's responses when an assistant is given,
  # otherwise searches all of the account's responses. The shared
  # `.approved.search(...)` chain is applied once instead of per branch.
  def search_responses(query)
    scope = @assistant.present? ? @assistant.responses : @account.captain_assistant_responses
    scope.approved.search(query, account_id: @account.id)
  end

  # Renders one FAQ as Question/Answer text, appending the source link when
  # the backing document exposes a truthy #external_link.
  def format_response(response)
    result = "\nQuestion: #{response.question}\nAnswer: #{response.answer}\n"
    # `&.` plus `try` covers both a missing documentable and a documentable
    # without #external_link; read it once instead of three times.
    link = response.documentable&.try(:external_link)
    result << "Source: #{link}\n" if link
    result
  end
end
|
||||
@@ -0,0 +1,24 @@
|
||||
# Enterprise extension of Captain::ReplySuggestionService that equips the
# reply-suggestion flow with the documentation search tool on cloud and
# self-hosted enterprise installations.
module Enterprise::Captain::ReplySuggestionService
  # NOTE(review): any caller-provided `tools` are replaced (not merged) with
  # the search tool — confirm this is intentional.
  def make_api_call(model:, messages:, tools: [])
    return super unless use_search_tool?

    super(model: model, messages: messages, tools: [build_search_tool])
  end

  private

  # The search tool is only enabled on paid deployments (cloud or
  # self-hosted enterprise plan).
  def use_search_tool?
    ChatwootApp.chatwoot_cloud? || ChatwootApp.self_hosted_enterprise?
  end

  # Exposes `has_search_tool` to the prompt template so it can instruct the
  # model to consult the knowledge base before replying.
  def prompt_variables
    return super unless use_search_tool?

    super.merge('has_search_tool' => true)
  end

  # Builds the search tool, scoped to the conversation's assistant when the
  # inbox has one configured.
  def build_search_tool
    Captain::Tools::SearchReplyDocumentationService.new(
      account: account,
      assistant: conversation&.inbox&.captain_assistant
    )
  end
end
|
||||
@@ -1,5 +1,6 @@
|
||||
class Captain::BaseTaskService
|
||||
include Integrations::LlmInstrumentation
|
||||
include Captain::ToolInstrumentation
|
||||
|
||||
# gpt-4o-mini supports 128,000 tokens
|
||||
# 1 token is approx 4 characters
|
||||
@@ -35,44 +36,52 @@ class Captain::BaseTaskService
|
||||
"#{endpoint}/v1"
|
||||
end
|
||||
|
||||
def make_api_call(model:, messages:)
|
||||
def make_api_call(model:, messages:, tools: [])
|
||||
# Community edition prerequisite checks
|
||||
# Enterprise module handles these with more specific error messages (cloud vs self-hosted)
|
||||
return { error: I18n.t('captain.disabled'), error_code: 403 } unless captain_tasks_enabled?
|
||||
return { error: I18n.t('captain.api_key_missing'), error_code: 401 } unless api_key_configured?
|
||||
|
||||
instrumentation_params = build_instrumentation_params(model, messages)
|
||||
instrumentation_method = tools.any? ? :instrument_tool_session : :instrument_llm_call
|
||||
|
||||
response = instrument_llm_call(instrumentation_params) do
|
||||
execute_ruby_llm_request(model: model, messages: messages)
|
||||
response = send(instrumentation_method, instrumentation_params) do
|
||||
execute_ruby_llm_request(model: model, messages: messages, tools: tools)
|
||||
end
|
||||
|
||||
# Build follow-up context for client-side refinement, when applicable
|
||||
if build_follow_up_context? && response[:message].present?
|
||||
response.merge(follow_up_context: build_follow_up_context(messages, response))
|
||||
else
|
||||
response
|
||||
end
|
||||
return response unless build_follow_up_context? && response[:message].present?
|
||||
|
||||
response.merge(follow_up_context: build_follow_up_context(messages, response))
|
||||
end
|
||||
|
||||
def execute_ruby_llm_request(model:, messages:)
|
||||
def execute_ruby_llm_request(model:, messages:, tools: [])
|
||||
Llm::Config.with_api_key(api_key, api_base: api_base) do |context|
|
||||
chat = context.chat(model: model)
|
||||
system_msg = messages.find { |m| m[:role] == 'system' }
|
||||
chat.with_instructions(system_msg[:content]) if system_msg
|
||||
chat = build_chat(context, model: model, messages: messages, tools: tools)
|
||||
|
||||
conversation_messages = messages.reject { |m| m[:role] == 'system' }
|
||||
return { error: 'No conversation messages provided', error_code: 400, request_messages: messages } if conversation_messages.empty?
|
||||
|
||||
add_messages_if_needed(chat, conversation_messages)
|
||||
response = chat.ask(conversation_messages.last[:content])
|
||||
build_ruby_llm_response(response, messages)
|
||||
build_ruby_llm_response(chat.ask(conversation_messages.last[:content]), messages)
|
||||
end
|
||||
rescue StandardError => e
|
||||
ChatwootExceptionTracker.new(e, account: account).capture_exception
|
||||
{ error: e.message, request_messages: messages }
|
||||
end
|
||||
|
||||
def build_chat(context, model:, messages:, tools: [])
|
||||
chat = context.chat(model: model)
|
||||
system_msg = messages.find { |m| m[:role] == 'system' }
|
||||
chat.with_instructions(system_msg[:content]) if system_msg
|
||||
|
||||
if tools.any?
|
||||
tools.each { |tool| chat = chat.with_tool(tool) }
|
||||
chat.on_end_message { |message| record_generation(chat, message, model) }
|
||||
end
|
||||
|
||||
chat
|
||||
end
|
||||
|
||||
def add_messages_if_needed(chat, conversation_messages)
|
||||
return if conversation_messages.length == 1
|
||||
|
||||
@@ -177,5 +186,4 @@ class Captain::BaseTaskService
|
||||
user_msg ? user_msg[:content] : nil
|
||||
end
|
||||
end
|
||||
|
||||
Captain::BaseTaskService.prepend_mod_with('Captain::BaseTaskService')
|
||||
|
||||
@@ -38,3 +38,5 @@ class Captain::ReplySuggestionService < Captain::BaseTaskService
|
||||
'reply_suggestion'
|
||||
end
|
||||
end
|
||||
|
||||
Captain::ReplySuggestionService.prepend_mod_with('Captain::ReplySuggestionService')
|
||||
|
||||
48
lib/captain/tool_instrumentation.rb
Normal file
48
lib/captain/tool_instrumentation.rb
Normal file
@@ -0,0 +1,48 @@
|
||||
# OpenTelemetry instrumentation helpers for Captain tool-calling flows.
# Spans carry Langfuse-compatible attributes so traces render cleanly there.
# Instrumentation is best-effort: failures are reported but never allowed to
# break the underlying LLM call.
module Captain::ToolInstrumentation
  extend ActiveSupport::Concern

  private

  # Custom instrumentation for tool flows - outputs just the message (not full hash)
  # Wraps the yielded LLM call in a span named params[:span_name]; tags it with
  # account id, feature name, and the serialized request messages.
  def instrument_tool_session(params)
    return yield unless ChatwootApp.otel_enabled?

    response = nil
    executed = false
    tracer.in_span(params[:span_name]) do |span|
      span.set_attribute('langfuse.user.id', params[:account_id].to_s) if params[:account_id]
      span.set_attribute('langfuse.tags', [params[:feature_name]].to_json)
      span.set_attribute('langfuse.observation.input', params[:messages].to_json)

      response = yield
      executed = true

      # Output just the message for cleaner Langfuse display
      span.set_attribute('langfuse.observation.output', response[:message] || response.to_json)
    end
    response
  rescue StandardError => e
    ChatwootExceptionTracker.new(e, account: account).capture_exception
    # If the block completed before the failure, reuse its result; otherwise
    # fall back to running it without instrumentation.
    # NOTE(review): when the block itself raised, this re-invokes it, so the
    # underlying LLM call may run twice — confirm that is acceptable.
    executed ? response : yield
  end

  # Records a per-message generation span (model, token usage, input/output)
  # for assistant messages produced during a tool session. Guards with
  # respond_to? because tool-call messages may lack some fields.
  def record_generation(chat, message, model)
    return unless ChatwootApp.otel_enabled?
    return unless message.respond_to?(:role) && message.role.to_s == 'assistant'

    tracer.in_span("llm.#{event_name}.generation") do |span|
      span.set_attribute('gen_ai.system', 'openai')
      span.set_attribute('gen_ai.request.model', model)
      span.set_attribute('gen_ai.usage.input_tokens', message.input_tokens)
      span.set_attribute('gen_ai.usage.output_tokens', message.output_tokens) if message.respond_to?(:output_tokens)
      span.set_attribute('langfuse.observation.input', format_chat_messages(chat))
      span.set_attribute('langfuse.observation.output', message.content.to_s) if message.respond_to?(:content)
    end
  rescue StandardError => e
    # Best-effort: generation recording must never break the chat flow.
    Rails.logger.warn "Failed to record generation: #{e.message}"
  end

  # Serializes every chat message except the last one (the generation being
  # recorded) as the span's input payload.
  def format_chat_messages(chat)
    chat.messages[0...-1].map { |m| { role: m.role.to_s, content: m.content.to_s } }.to_json
  end
end
|
||||
@@ -21,6 +21,10 @@ module ChatwootApp
|
||||
enterprise? && GlobalConfig.get_value('DEPLOYMENT_ENV') == 'cloud'
|
||||
end
|
||||
|
||||
# True only for enterprise installations running outside Chatwoot cloud on
# the 'enterprise' pricing plan. Short-circuit order matches the original:
# enterprise? first, then cloud exclusion, then the config lookup.
def self.self_hosted_enterprise?
  return false unless enterprise?
  return false if chatwoot_cloud?

  GlobalConfig.get_value('INSTALLATION_PRICING_PLAN') == 'enterprise'
end
|
||||
|
||||
# Whether a `custom/` directory exists at the app root (custom builds).
# Memoized with an explicit nil check instead of `||=`: `||=` re-evaluates
# the filesystem stat on every call whenever the cached value is `false`,
# so a negative result was never actually cached.
def self.custom?
  @custom = root.join('custom').exist? if @custom.nil?
  @custom
end
|
||||
|
||||
@@ -31,5 +31,10 @@ General guidelines:
|
||||
- Move the conversation forward
|
||||
- Do not invent product details, policies, or links that weren't mentioned
|
||||
- Reply in the customer's language
|
||||
{% if has_search_tool %}
|
||||
|
||||
**Important**: You have access to a `search_documentation` tool that can search the company's knowledge base for product details, policies, FAQs, and other information.
|
||||
**Use the search_documentation tool first** to find relevant information before composing your reply. This ensures your response is accurate and based on actual company documentation.
|
||||
{% endif %}
|
||||
|
||||
Output only the reply.
|
||||
|
||||
@@ -161,11 +161,6 @@ RSpec.describe Captain::BaseTaskService do
|
||||
end
|
||||
end
|
||||
|
||||
it 'calls execute_ruby_llm_request with correct parameters' do
|
||||
expect(service).to receive(:execute_ruby_llm_request).with(model: model, messages: messages).and_call_original
|
||||
service.send(:make_api_call, model: model, messages: messages)
|
||||
end
|
||||
|
||||
it 'instruments the LLM call' do
|
||||
expect(service).to receive(:instrument_llm_call).and_call_original
|
||||
service.send(:make_api_call, model: model, messages: messages)
|
||||
|
||||
@@ -19,6 +19,8 @@ RSpec.describe Captain::ReplySuggestionService do
|
||||
mock_context = instance_double(RubyLLM::Context, chat: mock_chat)
|
||||
|
||||
allow(Llm::Config).to receive(:with_api_key).and_yield(mock_context)
|
||||
allow(mock_chat).to receive(:with_tool).and_return(mock_chat)
|
||||
allow(mock_chat).to receive(:on_end_message).and_return(mock_chat)
|
||||
allow(mock_chat).to receive(:with_instructions) { |msg| captured_messages << { role: 'system', content: msg } }
|
||||
allow(mock_chat).to receive(:add_message) { |args| captured_messages << args }
|
||||
allow(mock_chat).to receive(:ask) do |msg|
|
||||
|
||||
Reference in New Issue
Block a user