# Pull Request Template ## Description Add an api_key override so internal conversation-completion evaluations prefer the system API key and do not consume customer OpenAI credits. ## Type of change Please delete options that are not relevant. - [x] Bug fix (non-breaking change which fixes an issue) ## How Has This Been Tested? Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration. specs and locally ## Checklist: - [x] My code follows the style guidelines of this project - [x] I have performed a self-review of my code - [x] I have commented on my code, particularly in hard-to-understand areas - [ ] I have made corresponding changes to the documentation - [x] My changes generate no new warnings - [x] I have added tests that prove my fix is effective or that my feature works - [x] New and existing unit tests pass locally with my changes - [x] Any dependent changes have been merged and published in downstream modules
77 lines
2.6 KiB
Ruby
77 lines
2.6 KiB
Ruby
# Evaluates whether a conversation is complete and can be auto-resolved.
# Used by InboxPendingConversationsResolutionJob to determine if inactive
# conversations should be resolved or handed off to human agents.
#
# NOTE: This service intentionally does NOT count toward Captain usage limits.
# The response excludes the :message key that Enterprise::Captain::BaseTaskService
# checks for usage tracking. This is an internal operational evaluation,
# not a customer-facing value-add, so we don't charge for it.
class Captain::ConversationCompletionService < Captain::BaseTaskService
  # Structured-output schema the LLM response must conform to.
  RESPONSE_SCHEMA = Captain::ConversationCompletionSchema

  pattr_initialize [:account!, :conversation_display_id!]

  # Evaluates the conversation transcript with the LLM and returns a verdict
  # hash: { complete: Boolean, reason: String }. Falls back to an incomplete
  # verdict when there are no messages or the API call errors out.
  def perform
    transcript = format_messages_as_string
    return default_incomplete_response('No messages found') if transcript.blank?

    # Installation-level model override takes precedence over the default.
    configured_model = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_MODEL')&.value.presence

    result = make_api_call(
      model: configured_model || GPT_MODEL,
      messages: [
        { role: 'system', content: prompt_from_file('conversation_completion') },
        { role: 'user', content: transcript }
      ],
      schema: RESPONSE_SCHEMA
    )

    api_error = result[:error]
    return default_incomplete_response(api_error) if api_error.present?

    parse_response(result[:message])
  end

  private

  # Loads a Liquid prompt template from the enterprise prompts directory.
  def prompt_from_file(file_name)
    prompt_path = Rails.root.join('enterprise/lib/captain/prompts', "#{file_name}.liquid")
    prompt_path.read
  end

  # Renders the full conversation (from the first message) as a plain-text
  # transcript, one "Speaker: text" line per message.
  def format_messages_as_string
    transcript_lines = conversation_messages(start_from: 0).map do |entry|
      speaker = entry[:role] == 'user' ? 'Customer' : 'Assistant'
      "#{speaker}: #{entry[:content]}"
    end
    transcript_lines.join("\n")
  end

  # Normalizes the LLM's structured reply into the service's verdict hash.
  # Anything that is not a Hash is treated as an incomplete conversation.
  def parse_response(message)
    unless message.is_a?(Hash)
      return default_incomplete_response('Invalid response format')
    end

    {
      complete: message['complete'] == true,
      reason: message['reason'] || 'No reason provided'
    }
  end

  # Uniform "not complete" verdict carrying the reason for the caller.
  def default_incomplete_response(reason)
    { complete: false, reason: reason }
  end

  # Prefer the system API key over the account's OpenAI hook key.
  # This is an internal operational evaluation, not a customer-triggered feature,
  # so it should not consume the customer's OpenAI credits on hosted platforms.
  # Falls back to the account hook for self-hosted deployments without a system key.
  def api_key
    @api_key ||= begin
      override = system_api_key.presence
      override || openai_hook&.settings&.dig('api_key')
    end
  end

  # Event name used by the base class for instrumentation/telemetry.
  def event_name
    'captain.conversation_completion'
  end

  # This evaluation never needs follow-up context from the base task flow.
  def build_follow_up_context?
    false
  end
end
|
|
|
# Hook for enterprise/custom builds: prepends an override module of the same
# name onto this service when one is defined (no-op otherwise).
Captain::ConversationCompletionService.prepend_mod_with('Captain::ConversationCompletionService')
|