feat: new Captain Editor (#13235)

Co-authored-by: Aakash Bakhle <48802744+aakashb95@users.noreply.github.com>
Co-authored-by: Vishnu Narayanan <iamwishnu@gmail.com>
Co-authored-by: Sivin Varghese <64252451+iamsivin@users.noreply.github.com>
Co-authored-by: iamsivin <iamsivin@gmail.com>
Co-authored-by: aakashb95 <aakashbakhle@gmail.com>
This commit is contained in:
Shivam Mishra
2026-01-21 13:39:07 +05:30
committed by GitHub
parent c77c9c9d8a
commit 6a482926b4
83 changed files with 3887 additions and 1798 deletions

View File

@@ -0,0 +1,325 @@
# Specs for Captain::BaseTaskService, the abstract base class shared by the
# Captain task services (summary, rewrite, reply/label suggestion, follow-up).
# The base class cannot be exercised directly, so a minimal concrete subclass
# is defined inline to test the shared behaviour: conversation lookup,
# message serialization, LLM API-call plumbing, and error handling.
require 'rails_helper'

RSpec.describe Captain::BaseTaskService do
  let(:account) { create(:account) }
  let(:inbox) { create(:inbox, account: account) }
  let(:conversation) { create(:conversation, account: account, inbox: inbox) }

  # Create a concrete test service class since BaseTaskService is abstract
  let(:test_service_class) do
    Class.new(described_class) do
      def perform
        { message: 'Test response' }
      end

      def event_name
        'test_event'
      end
    end
  end
  let(:service) { test_service_class.new(account: account, conversation_display_id: conversation.display_id) }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    # Stub captain enabled check to allow OSS specs to test base functionality
    # without enterprise module interference
    allow(account).to receive(:feature_enabled?).and_call_original
    allow(account).to receive(:feature_enabled?).with('captain_tasks').and_return(true)
  end

  describe '#perform' do
    it 'returns the expected result' do
      result = service.perform
      expect(result).to eq({ message: 'Test response' })
    end
  end

  describe '#event_name' do
    it 'raises NotImplementedError for base class' do
      base_service = described_class.new(account: account, conversation_display_id: conversation.display_id)
      expect { base_service.send(:event_name) }.to raise_error(NotImplementedError, /must implement #event_name/)
    end

    it 'returns custom event name in subclass' do
      expect(service.send(:event_name)).to eq('test_event')
    end
  end

  describe '#conversation' do
    it 'finds conversation by display_id' do
      expect(service.send(:conversation)).to eq(conversation)
    end

    it 'memoizes the conversation' do
      # find_by must run exactly once even across two accesses
      expect(account.conversations).to receive(:find_by).once.and_return(conversation)
      service.send(:conversation)
      service.send(:conversation)
    end
  end

  describe '#conversation_messages' do
    let(:message1) { create(:message, conversation: conversation, message_type: :incoming, content: 'Hello', created_at: 1.hour.ago) }
    let(:message2) { create(:message, conversation: conversation, message_type: :outgoing, content: 'Hi there', created_at: 30.minutes.ago) }
    let(:message3) { create(:message, conversation: conversation, message_type: :incoming, content: 'How are you?', created_at: 10.minutes.ago) }
    let(:private_message) { create(:message, conversation: conversation, message_type: :incoming, content: 'Private', private: true) }

    before do
      # Reference the lazy `let` records so they exist before each example runs
      message1
      message2
      message3
      private_message
    end

    it 'returns messages in array format with role and content' do
      messages = service.send(:conversation_messages)
      expect(messages).to be_an(Array)
      expect(messages.length).to eq(3)
      expect(messages[0]).to eq({ role: 'user', content: 'Hello' })
      expect(messages[1]).to eq({ role: 'assistant', content: 'Hi there' })
      expect(messages[2]).to eq({ role: 'user', content: 'How are you?' })
    end

    it 'excludes private messages' do
      messages = service.send(:conversation_messages)
      contents = messages.pluck(:content)
      expect(contents).not_to include('Private')
    end

    it 'respects token limit' do
      # Create messages that collectively exceed token limit
      # Message validation max is 150000, so create multiple large messages
      10.times do |i|
        create(:message, conversation: conversation, message_type: :incoming,
               content: 'a' * 100_000, created_at: i.minutes.ago)
      end
      messages = service.send(:conversation_messages)
      total_length = messages.sum { |m| m[:content].length }
      expect(total_length).to be <= Captain::BaseTaskService::TOKEN_LIMIT
    end

    it 'respects start_from offset for token counting' do
      # With a start_from offset, fewer messages should fit
      start_from = Captain::BaseTaskService::TOKEN_LIMIT - 100
      messages = service.send(:conversation_messages, start_from: start_from)
      total_length = messages.sum { |m| m[:content].length }
      expect(total_length).to be <= 100
    end
  end

  describe '#make_api_call' do
    let(:model) { 'gpt-4' }
    let(:messages) { [{ role: 'system', content: 'Test' }, { role: 'user', content: 'Hello' }] }
    let(:mock_chat) { instance_double(RubyLLM::Chat) }
    let(:mock_context) { instance_double(RubyLLM::Context, chat: mock_chat) }
    let(:mock_response) { instance_double(RubyLLM::Message, content: 'Response', input_tokens: 10, output_tokens: 20) }

    before do
      allow(Llm::Config).to receive(:with_api_key).and_yield(mock_context)
      allow(mock_chat).to receive(:with_instructions)
      allow(mock_chat).to receive(:ask).and_return(mock_response)
    end

    context 'when captain_tasks is disabled' do
      before do
        allow(account).to receive(:feature_enabled?).with('captain_tasks').and_return(false)
      end

      it 'returns disabled error' do
        result = service.send(:make_api_call, model: model, messages: messages)
        expect(result[:error]).to eq(I18n.t('captain.disabled'))
        expect(result[:error_code]).to eq(403)
      end

      it 'does not make API call' do
        expect(Llm::Config).not_to receive(:with_api_key)
        service.send(:make_api_call, model: model, messages: messages)
      end
    end

    context 'when API key is not configured' do
      before do
        InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_API_KEY')&.destroy
        # Clear memoized api_key
        service.instance_variable_set(:@api_key, nil)
      end

      it 'returns api key missing error' do
        result = service.send(:make_api_call, model: model, messages: messages)
        expect(result[:error]).to eq(I18n.t('captain.api_key_missing'))
        expect(result[:error_code]).to eq(401)
      end

      it 'does not make API call' do
        expect(Llm::Config).not_to receive(:with_api_key)
        service.send(:make_api_call, model: model, messages: messages)
      end
    end

    it 'calls execute_ruby_llm_request with correct parameters' do
      expect(service).to receive(:execute_ruby_llm_request).with(model: model, messages: messages).and_call_original
      service.send(:make_api_call, model: model, messages: messages)
    end

    it 'instruments the LLM call' do
      expect(service).to receive(:instrument_llm_call).and_call_original
      service.send(:make_api_call, model: model, messages: messages)
    end

    it 'returns formatted response with tokens' do
      result = service.send(:make_api_call, model: model, messages: messages)
      expect(result[:message]).to eq('Response')
      expect(result[:usage]['prompt_tokens']).to eq(10)
      expect(result[:usage]['completion_tokens']).to eq(20)
      expect(result[:usage]['total_tokens']).to eq(30)
    end
  end

  # Verifies how the RubyLLM chat is configured: system instructions, replayed
  # conversation history via add_message, and the final ask call.
  describe 'chat setup' do
    let(:model) { 'gpt-4' }
    let(:mock_chat) { instance_double(RubyLLM::Chat) }
    let(:mock_context) { instance_double(RubyLLM::Context, chat: mock_chat) }
    let(:mock_response) { instance_double(RubyLLM::Message, content: 'Response', input_tokens: 10, output_tokens: 20) }

    before do
      allow(Llm::Config).to receive(:with_api_key).and_yield(mock_context)
      allow(mock_response).to receive(:input_tokens).and_return(10)
      allow(mock_response).to receive(:output_tokens).and_return(20)
    end

    context 'with system instructions' do
      let(:messages) { [{ role: 'system', content: 'You are helpful' }, { role: 'user', content: 'Hello' }] }

      it 'applies system instructions to chat' do
        expect(mock_chat).to receive(:with_instructions).with('You are helpful')
        expect(mock_chat).to receive(:ask).with('Hello').and_return(mock_response)
        service.send(:make_api_call, model: model, messages: messages)
      end
    end

    context 'with conversation history' do
      let(:messages) do
        [
          { role: 'system', content: 'You are helpful' },
          { role: 'user', content: 'First message' },
          { role: 'assistant', content: 'First response' },
          { role: 'user', content: 'Second message' }
        ]
      end

      it 'adds conversation history before asking' do
        expect(mock_chat).to receive(:with_instructions).with('You are helpful')
        # History must be replayed in order; only the final user message is asked
        expect(mock_chat).to receive(:add_message).with(role: :user, content: 'First message').ordered
        expect(mock_chat).to receive(:add_message).with(role: :assistant, content: 'First response').ordered
        expect(mock_chat).to receive(:ask).with('Second message').and_return(mock_response)
        service.send(:make_api_call, model: model, messages: messages)
      end
    end

    context 'with single message' do
      let(:messages) { [{ role: 'system', content: 'You are helpful' }, { role: 'user', content: 'Hello' }] }

      it 'does not add conversation history' do
        expect(mock_chat).to receive(:with_instructions).with('You are helpful')
        expect(mock_chat).not_to receive(:add_message)
        expect(mock_chat).to receive(:ask).with('Hello').and_return(mock_response)
        service.send(:make_api_call, model: model, messages: messages)
      end
    end
  end

  describe 'error handling' do
    let(:model) { 'gpt-4' }
    let(:messages) { [{ role: 'user', content: 'Hello' }] }
    let(:error) { StandardError.new('API Error') }
    let(:exception_tracker) { instance_double(ChatwootExceptionTracker) }

    before do
      allow(Llm::Config).to receive(:with_api_key).and_raise(error)
      allow(ChatwootExceptionTracker).to receive(:new).with(error, account: account).and_return(exception_tracker)
      allow(exception_tracker).to receive(:capture_exception)
    end

    it 'tracks exceptions' do
      expect(ChatwootExceptionTracker).to receive(:new).with(error, account: account).and_return(exception_tracker)
      expect(exception_tracker).to receive(:capture_exception)
      service.send(:make_api_call, model: model, messages: messages)
    end

    it 'returns error response' do
      expect(exception_tracker).to receive(:capture_exception)
      result = service.send(:make_api_call, model: model, messages: messages)
      expect(result[:error]).to eq('API Error')
      expect(result[:request_messages]).to eq(messages)
    end
  end

  describe '#api_key' do
    context 'when openai hook is configured' do
      let(:hook) { create(:integrations_hook, account: account, app_id: 'openai', status: 'enabled', settings: { 'api_key' => 'hook-key' }) }

      before { hook }

      it 'uses api key from hook' do
        # Account-level integration hook key takes precedence over the
        # installation-wide CAPTAIN_OPEN_AI_API_KEY config
        expect(service.send(:api_key)).to eq('hook-key')
      end
    end

    context 'when openai hook is not configured' do
      it 'uses system api key' do
        expect(service.send(:api_key)).to eq('test-key')
      end
    end
  end

  describe '#prompt_from_file' do
    it 'reads prompt from file' do
      allow(Rails.root).to receive(:join).and_return(instance_double(Pathname, read: 'Test prompt content'))
      expect(service.send(:prompt_from_file, 'test')).to eq('Test prompt content')
    end
  end

  describe '#extract_original_context' do
    it 'returns the most recent user message' do
      messages = [
        { role: 'user', content: 'First question' },
        { role: 'assistant', content: 'First response' },
        { role: 'user', content: 'Follow-up question' }
      ]
      result = service.send(:extract_original_context, messages)
      expect(result).to eq('Follow-up question')
    end

    it 'returns nil when no user messages exist' do
      messages = [
        { role: 'system', content: 'System prompt' },
        { role: 'assistant', content: 'Response' }
      ]
      result = service.send(:extract_original_context, messages)
      expect(result).to be_nil
    end

    it 'returns the only user message when there is just one' do
      messages = [
        { role: 'system', content: 'System prompt' },
        { role: 'user', content: 'Single question' }
      ]
      result = service.send(:extract_original_context, messages)
      expect(result).to eq('Single question')
    end
  end
end

View File

@@ -0,0 +1,164 @@
# Specs for Captain::FollowUpService, which refines a previous Captain result
# (tone rewrite, summary, suggestion, etc.) based on an agent's follow-up
# message. Covers message construction from the stored follow-up context,
# context updates after a response, and the system-prompt wording for each
# known prior action.
require 'rails_helper'

RSpec.describe Captain::FollowUpService do
  let(:account) { create(:account) }
  let(:inbox) { create(:inbox, account: account) }
  let(:conversation) { create(:conversation, account: account, inbox: inbox) }
  let(:user_message) { 'Make it more concise' }

  # Serialized state from the earlier Captain call: what the original request
  # was, the last generated response, and the refinement exchanges so far.
  let(:follow_up_context) do
    {
      'event_name' => 'professional',
      'original_context' => 'Please help me with this issue',
      'last_response' => 'I would be happy to assist you with this matter.',
      'conversation_history' => [
        { 'role' => 'user', 'content' => 'Make it shorter' },
        { 'role' => 'assistant', 'content' => 'Happy to help with this.' }
      ]
    }
  end
  let(:service) do
    described_class.new(
      account: account,
      follow_up_context: follow_up_context,
      user_message: user_message,
      conversation_display_id: conversation.display_id
    )
  end

  before do
    # Stub captain enabled check to allow specs to test base functionality
    # without enterprise module interference
    allow(account).to receive(:feature_enabled?).and_call_original
    allow(account).to receive(:feature_enabled?).with('captain_tasks').and_return(true)
  end

  describe '#perform' do
    context 'when conversation_display_id is provided' do
      it 'resolves conversation for instrumentation' do
        expect(service.send(:conversation)).to eq(conversation)
      end
    end

    context 'when follow-up context exists' do
      it 'constructs messages array with full conversation history' do
        # Expected order: system prompt, original exchange, stored refinement
        # history, then the new follow-up message last.
        expect(service).to receive(:make_api_call) do |args|
          messages = args[:messages]
          expect(messages).to match(
            [
              a_hash_including(role: 'system', content: include('tone rewrite (professional)')),
              { role: 'user', content: 'Please help me with this issue' },
              { role: 'assistant', content: 'I would be happy to assist you with this matter.' },
              { role: 'user', content: 'Make it shorter' },
              { role: 'assistant', content: 'Happy to help with this.' },
              { role: 'user', content: 'Make it more concise' }
            ]
          )
          { message: 'Refined response' }
        end
        service.perform
      end

      it 'returns updated follow-up context' do
        allow(service).to receive(:make_api_call).and_return({ message: 'Refined response' })
        result = service.perform
        expect(result[:message]).to eq('Refined response')
        # The new user message and assistant reply are appended to the history
        expect(result[:follow_up_context]['last_response']).to eq('Refined response')
        expect(result[:follow_up_context]['conversation_history'].length).to eq(4)
        expect(result[:follow_up_context]['conversation_history'][-2]['content']).to eq('Make it more concise')
        expect(result[:follow_up_context]['conversation_history'][-1]['content']).to eq('Refined response')
      end
    end

    context 'when follow-up context is missing' do
      let(:follow_up_context) { nil }

      it 'returns error with 400 code' do
        result = service.perform
        expect(result[:error]).to eq('Follow-up context missing')
        expect(result[:error_code]).to eq(400)
      end
    end
  end

  describe '#build_follow_up_system_prompt' do
    it 'describes tone rewrite actions' do
      %w[professional casual friendly confident straightforward].each do |tone|
        session = { 'event_name' => tone }
        prompt = service.send(:build_follow_up_system_prompt, session)
        expect(prompt).to include("tone rewrite (#{tone})")
        expect(prompt).to include('help them refine the result')
      end
    end

    it 'describes fix_spelling_grammar action' do
      session = { 'event_name' => 'fix_spelling_grammar' }
      prompt = service.send(:build_follow_up_system_prompt, session)
      expect(prompt).to include('spelling and grammar correction')
    end

    it 'describes improve action' do
      session = { 'event_name' => 'improve' }
      prompt = service.send(:build_follow_up_system_prompt, session)
      expect(prompt).to include('message improvement')
    end

    it 'describes summarize action' do
      session = { 'event_name' => 'summarize' }
      prompt = service.send(:build_follow_up_system_prompt, session)
      expect(prompt).to include('conversation summary')
    end

    it 'describes reply_suggestion action' do
      session = { 'event_name' => 'reply_suggestion' }
      prompt = service.send(:build_follow_up_system_prompt, session)
      expect(prompt).to include('reply suggestion')
    end

    it 'describes label_suggestion action' do
      session = { 'event_name' => 'label_suggestion' }
      prompt = service.send(:build_follow_up_system_prompt, session)
      expect(prompt).to include('label suggestion')
    end

    it 'uses event_name directly for unknown actions' do
      session = { 'event_name' => 'custom_action' }
      prompt = service.send(:build_follow_up_system_prompt, session)
      expect(prompt).to include('custom_action')
    end
  end

  describe '#describe_previous_action' do
    it 'returns tone description for tone operations' do
      expect(service.send(:describe_previous_action, 'professional')).to eq('tone rewrite (professional)')
      expect(service.send(:describe_previous_action, 'casual')).to eq('tone rewrite (casual)')
      expect(service.send(:describe_previous_action, 'friendly')).to eq('tone rewrite (friendly)')
      expect(service.send(:describe_previous_action, 'confident')).to eq('tone rewrite (confident)')
      expect(service.send(:describe_previous_action, 'straightforward')).to eq('tone rewrite (straightforward)')
    end

    it 'returns specific descriptions for other operations' do
      expect(service.send(:describe_previous_action, 'fix_spelling_grammar')).to eq('spelling and grammar correction')
      expect(service.send(:describe_previous_action, 'improve')).to eq('message improvement')
      expect(service.send(:describe_previous_action, 'summarize')).to eq('conversation summary')
      expect(service.send(:describe_previous_action, 'reply_suggestion')).to eq('reply suggestion')
      expect(service.send(:describe_previous_action, 'label_suggestion')).to eq('label suggestion')
    end

    it 'returns event name for unknown operations' do
      expect(service.send(:describe_previous_action, 'unknown')).to eq('unknown')
    end
  end
end

View File

@@ -0,0 +1,169 @@
# Specs for Captain::LabelSuggestionService: suggests account labels for a
# conversation via the LLM. Covers response prefix stripping, the
# conversation-eligibility rules (message count / last-message direction),
# Redis caching of suggestions, and the no-labels short-circuit.
require 'rails_helper'

RSpec.describe Captain::LabelSuggestionService do
  let(:account) { create(:account) }
  let(:inbox) { create(:inbox, account: account) }
  let(:conversation) { create(:conversation, account: account, inbox: inbox) }
  let(:label1) { create(:label, account: account, title: 'bug') }
  let(:label2) { create(:label, account: account, title: 'feature-request') }
  let(:service) { described_class.new(account: account, conversation_display_id: conversation.display_id) }
  let(:mock_chat) { instance_double(RubyLLM::Chat) }
  let(:mock_context) { instance_double(RubyLLM::Context, chat: mock_chat) }
  let(:mock_response) { instance_double(RubyLLM::Message, content: 'bug, feature-request', input_tokens: 100, output_tokens: 20) }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    # Materialize the lazy label records before each example
    label1
    label2
    allow(Llm::Config).to receive(:with_api_key).and_yield(mock_context)
    allow(mock_chat).to receive(:with_instructions)
    allow(mock_chat).to receive(:ask).and_return(mock_response)
    # Stub captain enabled check to allow specs to test base functionality
    # without enterprise module interference
    allow(account).to receive(:feature_enabled?).and_call_original
    allow(account).to receive(:feature_enabled?).with('captain_tasks').and_return(true)
  end

  describe '#label_suggestion_message' do
    context 'with valid conversation' do
      before do
        # Create enough incoming messages to pass validation
        3.times do |i|
          create(:message, conversation: conversation, message_type: :incoming,
                 content: "Message #{i}", created_at: i.minutes.ago)
        end
      end

      it 'returns label suggestions' do
        result = service.perform
        expect(result[:message]).to eq('bug, feature-request')
      end

      it 'removes "Labels:" prefix from response' do
        # Only the literal prefix is stripped; the leading space remains
        allow(mock_response).to receive(:content).and_return('Labels: bug, feature-request')
        result = service.perform
        expect(result[:message]).to eq(' bug, feature-request')
      end

      it 'removes "Label:" prefix (singular) from response' do
        allow(mock_response).to receive(:content).and_return('label: bug')
        result = service.perform
        expect(result[:message]).to eq(' bug')
      end

      it 'builds labels_with_messages format correctly' do
        expect(service).to receive(:make_api_call) do |args|
          user_message = args[:messages].find { |m| m[:role] == 'user' }[:content]
          expect(user_message).to include('Messages:')
          expect(user_message).to include('Labels:')
          expect(user_message).to include('bug, feature-request')
          { message: 'bug' }
        end
        service.perform
      end
    end

    # Conversations outside the eligibility window are skipped entirely
    context 'with invalid conversation' do
      it 'returns nil when conversation has less than 3 incoming messages' do
        create(:message, conversation: conversation, message_type: :incoming, content: 'Message 1')
        create(:message, conversation: conversation, message_type: :incoming, content: 'Message 2')
        result = service.perform
        expect(result).to be_nil
      end

      it 'returns nil when conversation has more than 100 messages' do
        101.times do |i|
          create(:message, conversation: conversation, message_type: :incoming, content: "Message #{i}")
        end
        result = service.perform
        expect(result).to be_nil
      end

      it 'returns nil when conversation has >20 messages and last is not incoming' do
        21.times do |i|
          create(:message, conversation: conversation, message_type: :incoming, content: "Message #{i}")
        end
        create(:message, conversation: conversation, message_type: :outgoing, content: 'Agent reply')
        result = service.perform
        expect(result).to be_nil
      end
    end

    context 'when caching' do
      before do
        3.times do |i|
          create(:message, conversation: conversation, message_type: :incoming,
                 content: "Message #{i}", created_at: i.minutes.ago)
        end
      end

      it 'reads from cache on cache hit' do
        # Warm up cache
        service.perform
        # Create new service instance to test cache read
        new_service = described_class.new(account: account, conversation_display_id: conversation.display_id)
        expect(new_service).not_to receive(:make_api_call)
        result = new_service.perform
        expect(result[:message]).to eq('bug, feature-request')
      end

      it 'writes to cache on cache miss' do
        expect(Redis::Alfred).to receive(:setex).and_call_original
        service.perform
      end

      it 'returns nil for invalid cached JSON' do
        # Set invalid JSON in cache
        cache_key = service.send(:cache_key)
        Redis::Alfred.set(cache_key, 'invalid json')
        result = service.perform
        # Should make API call since cache read failed
        expect(result[:message]).to eq('bug, feature-request')
      end

      it 'does not cache error responses' do
        error_response = { error: 'API Error', request_messages: [] }
        allow(service).to receive(:make_api_call).and_return(error_response)
        expect(Redis::Alfred).not_to receive(:setex)
        service.perform
      end
    end

    context 'when no labels exist' do
      before do
        Label.destroy_all
        3.times do |i|
          create(:message, conversation: conversation, message_type: :incoming,
                 content: "Message #{i}")
        end
      end

      it 'returns nil' do
        result = service.perform
        expect(result).to be_nil
      end
    end
  end
end

View File

@@ -0,0 +1,92 @@
# Specs for Captain::ReplySuggestionService: suggests a reply for the agent.
# The RubyLLM chat double records every system/user message it receives into
# `captured_messages` so examples can assert on the exact prompts sent,
# including chat- vs email-channel instructions and agent sign-off handling.
require 'rails_helper'

RSpec.describe Captain::ReplySuggestionService do
  subject(:service) { described_class.new(account: account, conversation_display_id: conversation.display_id, user: agent) }

  let(:account) { create(:account) }
  let(:agent) { create(:user, account: account, name: 'Jane Smith') }
  let(:inbox) { create(:inbox, account: account) }
  let(:conversation) { create(:conversation, account: account, inbox: inbox) }
  let(:captured_messages) { [] }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    create(:message, conversation: conversation, message_type: :incoming, content: 'I need help')
    # Fall through to the real implementation for any other feature flag (same
    # pattern as the sibling Captain specs); without this, a `feature_enabled?`
    # call with different arguments raises instead of exercising the real check.
    allow(account).to receive(:feature_enabled?).and_call_original
    allow(account).to receive(:feature_enabled?).with('captain_tasks').and_return(true)
    mock_response = instance_double(RubyLLM::Message, content: 'Sure, I can help!', input_tokens: 50, output_tokens: 20)
    mock_chat = instance_double(RubyLLM::Chat)
    mock_context = instance_double(RubyLLM::Context, chat: mock_chat)
    allow(Llm::Config).to receive(:with_api_key).and_yield(mock_context)
    # Record everything sent to the chat so examples can inspect the prompts
    allow(mock_chat).to receive(:with_instructions) { |msg| captured_messages << { role: 'system', content: msg } }
    allow(mock_chat).to receive(:add_message) { |args| captured_messages << args }
    allow(mock_chat).to receive(:ask) do |msg|
      captured_messages << { role: 'user', content: msg }
      mock_response
    end
  end

  describe '#perform' do
    it 'returns the suggested reply' do
      result = service.perform
      expect(result[:message]).to eq('Sure, I can help!')
    end

    it 'formats conversation using LlmFormatter' do
      service.perform
      user_message = captured_messages.find { |m| m[:role] == 'user' }
      expect(user_message[:content]).to include('Message History:')
      expect(user_message[:content]).to include('User: I need help')
    end

    context 'with chat channel' do
      it 'uses chat-specific instructions' do
        service.perform
        system_prompt = captured_messages.find { |m| m[:role] == 'system' }[:content]
        expect(system_prompt).to include('CHAT conversation')
        expect(system_prompt).to include('brief, conversational')
        expect(system_prompt).not_to include('EMAIL conversation')
      end
    end

    context 'with email channel' do
      let(:email_channel) { create(:channel_email, account: account) }
      let(:inbox) { create(:inbox, account: account, channel: email_channel) }

      it 'uses email-specific instructions' do
        service.perform
        system_prompt = captured_messages.find { |m| m[:role] == 'system' }[:content]
        expect(system_prompt).to include('EMAIL conversation')
        expect(system_prompt).to include('professional email')
        expect(system_prompt).not_to include('CHAT conversation')
      end

      context 'when agent has a signature' do
        let(:agent) { create(:user, account: account, name: 'Jane Smith', message_signature: "Best,\nJane Smith") }

        it 'includes the signature in the prompt' do
          service.perform
          system_prompt = captured_messages.find { |m| m[:role] == 'system' }[:content]
          expect(system_prompt).to include("Best,\nJane Smith")
        end
      end

      context 'when agent has no signature' do
        let(:agent) { create(:user, account: account, name: 'Jane Smith', message_signature: nil) }

        it 'falls back to agent name for sign-off' do
          service.perform
          system_prompt = captured_messages.find { |m| m[:role] == 'system' }[:content]
          expect(system_prompt).to include("sign-off using the agent's name: Jane Smith")
        end
      end
    end
  end
end

View File

@@ -0,0 +1,166 @@
# Specs for Captain::RewriteService: rewrites agent-drafted text. Covers each
# supported operation (spelling/grammar fix, the five tone rewrites rendered
# from a Liquid template, and context-aware "improve"), plus rejection of
# unknown operation names to prevent arbitrary-method dispatch.
require 'rails_helper'

RSpec.describe Captain::RewriteService do
  let(:account) { create(:account) }
  let(:inbox) { create(:inbox, account: account) }
  let(:conversation) { create(:conversation, account: account, inbox: inbox) }
  let(:content) { 'I need help with my order' }
  let(:operation) { 'fix_spelling_grammar' }
  let(:service) { described_class.new(account: account, content: content, operation: operation, conversation_display_id: conversation.display_id) }
  let(:mock_chat) { instance_double(RubyLLM::Chat) }
  let(:mock_context) { instance_double(RubyLLM::Context, chat: mock_chat) }
  let(:mock_response) { instance_double(RubyLLM::Message, content: 'Rewritten text', input_tokens: 10, output_tokens: 5) }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    allow(Llm::Config).to receive(:with_api_key).and_yield(mock_context)
    allow(mock_chat).to receive(:with_instructions)
    allow(mock_chat).to receive(:ask).and_return(mock_response)
    # Stub captain enabled check to allow specs to test base functionality
    # without enterprise module interference
    allow(account).to receive(:feature_enabled?).and_call_original
    allow(account).to receive(:feature_enabled?).with('captain_tasks').and_return(true)
  end

  describe '#perform with fix_spelling_grammar operation' do
    let(:operation) { 'fix_spelling_grammar' }

    it 'uses fix_spelling_grammar prompt' do
      expect(service).to receive(:prompt_from_file).with('fix_spelling_grammar').and_return('Fix errors')
      expect(service).to receive(:make_api_call) do |args|
        # System message carries the prompt; user message carries the draft
        expect(args[:messages][0][:content]).to eq('Fix errors')
        expect(args[:messages][1][:content]).to eq(content)
        { message: 'Fixed' }
      end
      service.perform
    end
  end

  # All tone operations share one 'tone_rewrite' template with a
  # `{{ tone }}` Liquid placeholder substituted per operation.
  describe 'tone rewrite methods' do
    let(:tone_prompt_template) { 'Rewrite in {{ tone }} tone' }

    before do
      allow(service).to receive(:prompt_from_file).with('tone_rewrite').and_return(tone_prompt_template)
    end

    describe '#perform with casual operation' do
      let(:operation) { 'casual' }

      it 'uses casual tone' do
        expect(service).to receive(:make_api_call) do |args|
          expect(args[:messages][0][:content]).to eq('Rewrite in casual tone')
          { message: 'Hey, need help?' }
        end
        service.perform
      end
    end

    describe '#perform with professional operation' do
      let(:operation) { 'professional' }

      it 'uses professional tone' do
        expect(service).to receive(:make_api_call) do |args|
          expect(args[:messages][0][:content]).to eq('Rewrite in professional tone')
          { message: 'Professional text' }
        end
        service.perform
      end
    end

    describe '#perform with friendly operation' do
      let(:operation) { 'friendly' }

      it 'uses friendly tone' do
        expect(service).to receive(:make_api_call) do |args|
          expect(args[:messages][0][:content]).to eq('Rewrite in friendly tone')
          { message: 'Friendly text' }
        end
        service.perform
      end
    end

    describe '#perform with confident operation' do
      let(:operation) { 'confident' }

      it 'uses confident tone' do
        expect(service).to receive(:make_api_call) do |args|
          expect(args[:messages][0][:content]).to eq('Rewrite in confident tone')
          { message: 'Confident text' }
        end
        service.perform
      end
    end

    describe '#perform with straightforward operation' do
      let(:operation) { 'straightforward' }

      it 'uses straightforward tone' do
        expect(service).to receive(:make_api_call) do |args|
          expect(args[:messages][0][:content]).to eq('Rewrite in straightforward tone')
          { message: 'Straightforward text' }
        end
        service.perform
      end
    end
  end

  describe '#perform with improve operation' do
    let(:operation) { 'improve' }
    let(:improve_template) { 'Context: {{ conversation_context }}\nDraft: {{ draft_message }}' }

    before do
      create(:message, conversation: conversation, message_type: :incoming, content: 'Customer message')
      allow(service).to receive(:prompt_from_file).with('improve').and_return(improve_template)
    end

    it 'uses conversation context and draft message with Liquid template' do
      expect(service).to receive(:make_api_call) do |args|
        system_content = args[:messages][0][:content]
        expect(system_content).to include('Context:')
        expect(system_content).to include('Draft: I need help with my order')
        expect(args[:messages][1][:content]).to eq(content)
        { message: 'Improved text' }
      end
      service.perform
    end

    it 'returns formatted response' do
      result = service.perform
      expect(result[:message]).to eq('Rewritten text')
    end
  end

  describe '#perform with invalid operation' do
    it 'raises ArgumentError for unknown operation' do
      invalid_service = described_class.new(
        account: account,
        content: content,
        operation: 'invalid_operation',
        conversation_display_id: conversation.display_id
      )
      expect { invalid_service.perform }.to raise_error(ArgumentError, /Invalid operation/)
    end

    it 'prevents method injection attacks' do
      # 'perform' is a real method name; the operation allowlist must reject it
      dangerous_service = described_class.new(
        account: account,
        content: content,
        operation: 'perform',
        conversation_display_id: conversation.display_id
      )
      expect { dangerous_service.perform }.to raise_error(ArgumentError, /Invalid operation/)
    end
  end
end

View File

@@ -0,0 +1,55 @@
require 'rails_helper'

# Specs for Captain::SummaryService: verifies the model handed to the LLM,
# the two-part message payload (system prompt plus serialized conversation
# text), and the formatted response including token usage figures.
RSpec.describe Captain::SummaryService do
  subject(:service) { described_class.new(account: account, conversation_display_id: conversation.display_id) }

  let(:account) { create(:account) }
  let(:inbox) { create(:inbox, account: account) }
  let(:conversation) { create(:conversation, account: account, inbox: inbox) }
  let(:llm_chat) { instance_double(RubyLLM::Chat) }
  let(:llm_context) { instance_double(RubyLLM::Context, chat: llm_chat) }
  let(:llm_response) { instance_double(RubyLLM::Message, content: 'Summary of conversation', input_tokens: 100, output_tokens: 50) }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    allow(Llm::Config).to receive(:with_api_key).and_yield(llm_context)
    allow(llm_chat).to receive(:with_instructions)
    allow(llm_chat).to receive(:ask).and_return(llm_response)
    # Stub captain enabled check to allow specs to test base functionality
    # without enterprise module interference
    allow(account).to receive(:feature_enabled?).and_call_original
    allow(account).to receive(:feature_enabled?).with('captain_tasks').and_return(true)
  end

  describe '#perform' do
    it 'passes correct model to API' do
      expect(service).to receive(:make_api_call)
        .with(hash_including(model: Captain::BaseTaskService::GPT_MODEL))
        .and_call_original
      service.perform
    end

    it 'passes system prompt and conversation text as messages' do
      allow(service).to receive(:prompt_from_file).with('summary').and_return('Summarize this')
      expect(service).to receive(:make_api_call) do |args|
        system_message, user_message = args[:messages]
        expect(args[:messages].size).to eq(2)
        expect(system_message[:role]).to eq('system')
        expect(system_message[:content]).to eq('Summarize this')
        expect(user_message[:role]).to eq('user')
        expect(user_message[:content]).to be_a(String)
        { message: 'Summary' }
      end
      service.perform
    end

    it 'returns formatted response' do
      result = service.perform
      expect(result[:message]).to eq('Summary of conversation')
      expect(result[:usage]['prompt_tokens']).to eq(100)
      expect(result[:usage]['completion_tokens']).to eq(50)
    end
  end
end

View File

@@ -1,201 +0,0 @@
require 'rails_helper'
# Unit spec for the OpenAI integration processor. All LLM traffic is stubbed
# through RubyLLM doubles, so no network calls are made.
RSpec.describe Integrations::Openai::ProcessorService do
  subject(:service) { described_class.new(hook: hook, event: event) }

  let(:account) { create(:account) }
  let(:hook) { create(:integrations_hook, :openai, account: account) }

  # Mock RubyLLM objects
  let(:mock_chat) { instance_double(RubyLLM::Chat) }
  let(:mock_context) { instance_double(RubyLLM::Context) }
  # OpenStruct accepts the arbitrary config writes performed inside the
  # RubyLLM.context configuration block.
  let(:mock_config) { OpenStruct.new }
  # Response without usage metadata — exercises the zero-token fallback path.
  let(:mock_response) do
    instance_double(
      RubyLLM::Message,
      content: 'This is a reply from openai.',
      input_tokens: nil,
      output_tokens: nil
    )
  end
  # Response carrying usage metadata (50 prompt + 20 completion tokens).
  let(:mock_response_with_usage) do
    instance_double(
      RubyLLM::Message,
      content: 'This is a reply from openai.',
      input_tokens: 50,
      output_tokens: 20
    )
  end

  before do
    # Wire the double chain: context -> chat, with chainable stubs so the
    # service's fluent calls (with_instructions.add_message.ask) resolve.
    allow(RubyLLM).to receive(:context).and_yield(mock_config).and_return(mock_context)
    allow(mock_context).to receive(:chat).and_return(mock_chat)
    allow(mock_chat).to receive(:with_instructions).and_return(mock_chat)
    allow(mock_chat).to receive(:add_message).and_return(mock_chat)
    allow(mock_chat).to receive(:ask).and_return(mock_response)
  end

  describe '#perform' do
    describe 'text transformation operations' do
      # Every transformation event shares the same request/response contract,
      # so the behavior is specified once and applied per event name.
      shared_examples 'text transformation operation' do |event_name|
        let(:event) { { 'name' => event_name, 'data' => { 'content' => 'This is a test' } } }

        it 'returns the transformed text' do
          result = service.perform
          expect(result[:message]).to eq('This is a reply from openai.')
        end

        it 'sends the user content to the LLM' do
          service.perform
          expect(mock_chat).to have_received(:ask).with('This is a test')
        end

        it 'sets system instructions' do
          service.perform
          expect(mock_chat).to have_received(:with_instructions).with(a_string_including('You are a helpful support agent'))
        end
      end

      it_behaves_like 'text transformation operation', 'rephrase'
      it_behaves_like 'text transformation operation', 'fix_spelling_grammar'
      it_behaves_like 'text transformation operation', 'shorten'
      it_behaves_like 'text transformation operation', 'expand'
      it_behaves_like 'text transformation operation', 'make_friendly'
      it_behaves_like 'text transformation operation', 'make_formal'
      it_behaves_like 'text transformation operation', 'simplify'
    end

    describe 'conversation-based operations' do
      # let! forces creation before each example so the messages below exist
      # when the service reads the conversation history.
      let!(:conversation) { create(:conversation, account: account) }

      before do
        create(:message, account: account, conversation: conversation, message_type: :incoming, content: 'hello agent')
        create(:message, account: account, conversation: conversation, message_type: :outgoing, content: 'hello customer')
      end

      context 'with reply_suggestion event' do
        let(:event) { { 'name' => 'reply_suggestion', 'data' => { 'conversation_display_id' => conversation.display_id } } }

        it 'returns the suggested reply' do
          result = service.perform
          expect(result[:message]).to eq('This is a reply from openai.')
        end

        it 'adds conversation history before asking' do
          service.perform
          # Should add the first message as history, then ask with the last message
          expect(mock_chat).to have_received(:add_message).with(role: :user, content: 'hello agent')
          expect(mock_chat).to have_received(:ask).with('hello customer')
        end
      end

      context 'with summarize event' do
        let(:event) { { 'name' => 'summarize', 'data' => { 'conversation_display_id' => conversation.display_id } } }

        it 'returns the summary' do
          result = service.perform
          expect(result[:message]).to eq('This is a reply from openai.')
        end

        it 'sends formatted conversation as a single message' do
          service.perform
          # Summarize sends conversation as a formatted string in one user message
          expect(mock_chat).to have_received(:ask).with(a_string_matching(/Customer.*hello agent.*Agent.*hello customer/m))
        end
      end

      context 'with label_suggestion event and no labels' do
        let(:event) { { 'name' => 'label_suggestion', 'data' => { 'conversation_display_id' => conversation.display_id } } }

        it 'returns nil' do
          expect(service.perform).to be_nil
        end
      end
    end

    describe 'edge cases' do
      context 'with unknown event name' do
        let(:event) { { 'name' => 'unknown', 'data' => {} } }

        it 'returns nil' do
          expect(service.perform).to be_nil
        end
      end
    end

    describe 'response structure' do
      let(:event) { { 'name' => 'rephrase', 'data' => { 'content' => 'test message' } } }

      context 'when response includes usage data' do
        before do
          allow(mock_chat).to receive(:ask).and_return(mock_response_with_usage)
        end

        it 'returns message with usage data' do
          result = service.perform
          expect(result[:message]).to eq('This is a reply from openai.')
          expect(result[:usage]['prompt_tokens']).to eq(50)
          expect(result[:usage]['completion_tokens']).to eq(20)
          expect(result[:usage]['total_tokens']).to eq(70)
        end

        it 'includes request_messages in response' do
          result = service.perform
          expect(result[:request_messages]).to be_an(Array)
          expect(result[:request_messages].length).to eq(2)
        end
      end

      context 'when response does not include usage data' do
        it 'returns message with zero total tokens' do
          result = service.perform
          expect(result[:message]).to eq('This is a reply from openai.')
          expect(result[:usage]['total_tokens']).to eq(0)
        end

        it 'includes request_messages in response' do
          result = service.perform
          expect(result[:request_messages]).to be_an(Array)
        end
      end
    end

    describe 'endpoint configuration' do
      let(:event) { { 'name' => 'rephrase', 'data' => { 'content' => 'test message' } } }

      context 'without CAPTAIN_OPEN_AI_ENDPOINT configured' do
        # Ensure no leftover config from other examples influences this one.
        before { InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.destroy }

        it 'uses default OpenAI endpoint' do
          expect(Llm::Config).to receive(:with_api_key).with(
            hook.settings['api_key'],
            api_base: 'https://api.openai.com/v1'
          ).and_call_original
          service.perform
        end
      end

      context 'with CAPTAIN_OPEN_AI_ENDPOINT configured' do
        before do
          InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.destroy
          create(:installation_config, name: 'CAPTAIN_OPEN_AI_ENDPOINT', value: 'https://custom.azure.com/')
        end

        it 'uses custom endpoint' do
          # The service normalizes the configured base URL by appending /v1.
          expect(Llm::Config).to receive(:with_api_key).with(
            hook.settings['api_key'],
            api_base: 'https://custom.azure.com/v1'
          ).and_call_original
          service.perform
        end
      end
    end
  end
end