Files
leadchat/spec/lib/captain/summary_service_spec.rb
Petterson b7b6e67df7 fix(captain): localize AI summary to account language (#13790)
AI-generated summaries now respect the account's language setting.
Previously, summaries were always returned in English regardless of the
user's configured language, making section headings like "Customer
Intent" and "Action Items" appear in English even for non-English
accounts.

Previous behavior:
<img width="1336" height="790" alt="image"
src="https://github.com/user-attachments/assets/5df8b78b-1218-438d-9578-a806b5cb94ac"
/>


Current behavior:
<img width="1253" height="372" alt="image"
src="https://github.com/user-attachments/assets/ae932c97-06da-4baf-9f77-9719bc9162e8"
/>


## What changed
- Added explicit account locale to the AI system prompt in
`Captain::SummaryService`
- Updated the summary prompt template to instruct the model to translate
section headings

## How to test
1. Configure an account with a non-English language (e.g., Portuguese)
2. Open a conversation with messages
3. Use the Copilot "Summarize" feature
4. Verify that section headings ("Customer Intent", "Conversation
Summary", etc.) appear in the account's language

---------

Co-authored-by: Aakash Bakhle <48802744+aakashb95@users.noreply.github.com>
2026-04-14 17:36:10 +05:30

57 lines
2.3 KiB
Ruby

require 'rails_helper'
# Specs for Captain::SummaryService. Verifies that #perform sends the
# expected model and messages to the LLM API — including the account-locale
# instruction appended to the system prompt — and that the raw LLM response
# is reshaped into the service's { message:, usage: } result format.
RSpec.describe Captain::SummaryService do
  let(:account) { create(:account) }
  let(:inbox) { create(:inbox, account: account) }
  let(:conversation) { create(:conversation, account: account, inbox: inbox) }
  let(:service) { described_class.new(account: account, conversation_display_id: conversation.display_id) }

  # Verified doubles standing in for the RubyLLM client stack so no real
  # network call is ever made.
  let(:llm_chat) { instance_double(RubyLLM::Chat) }
  let(:llm_context) { instance_double(RubyLLM::Context, chat: llm_chat) }
  let(:llm_response) { instance_double(RubyLLM::Message, content: 'Summary of conversation', input_tokens: 100, output_tokens: 50) }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')

    allow(Llm::Config).to receive(:with_api_key).and_yield(llm_context)
    allow(llm_chat).to receive(:with_instructions)
    allow(llm_chat).to receive(:ask).and_return(llm_response)

    # Stub captain enabled check to allow specs to test base functionality
    # without enterprise module interference
    allow(account).to receive(:feature_enabled?).and_call_original
    allow(account).to receive(:feature_enabled?).with('captain_tasks').and_return(true)
  end

  describe '#perform' do
    it 'passes correct model to API' do
      expect(service).to receive(:make_api_call).with(
        hash_including(model: Captain::BaseTaskService::GPT_MODEL)
      ).and_call_original

      service.perform
    end

    it 'passes system prompt and conversation text as messages' do
      allow(service).to receive(:prompt_from_file).with('summary').and_return('Summarize this')

      expect(service).to receive(:make_api_call) do |args|
        system_message, user_message = args[:messages]

        expect(args[:messages].length).to eq(2)
        expect(system_message[:role]).to eq('system')
        expect(system_message[:content]).to include('Summarize this')
        # The summary must be localized: the system prompt carries an explicit
        # instruction to reply in the account's configured language.
        expect(system_message[:content]).to include("Reply in #{account.locale_english_name}")
        expect(user_message[:role]).to eq('user')
        expect(user_message[:content]).to be_a(String)

        { message: 'Summary' }
      end

      service.perform
    end

    it 'returns formatted response' do
      result = service.perform

      expect(result[:message]).to eq('Summary of conversation')
      expect(result[:usage]['prompt_tokens']).to eq(100)
      expect(result[:usage]['completion_tokens']).to eq(50)
    end
  end
end