feat: Add support for Langfuse LLM Tracing via OTEL (#12905)
This PR adds LLM instrumentation on Langfuse for the AI editor feature. ## Type of change New feature (non-breaking change which adds functionality). Needs a Langfuse account and env vars to be set. ## How Has This Been Tested? I configured personal Langfuse credentials and instrumented the app; traces can be seen in Langfuse. Each conversation is one session. <img width="1683" height="714" alt="image" src="https://github.com/user-attachments/assets/3fcba1c9-63cf-44b9-a355-fd6608691559" /> <img width="1446" height="172" alt="image" src="https://github.com/user-attachments/assets/dfa6e98f-4741-4e04-9a9e-078d1f01e97b" /> ## Checklist: - [x] My code follows the style guidelines of this project - [x] I have performed a self-review of my code - [x] I have commented on my code, particularly in hard-to-understand areas - [ ] I have made corresponding changes to the documentation - [x] My changes generate no new warnings - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] New and existing unit tests pass locally with my changes - [ ] Any dependent changes have been merged and published in downstream modules --------- Co-authored-by: aakashb95 <aakash@chatwoot.com> Co-authored-by: Vishnu Narayanan <iamwishnu@gmail.com> Co-authored-by: Pranav <pranav@chatwoot.com>
This commit is contained in:
217
spec/lib/integrations/llm_instrumentation_spec.rb
Normal file
217
spec/lib/integrations/llm_instrumentation_spec.rb
Normal file
@@ -0,0 +1,217 @@
|
||||
require 'rails_helper'

# Spec for Integrations::LlmInstrumentation, the mixin that wraps LLM API
# calls in an OpenTelemetry span (exported to Langfuse) when the
# OTEL_PROVIDER installation config is set.
#
# The tracer and span are stubbed via instance_double so no real OTEL
# exporter is needed; each example asserts the attributes written to the
# span and that the wrapped block's return value is always passed through.
RSpec.describe Integrations::LlmInstrumentation do
  # Anonymous host class so the mixin is tested in isolation.
  let(:test_class) do
    Class.new do
      include Integrations::LlmInstrumentation
    end
  end

  let(:instance) { test_class.new }
  # Tracing is only active when OTEL_PROVIDER is configured.
  let!(:otel_config) do
    InstallationConfig.find_or_create_by(name: 'OTEL_PROVIDER') do |config|
      config.value = 'langfuse'
    end
  end

  let(:params) do
    {
      span_name: 'llm.test',
      account_id: 123,
      conversation_id: 456,
      feature_name: 'reply_suggestion',
      model: 'gpt-4o-mini',
      messages: [{ 'role' => 'user', 'content' => 'Hello' }],
      temperature: 0.7
    }
  end

  before do
    InstallationConfig.find_or_create_by(name: 'LANGFUSE_SECRET_KEY') do |config|
      config.value = 'test-secret-key'
    end
  end

  # Stubs the instance's tracer with an in_span that yields a permissive
  # mock span. Returns [mock_tracer, mock_span] so individual examples can
  # add expectations or override stubs (e.g. make set_attribute raise).
  def stub_traced_span
    mock_span = instance_double(OpenTelemetry::Trace::Span)
    allow(mock_span).to receive(:set_attribute)
    allow(mock_span).to receive(:status=)
    mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
    allow(instance).to receive(:tracer).and_return(mock_tracer)
    allow(mock_tracer).to receive(:in_span).and_yield(mock_span)
    [mock_tracer, mock_span]
  end

  describe '#instrument_llm_call' do
    context 'when OTEL provider is not configured' do
      before { otel_config.update(value: '') }

      it 'executes the block without tracing' do
        result = instance.instrument_llm_call(params) { 'my_result' }
        expect(result).to eq('my_result')
      end
    end

    context 'when OTEL provider is configured' do
      it 'executes the block and returns the result' do
        stub_traced_span

        result = instance.instrument_llm_call(params) { 'my_result' }

        expect(result).to eq('my_result')
      end

      it 'creates a tracing span with the provided span name' do
        mock_tracer, = stub_traced_span

        instance.instrument_llm_call(params) { 'result' }

        expect(mock_tracer).to have_received(:in_span).with('llm.test')
      end

      it 'returns the block result even if instrumentation has errors' do
        # Only the tracer is stubbed here: in_span itself blows up, and the
        # block result must still be returned to the caller.
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_raise(StandardError.new('Instrumentation failed'))

        result = instance.instrument_llm_call(params) { 'my_result' }

        expect(result).to eq('my_result')
      end

      it 'handles errors gracefully and captures exceptions' do
        _, mock_span = stub_traced_span
        # Override the permissive stub so attribute writes fail mid-span.
        allow(mock_span).to receive(:set_attribute).and_raise(StandardError.new('Span error'))
        allow(ChatwootExceptionTracker).to receive(:new).and_call_original

        result = instance.instrument_llm_call(params) { 'my_result' }

        expect(result).to eq('my_result')
        expect(ChatwootExceptionTracker).to have_received(:new)
      end

      it 'sets correct request attributes on the span' do
        _, mock_span = stub_traced_span

        instance.instrument_llm_call(params) { 'result' }

        expect(mock_span).to have_received(:set_attribute).with('gen_ai.provider.name', 'openai')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.request.model', 'gpt-4o-mini')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.request.temperature', 0.7)
      end

      it 'sets correct prompt message attributes' do
        _, mock_span = stub_traced_span

        custom_params = params.merge(
          messages: [
            { 'role' => 'system', 'content' => 'You are a helpful assistant' },
            { 'role' => 'user', 'content' => 'Hello' }
          ]
        )

        instance.instrument_llm_call(custom_params) { 'result' }

        # One indexed attribute pair per prompt message.
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.prompt.0.role', 'system')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.prompt.0.content', 'You are a helpful assistant')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.prompt.1.role', 'user')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.prompt.1.content', 'Hello')
      end

      it 'sets correct metadata attributes' do
        _, mock_span = stub_traced_span

        instance.instrument_llm_call(params) { 'result' }

        # Langfuse groups traces by user (account) and session (account_conversation).
        expect(mock_span).to have_received(:set_attribute).with('langfuse.user.id', '123')
        expect(mock_span).to have_received(:set_attribute).with('langfuse.session.id', '123_456')
        expect(mock_span).to have_received(:set_attribute).with('langfuse.trace.tags', '["reply_suggestion"]')
      end

      it 'sets completion message attributes when result contains message' do
        _, mock_span = stub_traced_span

        result = instance.instrument_llm_call(params) do
          { message: 'AI response here' }
        end

        expect(result).to eq({ message: 'AI response here' })
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.completion.0.role', 'assistant')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.completion.0.content', 'AI response here')
      end

      it 'sets usage metrics when result contains usage data' do
        _, mock_span = stub_traced_span

        result = instance.instrument_llm_call(params) do
          {
            usage: {
              'prompt_tokens' => 150,
              'completion_tokens' => 200,
              'total_tokens' => 350
            }
          }
        end

        expect(result[:usage]['prompt_tokens']).to eq(150)
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.usage.input_tokens', 150)
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.usage.output_tokens', 200)
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.usage.total_tokens', 350)
      end

      it 'sets error attributes when result contains error' do
        _, mock_span = stub_traced_span
        mock_status = instance_double(OpenTelemetry::Trace::Status)
        allow(OpenTelemetry::Trace::Status).to receive(:error).and_return(mock_status)

        result = instance.instrument_llm_call(params) do
          {
            error: { message: 'API rate limit exceeded' },
            error_code: 'rate_limit_exceeded'
          }
        end

        expect(result[:error_code]).to eq('rate_limit_exceeded')
        expect(mock_span).to have_received(:set_attribute)
          .with('gen_ai.response.error', '{"message":"API rate limit exceeded"}')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.response.error_code', 'rate_limit_exceeded')
        expect(mock_span).to have_received(:status=).with(mock_status)
        expect(OpenTelemetry::Trace::Status).to have_received(:error).with('API Error: rate_limit_exceeded')
      end
    end
  end
end
require 'rails_helper'

# Spec for Integrations::Openai::ProcessorService#perform.
#
# NOTE(review): reconstructed from a garbled unified diff (old and new hunks
# were interleaved without +/- markers); this is the post-refactor version
# using shared examples. Verify against the repository before merging.
#
# Text-transformation events (rephrase, shorten, ...) share one request
# shape, so they are covered by a parameterized shared example; HTTP calls
# to OpenAI are stubbed with WebMock.
RSpec.describe Integrations::Openai::ProcessorService do
  subject(:service) { described_class.new(hook: hook, event: event) }

  let(:account) { create(:account) }
  let(:hook) { create(:integrations_hook, :openai, account: account) }
  let(:expected_headers) { { 'Authorization' => "Bearer #{hook.settings['api_key']}" } }
  let(:openai_response) do
    {
      'choices' => [{ 'message' => { 'content' => 'This is a reply from openai.' } }]
    }.to_json
  end
  let(:openai_response_with_usage) do
    {
      'choices' => [{ 'message' => { 'content' => 'This is a reply from openai.' } }],
      'usage' => {
        'prompt_tokens' => 50,
        'completion_tokens' => 20,
        'total_tokens' => 70
      }
    }.to_json
  end

  describe '#perform' do
    # All text-transformation events build the same request: a system prompt
    # specific to the operation plus the user-supplied content.
    shared_examples 'text transformation operation' do |event_name, system_prompt|
      let(:event) { { 'name' => event_name, 'data' => { 'content' => 'This is a test' } } }
      let(:expected_request_body) do
        {
          'model' => 'gpt-4o-mini',
          'messages' => [
            { 'role' => 'system', 'content' => system_prompt },
            { 'role' => 'user', 'content' => 'This is a test' }
          ]
        }.to_json
      end

      it "returns the #{event_name.tr('_', ' ')} text" do
        stub_request(:post, 'https://api.openai.com/v1/chat/completions')
          .with(body: expected_request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response)

        result = service.perform
        expect(result[:message]).to eq('This is a reply from openai.')
      end
    end

    # For conversation-based events the request stub is set up in a `before`
    # block; this shared example only checks the parsed message.
    shared_examples 'successful openai response' do
      it 'returns the expected message' do
        result = service.perform
        expect(result[:message]).to eq('This is a reply from openai.')
      end
    end

    describe 'text transformation operations' do
      base_prompt = 'You are a helpful support agent. '
      language_suffix = 'Ensure that the reply should be in user language.'

      it_behaves_like 'text transformation operation', 'rephrase',
                      "#{base_prompt}Please rephrase the following response. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'fix_spelling_grammar',
                      "#{base_prompt}Please fix the spelling and grammar of the following response. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'shorten',
                      "#{base_prompt}Please shorten the following response. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'expand',
                      "#{base_prompt}Please expand the following response. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'make_friendly',
                      "#{base_prompt}Please make the following response more friendly. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'make_formal',
                      "#{base_prompt}Please make the following response more formal. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'simplify',
                      "#{base_prompt}Please simplify the following response. #{language_suffix}"
    end

    describe 'conversation-based operations' do
      let!(:conversation) { create(:conversation, account: account) }
      let!(:customer_message) do
        create(:message, account: account, conversation: conversation, message_type: :incoming, content: 'hello agent')
      end
      let!(:agent_message) do
        create(:message, account: account, conversation: conversation, message_type: :outgoing, content: 'hello customer')
      end

      context 'with reply_suggestion event' do
        let(:event) { { 'name' => 'reply_suggestion', 'data' => { 'conversation_display_id' => conversation.display_id } } }
        let(:expected_request_body) do
          {
            'model' => 'gpt-4o-mini',
            'messages' => [
              { role: 'system', content: Rails.root.join('lib/integrations/openai/openai_prompts/reply.txt').read },
              { role: 'user', content: customer_message.content },
              { role: 'assistant', content: agent_message.content }
            ]
          }.to_json
        end

        before do
          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
            .with(body: expected_request_body, headers: expected_headers)
            .to_return(status: 200, body: openai_response)
        end

        it_behaves_like 'successful openai response'
      end

      context 'with summarize event' do
        let(:event) { { 'name' => 'summarize', 'data' => { 'conversation_display_id' => conversation.display_id } } }
        let(:conversation_messages) do
          "Customer #{customer_message.sender.name} : #{customer_message.content}\n" \
            "Agent #{agent_message.sender.name} : #{agent_message.content}\n"
        end
        # The summary prompt differs between the enterprise build (read from
        # a prompt file) and the OSS build (inline fallback string).
        let(:summary_prompt) do
          if ChatwootApp.enterprise?
            Rails.root.join('enterprise/lib/enterprise/integrations/openai_prompts/summary.txt').read
          else
            'Please summarize the key points from the following conversation between support agents and customer as bullet points ' \
              "for the next support agent looking into the conversation. Reply in the user's language."
          end
        end
        let(:expected_request_body) do
          {
            'model' => 'gpt-4o-mini',
            'messages' => [
              { 'role' => 'system', 'content' => summary_prompt },
              { 'role' => 'user', 'content' => conversation_messages }
            ]
          }.to_json
        end

        before do
          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
            .with(body: expected_request_body, headers: expected_headers)
            .to_return(status: 200, body: openai_response)
        end

        it_behaves_like 'successful openai response'
      end

      context 'with label_suggestion event and no labels' do
        let(:event) { { 'name' => 'label_suggestion', 'data' => { 'conversation_display_id' => conversation.display_id } } }

        it 'returns nil' do
          expect(service.perform).to be_nil
        end
      end
    end

    describe 'edge cases' do
      context 'with unknown event name' do
        let(:event) { { 'name' => 'unknown', 'data' => {} } }

        it 'returns nil' do
          expect(service.perform).to be_nil
        end
      end
    end

    describe 'response structure' do
      let(:event) { { 'name' => 'rephrase', 'data' => { 'content' => 'test message' } } }

      context 'when response includes usage data' do
        before do
          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
            .with(body: anything, headers: expected_headers)
            .to_return(status: 200, body: openai_response_with_usage)
        end

        it 'returns message, usage, and request_messages' do
          result = service.perform

          expect(result[:message]).to eq('This is a reply from openai.')
          expect(result[:usage]).to eq({
                                         'prompt_tokens' => 50,
                                         'completion_tokens' => 20,
                                         'total_tokens' => 70
                                       })
          expect(result[:request_messages]).to be_an(Array)
          expect(result[:request_messages].length).to eq(2)
        end
      end

      context 'when response does not include usage data' do
        before do
          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
            .with(body: anything, headers: expected_headers)
            .to_return(status: 200, body: openai_response)
        end

        it 'returns message and request_messages with nil usage' do
          result = service.perform

          expect(result[:message]).to eq('This is a reply from openai.')
          expect(result[:usage]).to be_nil
          expect(result[:request_messages]).to be_an(Array)
        end
      end
    end

    describe 'endpoint configuration' do
      let(:event) { { 'name' => 'rephrase', 'data' => { 'content' => 'test message' } } }

      shared_examples 'endpoint request' do |endpoint_url|
        it "makes request to #{endpoint_url}" do
          stub_request(:post, "#{endpoint_url}/v1/chat/completions")
            .with(body: anything, headers: expected_headers)
            .to_return(status: 200, body: openai_response)

          result = service.perform
          expect(result[:message]).to eq('This is a reply from openai.')
          expect(result[:request_messages]).to be_an(Array)
          expect(result[:usage]).to be_nil
        end
      end

      context 'without CAPTAIN_OPEN_AI_ENDPOINT configured' do
        before { InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.destroy }

        it_behaves_like 'endpoint request', 'https://api.openai.com'
      end

      context 'with CAPTAIN_OPEN_AI_ENDPOINT configured' do
        # A trailing slash on the configured endpoint must not produce a
        # double slash in the request URL.
        before do
          InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.destroy
          create(:installation_config, name: 'CAPTAIN_OPEN_AI_ENDPOINT', value: 'https://custom.azure.com/')
        end

        it_behaves_like 'endpoint request', 'https://custom.azure.com'
      end
    end
  end
end
Reference in New Issue
Block a user