feat: Add support for Langfuse LLM Tracing via OTEL (#12905)
This PR adds LLM instrumentation for the AI editor feature, exporting traces to Langfuse.

## Type of change

New feature (non-breaking change which adds functionality). Requires a Langfuse account and the corresponding environment variables to be set.

## How Has This Been Tested?

I configured personal Langfuse credentials and instrumented the app; traces can be seen in Langfuse. Each conversation is one session.

<img width="1683" height="714" alt="image" src="https://github.com/user-attachments/assets/3fcba1c9-63cf-44b9-a355-fd6608691559" />
<img width="1446" height="172" alt="image" src="https://github.com/user-attachments/assets/dfa6e98f-4741-4e04-9a9e-078d1f01e97b" />

## Checklist:

- [x] My code follows the style guidelines of this project
- [x] I have performed a self-review of my code
- [x] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [x] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged and published in downstream modules

---------

Co-authored-by: aakashb95 <aakash@chatwoot.com>
Co-authored-by: Vishnu Narayanan <iamwishnu@gmail.com>
Co-authored-by: Pranav <pranav@chatwoot.com>
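The feature is gated on installation configs rather than plain environment variables (see the `OTEL_PROVIDER` and `LANGFUSE_*` entries added to the installation config YAML below). As a rough sketch, enabling tracing on a self-hosted instance could look like this from a Rails console; the key names come from this PR, while the values shown are placeholders:

```ruby
# Rough sketch (Rails console): enable Langfuse tracing via the installation
# configs introduced in this PR. The values below are placeholders.
{
  'OTEL_PROVIDER' => 'langfuse',
  'LANGFUSE_PUBLIC_KEY' => 'pk-lf-...',
  'LANGFUSE_SECRET_KEY' => 'sk-lf-...',
  'LANGFUSE_BASE_URL' => 'https://us.cloud.langfuse.com'
}.each do |name, value|
  config = InstallationConfig.find_or_initialize_by(name: name)
  config.value = value
  config.save!
end
```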
@@ -220,6 +220,7 @@ ANDROID_SHA256_CERT_FINGERPRINT=AC:73:8E:DE:EB:56:EA:CC:10:87:02:A7:65:37:7B:38:
## https://github.com/DataDog/dd-trace-rb/blob/master/docs/GettingStarted.md#environment-variables
# DD_TRACE_AGENT_URL=

# MaxMindDB API key to download GeoLite2 City database
# IP_LOOKUP_API_KEY=
.gitignore (vendored): 2 changed lines

@@ -99,3 +99,5 @@ CLAUDE.local.md
# Histoire deployment
.netlify
.histoire
.pnpm-store/*
local/
Gemfile: 4 changed lines

@@ -196,6 +196,10 @@ gem 'ai-agents', '>= 0.4.3'
# TODO: Move this gem as a dependency of ai-agents
gem 'ruby_llm-schema'

# OpenTelemetry for LLM observability
gem 'opentelemetry-sdk'
gem 'opentelemetry-exporter-otlp'

gem 'shopify_api'

### Gems required only in specific deployment environments ###
Gemfile.lock: 21 changed lines

@@ -625,6 +625,25 @@ GEM
      faraday (>= 1.0, < 3)
      multi_json (>= 1.0)
    openssl (3.2.0)
    opentelemetry-api (1.7.0)
    opentelemetry-common (0.23.0)
      opentelemetry-api (~> 1.0)
    opentelemetry-exporter-otlp (0.31.1)
      google-protobuf (>= 3.18)
      googleapis-common-protos-types (~> 1.3)
      opentelemetry-api (~> 1.1)
      opentelemetry-common (~> 0.20)
      opentelemetry-sdk (~> 1.10)
      opentelemetry-semantic_conventions
    opentelemetry-registry (0.4.0)
      opentelemetry-api (~> 1.1)
    opentelemetry-sdk (1.10.0)
      opentelemetry-api (~> 1.1)
      opentelemetry-common (~> 0.20)
      opentelemetry-registry (~> 0.2)
      opentelemetry-semantic_conventions
    opentelemetry-semantic_conventions (1.36.0)
      opentelemetry-api (~> 1.0)
    orm_adapter (0.5.0)
    os (1.1.4)
    ostruct (0.6.1)

@@ -1073,6 +1092,8 @@ DEPENDENCIES
  omniauth-rails_csrf_protection (~> 1.0, >= 1.0.2)
  omniauth-saml
  opensearch-ruby
  opentelemetry-exporter-otlp
  opentelemetry-sdk
  pg
  pg_search
  pgvector
@@ -57,6 +57,12 @@ class OpenAIAPI extends ApiClient {
      content,
    };

    // Always include conversation_display_id when available for session tracking
    if (conversationId) {
      data.conversation_display_id = conversationId;
    }

    // For conversation-level events, only send conversation_display_id
    if (this.conversation_events.includes(type)) {
      data = {
        conversation_display_id: conversationId,
@@ -199,7 +199,6 @@
  description: 'The limits for the Captain AI service for different plans'
  value:
  type: code

# End of Captain Config

# ------- Chatwoot Internal Config for Cloud ----#
@@ -476,3 +475,28 @@
  locked: false
  description: 'Token expiry in days'
## ------ End of Customizations for Customers ------ ##

## ----- LLM Observability ---- ##
- name: OTEL_PROVIDER
  display_title: 'OpenTelemetry Provider'
  description: 'LLM observability provider (langfuse, langsmith, etc.)'
  value: ''
  locked: false
- name: LANGFUSE_PUBLIC_KEY
  display_title: 'Langfuse Public Key'
  description: 'Public key for Langfuse authentication'
  value: ''
  locked: false
  type: secret
- name: LANGFUSE_SECRET_KEY
  display_title: 'Langfuse Secret Key'
  description: 'Secret key for Langfuse authentication'
  value: ''
  locked: false
  type: secret
- name: LANGFUSE_BASE_URL
  display_title: 'Langfuse Base URL'
  description: 'Langfuse endpoint (US: https://us.cloud.langfuse.com, EU: https://cloud.langfuse.com)'
  value: 'https://us.cloud.langfuse.com'
  locked: false
## ---- End of LLM Observability ---- ##
@@ -36,7 +36,8 @@ module Enterprise::SuperAdmin::AppConfigsController
  def internal_config_options
    %w[CHATWOOT_INBOX_TOKEN CHATWOOT_INBOX_HMAC_KEY ANALYTICS_TOKEN CLEARBIT_API_KEY DASHBOARD_SCRIPTS INACTIVE_WHATSAPP_NUMBERS BLOCKED_EMAIL_DOMAINS
       SKIP_INCOMING_BCC_PROCESSING CAPTAIN_CLOUD_PLAN_LIMITS ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL CHATWOOT_INSTANCE_ADMIN_EMAIL
       OG_IMAGE_CDN_URL OG_IMAGE_CLIENT_REF CLOUDFLARE_API_KEY CLOUDFLARE_ZONE_ID]
       OG_IMAGE_CDN_URL OG_IMAGE_CLIENT_REF CLOUDFLARE_API_KEY CLOUDFLARE_ZONE_ID
       OTEL_PROVIDER LANGFUSE_PUBLIC_KEY LANGFUSE_SECRET_KEY LANGFUSE_BASE_URL]
  end

  def captain_config_options
@@ -42,4 +42,11 @@ module ChatwootApp
  def self.advanced_search_allowed?
    enterprise? && ENV.fetch('OPENSEARCH_URL', nil).present?
  end

  def self.otel_enabled?
    otel_provider = InstallationConfig.find_by(name: 'OTEL_PROVIDER')&.value
    secret_key = InstallationConfig.find_by(name: 'LANGFUSE_SECRET_KEY')&.value

    otel_provider.present? && secret_key.present? && otel_provider == 'langfuse'
  end
end
lib/integrations/llm_instrumentation.rb (new file): 106 lines
@@ -0,0 +1,106 @@
# frozen_string_literal: true

require 'opentelemetry_config'

module Integrations::LlmInstrumentation
  # OpenTelemetry attribute names following GenAI semantic conventions
  # https://opentelemetry.io/docs/specs/semconv/gen-ai/
  ATTR_GEN_AI_PROVIDER = 'gen_ai.provider.name'
  ATTR_GEN_AI_REQUEST_MODEL = 'gen_ai.request.model'
  ATTR_GEN_AI_REQUEST_TEMPERATURE = 'gen_ai.request.temperature'
  ATTR_GEN_AI_PROMPT_ROLE = 'gen_ai.prompt.%d.role'
  ATTR_GEN_AI_PROMPT_CONTENT = 'gen_ai.prompt.%d.content'
  ATTR_GEN_AI_COMPLETION_ROLE = 'gen_ai.completion.0.role'
  ATTR_GEN_AI_COMPLETION_CONTENT = 'gen_ai.completion.0.content'
  ATTR_GEN_AI_USAGE_INPUT_TOKENS = 'gen_ai.usage.input_tokens'
  ATTR_GEN_AI_USAGE_OUTPUT_TOKENS = 'gen_ai.usage.output_tokens'
  ATTR_GEN_AI_USAGE_TOTAL_TOKENS = 'gen_ai.usage.total_tokens'
  ATTR_GEN_AI_RESPONSE_ERROR = 'gen_ai.response.error'
  ATTR_GEN_AI_RESPONSE_ERROR_CODE = 'gen_ai.response.error_code'

  # Langfuse-specific attributes
  # https://langfuse.com/integrations/native/opentelemetry#property-mapping
  ATTR_LANGFUSE_USER_ID = 'langfuse.user.id'
  ATTR_LANGFUSE_SESSION_ID = 'langfuse.session.id'
  ATTR_LANGFUSE_TAGS = 'langfuse.trace.tags'

  def tracer
    @tracer ||= OpentelemetryConfig.tracer
  end

  def instrument_llm_call(params)
    return yield unless ChatwootApp.otel_enabled?

    tracer.in_span(params[:span_name]) do |span|
      setup_span_attributes(span, params)
      result = yield
      record_completion(span, result)
      result
    end
  rescue StandardError => e
    ChatwootExceptionTracker.new(e, account: params[:account]).capture_exception
    yield
  end

  private

  def setup_span_attributes(span, params)
    set_request_attributes(span, params)
    set_prompt_messages(span, params[:messages])
    set_metadata_attributes(span, params)
  end

  def record_completion(span, result)
    set_completion_attributes(span, result) if result.is_a?(Hash)
  end

  def set_request_attributes(span, params)
    span.set_attribute(ATTR_GEN_AI_PROVIDER, 'openai')
    span.set_attribute(ATTR_GEN_AI_REQUEST_MODEL, params[:model])
    span.set_attribute(ATTR_GEN_AI_REQUEST_TEMPERATURE, params[:temperature]) if params[:temperature]
  end

  def set_prompt_messages(span, messages)
    messages.each_with_index do |msg, idx|
      span.set_attribute(format(ATTR_GEN_AI_PROMPT_ROLE, idx), msg['role'])
      span.set_attribute(format(ATTR_GEN_AI_PROMPT_CONTENT, idx), msg['content'])
    end
  end

  def set_metadata_attributes(span, params)
    session_id = params[:conversation_id].present? ? "#{params[:account_id]}_#{params[:conversation_id]}" : nil
    span.set_attribute(ATTR_LANGFUSE_USER_ID, params[:account_id].to_s) if params[:account_id]
    span.set_attribute(ATTR_LANGFUSE_SESSION_ID, session_id) if session_id.present?
    span.set_attribute(ATTR_LANGFUSE_TAGS, [params[:feature_name]].to_json)
  end

  def set_completion_attributes(span, result)
    set_completion_message(span, result)
    set_usage_metrics(span, result)
    set_error_attributes(span, result)
  end

  def set_completion_message(span, result)
    return if result[:message].blank?

    span.set_attribute(ATTR_GEN_AI_COMPLETION_ROLE, 'assistant')
    span.set_attribute(ATTR_GEN_AI_COMPLETION_CONTENT, result[:message])
  end

  def set_usage_metrics(span, result)
    return if result[:usage].blank?

    usage = result[:usage]
    span.set_attribute(ATTR_GEN_AI_USAGE_INPUT_TOKENS, usage['prompt_tokens']) if usage['prompt_tokens']
    span.set_attribute(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, usage['completion_tokens']) if usage['completion_tokens']
    span.set_attribute(ATTR_GEN_AI_USAGE_TOTAL_TOKENS, usage['total_tokens']) if usage['total_tokens']
  end

  def set_error_attributes(span, result)
    return if result[:error].blank?

    span.set_attribute(ATTR_GEN_AI_RESPONSE_ERROR, result[:error].to_json)
    span.set_attribute(ATTR_GEN_AI_RESPONSE_ERROR_CODE, result[:error_code]) if result[:error_code]
    span.status = OpenTelemetry::Trace::Status.error("API Error: #{result[:error_code]}")
  end
end
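For context, a minimal sketch of how a service is expected to use this concern, mirroring the `Integrations::OpenaiBaseService` change further down in this diff. The service and span name here are hypothetical:

```ruby
# Hypothetical caller: any service that includes the concern can wrap an LLM
# request in a span. Gating and attribute names follow the module above.
class ExampleLlmService
  include Integrations::LlmInstrumentation

  def call(account, conversation, messages)
    params = {
      span_name: 'llm.example',
      account: account,
      account_id: account.id,
      conversation_id: conversation&.display_id,
      feature_name: 'example',
      model: 'gpt-4o-mini',
      messages: messages
    }

    instrument_llm_call(params) do
      # The block performs the actual API call and returns a Hash;
      # :message, :usage and :error are picked up by record_completion.
      { message: 'stubbed response', usage: { 'total_tokens' => 10 } }
    end
  end
end
```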
@@ -1,4 +1,6 @@
class Integrations::OpenaiBaseService
  include Integrations::LlmInstrumentation

  # gpt-4o-mini supports 128,000 tokens
  # 1 token is approx 4 characters
  # sticking with 120000 to be safe
@@ -87,21 +89,57 @@ class Integrations::OpenaiBaseService
  end

  def make_api_call(body)
    headers = {
    parsed_body = JSON.parse(body)
    instrumentation_params = build_instrumentation_params(parsed_body)

    instrument_llm_call(instrumentation_params) do
      execute_api_request(body, parsed_body['messages'])
    end
  end

  def build_instrumentation_params(parsed_body)
    {
      span_name: "llm.#{event_name}",
      account_id: hook.account_id,
      conversation_id: conversation&.display_id,
      feature_name: event_name,
      model: parsed_body['model'],
      messages: parsed_body['messages'],
      temperature: parsed_body['temperature']
    }
  end

  def execute_api_request(body, messages)
    Rails.logger.info("OpenAI API request: #{body}")
    response = HTTParty.post(api_url, headers: api_headers, body: body)
    Rails.logger.info("OpenAI API response: #{response.body}")

    parse_api_response(response, messages)
  end

  def api_headers
    {
      'Content-Type' => 'application/json',
      'Authorization' => "Bearer #{hook.settings['api_key']}"
    }
  end

    Rails.logger.info("OpenAI API request: #{body}")
    response = HTTParty.post(api_url, headers: headers, body: body)
    Rails.logger.info("OpenAI API response: #{response.body}")
  def parse_api_response(response, messages)
    return build_error_response(response, messages) unless response.success?

    return { error: response.parsed_response, error_code: response.code } unless response.success?
    parsed_response = JSON.parse(response.body)
    build_success_response(parsed_response, messages)
  end

    choices = JSON.parse(response.body)['choices']
  def build_error_response(response, messages)
    { error: response.parsed_response, error_code: response.code, request_messages: messages }
  end

    return { message: choices.first['message']['content'] } if choices.present?
  def build_success_response(parsed_response, messages)
    choices = parsed_response['choices']
    usage = parsed_response['usage']
    message_content = choices.present? ? choices.first['message']['content'] : nil

    { message: nil }
    { message: message_content, usage: usage, request_messages: messages }
  end
end
lib/opentelemetry_config.rb (new file): 91 lines
@@ -0,0 +1,91 @@
require 'opentelemetry/sdk'
require 'opentelemetry/exporter/otlp'
require 'base64'

module OpentelemetryConfig
  class << self
    def tracer
      initialize! unless initialized?
      OpenTelemetry.tracer_provider.tracer('chatwoot')
    end

    def initialized?
      @initialized ||= false
    end

    def initialize!
      return if @initialized
      return mark_initialized unless langfuse_provider?
      return mark_initialized unless langfuse_credentials_present?

      configure_opentelemetry
      mark_initialized
    rescue StandardError => e
      Rails.logger.error "Failed to configure OpenTelemetry: #{e.message}"
      mark_initialized
    end

    def reset!
      @initialized = false
    end

    private

    def mark_initialized
      @initialized = true
    end

    def langfuse_provider?
      otel_provider = InstallationConfig.find_by(name: 'OTEL_PROVIDER')&.value
      otel_provider == 'langfuse'
    end

    def langfuse_credentials_present?
      endpoint = InstallationConfig.find_by(name: 'LANGFUSE_BASE_URL')&.value
      public_key = InstallationConfig.find_by(name: 'LANGFUSE_PUBLIC_KEY')&.value
      secret_key = InstallationConfig.find_by(name: 'LANGFUSE_SECRET_KEY')&.value

      if endpoint.blank? || public_key.blank? || secret_key.blank?
        Rails.logger.error 'OpenTelemetry disabled (LANGFUSE_BASE_URL, LANGFUSE_PUBLIC_KEY or LANGFUSE_SECRET_KEY is missing)'
        return false
      end

      true
    end

    def langfuse_credentials
      {
        endpoint: InstallationConfig.find_by(name: 'LANGFUSE_BASE_URL')&.value,
        public_key: InstallationConfig.find_by(name: 'LANGFUSE_PUBLIC_KEY')&.value,
        secret_key: InstallationConfig.find_by(name: 'LANGFUSE_SECRET_KEY')&.value
      }
    end

    def traces_endpoint
      credentials = langfuse_credentials
      "#{credentials[:endpoint]}/api/public/otel/v1/traces"
    end

    def exporter_config
      credentials = langfuse_credentials
      auth_header = Base64.strict_encode64("#{credentials[:public_key]}:#{credentials[:secret_key]}")

      config = {
        endpoint: traces_endpoint,
        headers: { 'Authorization' => "Basic #{auth_header}" }
      }

      config[:ssl_verify_mode] = OpenSSL::SSL::VERIFY_NONE if Rails.env.development?
      config
    end

    def configure_opentelemetry
      OpenTelemetry::SDK.configure do |c|
        c.service_name = 'chatwoot'
        exporter = OpenTelemetry::Exporter::OTLP::Exporter.new(**exporter_config)
        c.add_span_processor(OpenTelemetry::SDK::Trace::Export::BatchSpanProcessor.new(exporter))
        Rails.logger.info 'OpenTelemetry initialized and configured to export to Langfuse'
      end
    end
  end
end
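As a quick way to check the wiring, something like the following from a Rails console should emit a span to Langfuse once the configs above are set. This is a rough sketch; the span name and attribute are arbitrary:

```ruby
# Rough smoke test: obtain the lazily initialized tracer and emit one span.
tracer = OpentelemetryConfig.tracer
tracer.in_span('llm.smoke_test') do |span|
  span.set_attribute('gen_ai.request.model', 'gpt-4o-mini')
end

# The BatchSpanProcessor exports asynchronously; force a flush before exiting.
OpenTelemetry.tracer_provider.force_flush
```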
spec/lib/integrations/llm_instrumentation_spec.rb (new file): 217 lines
@@ -0,0 +1,217 @@
require 'rails_helper'

RSpec.describe Integrations::LlmInstrumentation do
  let(:test_class) do
    Class.new do
      include Integrations::LlmInstrumentation
    end
  end

  let(:instance) { test_class.new }
  let!(:otel_config) do
    InstallationConfig.find_or_create_by(name: 'OTEL_PROVIDER') do |config|
      config.value = 'langfuse'
    end
  end

  let(:params) do
    {
      span_name: 'llm.test',
      account_id: 123,
      conversation_id: 456,
      feature_name: 'reply_suggestion',
      model: 'gpt-4o-mini',
      messages: [{ 'role' => 'user', 'content' => 'Hello' }],
      temperature: 0.7
    }
  end

  before do
    InstallationConfig.find_or_create_by(name: 'LANGFUSE_SECRET_KEY') do |config|
      config.value = 'test-secret-key'
    end
  end

  describe '#instrument_llm_call' do
    context 'when OTEL provider is not configured' do
      before { otel_config.update(value: '') }

      it 'executes the block without tracing' do
        result = instance.instrument_llm_call(params) { 'my_result' }
        expect(result).to eq('my_result')
      end
    end

    context 'when OTEL provider is configured' do
      it 'executes the block and returns the result' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        allow(mock_span).to receive(:set_attribute)
        allow(mock_span).to receive(:status=)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)

        result = instance.instrument_llm_call(params) { 'my_result' }

        expect(result).to eq('my_result')
      end

      it 'creates a tracing span with the provided span name' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        allow(mock_span).to receive(:set_attribute)
        allow(mock_span).to receive(:status=)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)

        instance.instrument_llm_call(params) { 'result' }

        expect(mock_tracer).to have_received(:in_span).with('llm.test')
      end

      it 'returns the block result even if instrumentation has errors' do
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_raise(StandardError.new('Instrumentation failed'))

        result = instance.instrument_llm_call(params) { 'my_result' }

        expect(result).to eq('my_result')
      end

      it 'handles errors gracefully and captures exceptions' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        allow(mock_span).to receive(:status=)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)
        allow(mock_span).to receive(:set_attribute).and_raise(StandardError.new('Span error'))
        allow(ChatwootExceptionTracker).to receive(:new).and_call_original

        result = instance.instrument_llm_call(params) { 'my_result' }

        expect(result).to eq('my_result')
        expect(ChatwootExceptionTracker).to have_received(:new)
      end

      it 'sets correct request attributes on the span' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        allow(mock_span).to receive(:set_attribute)
        allow(mock_span).to receive(:status=)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)

        instance.instrument_llm_call(params) { 'result' }

        expect(mock_span).to have_received(:set_attribute).with('gen_ai.provider.name', 'openai')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.request.model', 'gpt-4o-mini')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.request.temperature', 0.7)
      end

      it 'sets correct prompt message attributes' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        allow(mock_span).to receive(:set_attribute)
        allow(mock_span).to receive(:status=)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)

        custom_params = params.merge(
          messages: [
            { 'role' => 'system', 'content' => 'You are a helpful assistant' },
            { 'role' => 'user', 'content' => 'Hello' }
          ]
        )

        instance.instrument_llm_call(custom_params) { 'result' }

        expect(mock_span).to have_received(:set_attribute).with('gen_ai.prompt.0.role', 'system')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.prompt.0.content', 'You are a helpful assistant')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.prompt.1.role', 'user')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.prompt.1.content', 'Hello')
      end

      it 'sets correct metadata attributes' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        allow(mock_span).to receive(:set_attribute)
        allow(mock_span).to receive(:status=)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)

        instance.instrument_llm_call(params) { 'result' }

        expect(mock_span).to have_received(:set_attribute).with('langfuse.user.id', '123')
        expect(mock_span).to have_received(:set_attribute).with('langfuse.session.id', '123_456')
        expect(mock_span).to have_received(:set_attribute).with('langfuse.trace.tags', '["reply_suggestion"]')
      end

      it 'sets completion message attributes when result contains message' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        allow(mock_span).to receive(:set_attribute)
        allow(mock_span).to receive(:status=)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)

        result = instance.instrument_llm_call(params) do
          { message: 'AI response here' }
        end

        expect(result).to eq({ message: 'AI response here' })
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.completion.0.role', 'assistant')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.completion.0.content', 'AI response here')
      end

      it 'sets usage metrics when result contains usage data' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        allow(mock_span).to receive(:set_attribute)
        allow(mock_span).to receive(:status=)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)

        result = instance.instrument_llm_call(params) do
          {
            usage: {
              'prompt_tokens' => 150,
              'completion_tokens' => 200,
              'total_tokens' => 350
            }
          }
        end

        expect(result[:usage]['prompt_tokens']).to eq(150)
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.usage.input_tokens', 150)
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.usage.output_tokens', 200)
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.usage.total_tokens', 350)
      end

      it 'sets error attributes when result contains error' do
        mock_span = instance_double(OpenTelemetry::Trace::Span)
        mock_status = instance_double(OpenTelemetry::Trace::Status)
        allow(mock_span).to receive(:set_attribute)
        allow(mock_span).to receive(:status=)
        allow(OpenTelemetry::Trace::Status).to receive(:error).and_return(mock_status)
        mock_tracer = instance_double(OpenTelemetry::Trace::Tracer)
        allow(instance).to receive(:tracer).and_return(mock_tracer)
        allow(mock_tracer).to receive(:in_span).and_yield(mock_span)

        result = instance.instrument_llm_call(params) do
          {
            error: { message: 'API rate limit exceeded' },
            error_code: 'rate_limit_exceeded'
          }
        end

        expect(result[:error_code]).to eq('rate_limit_exceeded')
        expect(mock_span).to have_received(:set_attribute)
          .with('gen_ai.response.error', '{"message":"API rate limit exceeded"}')
        expect(mock_span).to have_received(:set_attribute).with('gen_ai.response.error_code', 'rate_limit_exceeded')
        expect(mock_span).to have_received(:status=).with(mock_status)
        expect(OpenTelemetry::Trace::Status).to have_received(:error).with('API Error: rate_limit_exceeded')
      end
    end
  end
end
@@ -1,303 +1,230 @@
require 'rails_helper'

RSpec.describe Integrations::Openai::ProcessorService do
  subject { described_class.new(hook: hook, event: event) }
  subject(:service) { described_class.new(hook: hook, event: event) }

  let(:account) { create(:account) }
  let(:hook) { create(:integrations_hook, :openai, account: account) }
  let(:expected_headers) { { 'Authorization' => "Bearer #{hook.settings['api_key']}" } }
  let(:openai_response) do
    {
      'choices' => [
        {
          'message' => {
            'content' => 'This is a reply from openai.'
          }
        }
      ]
      'choices' => [{ 'message' => { 'content' => 'This is a reply from openai.' } }]
    }.to_json
  end
  let!(:conversation) { create(:conversation, account: account) }
  let!(:customer_message) { create(:message, account: account, conversation: conversation, message_type: :incoming, content: 'hello agent') }
  let!(:agent_message) { create(:message, account: account, conversation: conversation, message_type: :outgoing, content: 'hello customer') }
  let!(:summary_prompt) do
    if ChatwootApp.enterprise?
      Rails.root.join('enterprise/lib/enterprise/integrations/openai_prompts/summary.txt').read
    else
      'Please summarize the key points from the following conversation between support agents and customer as bullet points for the next ' \
        "support agent looking into the conversation. Reply in the user's language."
    end
  let(:openai_response_with_usage) do
    {
      'choices' => [{ 'message' => { 'content' => 'This is a reply from openai.' } }],
      'usage' => {
        'prompt_tokens' => 50,
        'completion_tokens' => 20,
        'total_tokens' => 70
      }
    }.to_json
  end

  describe '#perform' do
    context 'when event name is rephrase' do
      let(:event) { { 'name' => 'rephrase', 'data' => { 'tone' => 'friendly', 'content' => 'This is a test message' } } }

      it 'returns the rephrased message using the tone in data' do
        request_body = {
    shared_examples 'text transformation operation' do |event_name, system_prompt|
      let(:event) { { 'name' => event_name, 'data' => { 'content' => 'This is a test' } } }
      let(:expected_request_body) do
        {
          'model' => 'gpt-4o-mini',
          'messages' => [
            {
              'role' => 'system',
              'content' => 'You are a helpful support agent. ' \
                'Please rephrase the following response. ' \
                'Ensure that the reply should be in user language.'
            },
            { 'role' => 'user', 'content' => event['data']['content'] }
            { 'role' => 'system', 'content' => system_prompt },
            { 'role' => 'user', 'content' => 'This is a test' }
          ]
        }.to_json
      end

      it "returns the #{event_name.tr('_', ' ')} text" do
        stub_request(:post, 'https://api.openai.com/v1/chat/completions')
          .with(body: request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response, headers: {})
          .with(body: expected_request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response)

        result = subject.perform
        expect(result).to eq({ :message => 'This is a reply from openai.' })
        result = service.perform
        expect(result[:message]).to eq('This is a reply from openai.')
      end
    end

    context 'when event name is reply_suggestion' do
    shared_examples 'successful openai response' do
      it 'returns the expected message' do
        result = service.perform
        expect(result[:message]).to eq('This is a reply from openai.')
      end
    end

    describe 'text transformation operations' do
      base_prompt = 'You are a helpful support agent. '
      language_suffix = 'Ensure that the reply should be in user language.'

      it_behaves_like 'text transformation operation', 'rephrase',
                      "#{base_prompt}Please rephrase the following response. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'fix_spelling_grammar',
                      "#{base_prompt}Please fix the spelling and grammar of the following response. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'shorten',
                      "#{base_prompt}Please shorten the following response. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'expand',
                      "#{base_prompt}Please expand the following response. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'make_friendly',
                      "#{base_prompt}Please make the following response more friendly. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'make_formal',
                      "#{base_prompt}Please make the following response more formal. #{language_suffix}"
      it_behaves_like 'text transformation operation', 'simplify',
                      "#{base_prompt}Please simplify the following response. #{language_suffix}"
    end

    describe 'conversation-based operations' do
      let!(:conversation) { create(:conversation, account: account) }
      let!(:customer_message) do
        create(:message, account: account, conversation: conversation, message_type: :incoming, content: 'hello agent')
      end
      let!(:agent_message) do
        create(:message, account: account, conversation: conversation, message_type: :outgoing, content: 'hello customer')
      end

      context 'with reply_suggestion event' do
        let(:event) { { 'name' => 'reply_suggestion', 'data' => { 'conversation_display_id' => conversation.display_id } } }

        it 'returns the suggested reply' do
          request_body = {
        let(:expected_request_body) do
          {
            'model' => 'gpt-4o-mini',
            'messages' => [
              { role: 'system',
                content: Rails.root.join('lib/integrations/openai/openai_prompts/reply.txt').read },
              { role: 'system', content: Rails.root.join('lib/integrations/openai/openai_prompts/reply.txt').read },
              { role: 'user', content: customer_message.content },
              { role: 'assistant', content: agent_message.content }
            ]
          }.to_json
        end

        # Update the stub_request with the correct messages order
        before do
          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
            .with(body: request_body, headers: expected_headers)
            .to_return(status: 200, body: openai_response, headers: {})

          result = subject.perform
          expect(result).to eq({ :message => 'This is a reply from openai.' })
        end
            .with(body: expected_request_body, headers: expected_headers)
            .to_return(status: 200, body: openai_response)
        end

        context 'when event name is summarize' do
        it_behaves_like 'successful openai response'
      end

      context 'with summarize event' do
        let(:event) { { 'name' => 'summarize', 'data' => { 'conversation_display_id' => conversation.display_id } } }
        let(:conversation_messages) do
          "Customer #{customer_message.sender.name} : #{customer_message.content}\nAgent #{agent_message.sender.name} : #{agent_message.content}\n"
          "Customer #{customer_message.sender.name} : #{customer_message.content}\n" \
            "Agent #{agent_message.sender.name} : #{agent_message.content}\n"
        end

        it 'returns the summarized message' do
          request_body = {
        let(:summary_prompt) do
          if ChatwootApp.enterprise?
            Rails.root.join('enterprise/lib/enterprise/integrations/openai_prompts/summary.txt').read
          else
            'Please summarize the key points from the following conversation between support agents and customer as bullet points ' \
              "for the next support agent looking into the conversation. Reply in the user's language."
          end
        end
        let(:expected_request_body) do
          {
            'model' => 'gpt-4o-mini',
            'messages' => [
              { 'role' => 'system',
                'content' => summary_prompt },
              { 'role' => 'system', 'content' => summary_prompt },
              { 'role' => 'user', 'content' => conversation_messages }
            ]
          }.to_json
        end

        before do
          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
            .with(body: request_body, headers: expected_headers)
            .to_return(status: 200, body: openai_response, headers: {})

          result = subject.perform
          expect(result).to eq({ :message => 'This is a reply from openai.' })
        end
            .with(body: expected_request_body, headers: expected_headers)
            .to_return(status: 200, body: openai_response)
        end

        context 'when event name is label_suggestion with no labels' do
        it_behaves_like 'successful openai response'
      end

      context 'with label_suggestion event and no labels' do
        let(:event) { { 'name' => 'label_suggestion', 'data' => { 'conversation_display_id' => conversation.display_id } } }

        it 'returns nil' do
          result = subject.perform
          expect(result).to be_nil
          expect(service.perform).to be_nil
        end
      end
    end

    context 'when event name is not one that can be processed' do
    describe 'edge cases' do
      context 'with unknown event name' do
        let(:event) { { 'name' => 'unknown', 'data' => {} } }

        it 'returns nil' do
          expect(subject.perform).to be_nil
          expect(service.perform).to be_nil
        end
      end
    end

    context 'when event name is fix_spelling_grammar' do
      let(:event) { { 'name' => 'fix_spelling_grammar', 'data' => { 'content' => 'This is a test' } } }

      it 'returns the corrected text' do
        request_body = {
          'model' => 'gpt-4o-mini',
          'messages' => [
            { 'role' => 'system', 'content' => 'You are a helpful support agent. Please fix the spelling and grammar of the following response. ' \
                                               'Ensure that the reply should be in user language.' },
            { 'role' => 'user', 'content' => event['data']['content'] }
          ]
        }.to_json

        stub_request(:post, 'https://api.openai.com/v1/chat/completions')
          .with(body: request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response, headers: {})

        result = subject.perform
        expect(result).to eq({ :message => 'This is a reply from openai.' })
      end
    end

    context 'when event name is shorten' do
      let(:event) { { 'name' => 'shorten', 'data' => { 'content' => 'This is a test' } } }

      it 'returns the shortened text' do
        request_body = {
          'model' => 'gpt-4o-mini',
          'messages' => [
            { 'role' => 'system', 'content' => 'You are a helpful support agent. Please shorten the following response. ' \
                                               'Ensure that the reply should be in user language.' },
            { 'role' => 'user', 'content' => event['data']['content'] }
          ]
        }.to_json

        stub_request(:post, 'https://api.openai.com/v1/chat/completions')
          .with(body: request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response, headers: {})

        result = subject.perform
        expect(result).to eq({ :message => 'This is a reply from openai.' })
      end
    end

    context 'when event name is expand' do
      let(:event) { { 'name' => 'expand', 'data' => { 'content' => 'help you' } } }

      it 'returns the expanded text' do
        request_body = {
          'model' => 'gpt-4o-mini',
          'messages' => [
            { 'role' => 'system', 'content' => 'You are a helpful support agent. Please expand the following response. ' \
                                               'Ensure that the reply should be in user language.' },
            { 'role' => 'user', 'content' => event['data']['content'] }
          ]
        }.to_json

        stub_request(:post, 'https://api.openai.com/v1/chat/completions')
          .with(body: request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response, headers: {})

        result = subject.perform
        expect(result).to eq({ :message => 'This is a reply from openai.' })
      end
    end

    context 'when event name is make_friendly' do
      let(:event) { { 'name' => 'make_friendly', 'data' => { 'content' => 'This is a test' } } }

      it 'returns the friendly text' do
        request_body = {
          'model' => 'gpt-4o-mini',
          'messages' => [
            { 'role' => 'system', 'content' => 'You are a helpful support agent. Please make the following response more friendly. ' \
                                               'Ensure that the reply should be in user language.' },
            { 'role' => 'user', 'content' => event['data']['content'] }
          ]
        }.to_json

        stub_request(:post, 'https://api.openai.com/v1/chat/completions')
          .with(body: request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response, headers: {})

        result = subject.perform
        expect(result).to eq({ :message => 'This is a reply from openai.' })
      end
    end

    context 'when event name is make_formal' do
      let(:event) { { 'name' => 'make_formal', 'data' => { 'content' => 'This is a test' } } }

      it 'returns the formal text' do
        request_body = {
          'model' => 'gpt-4o-mini',
          'messages' => [
            { 'role' => 'system', 'content' => 'You are a helpful support agent. Please make the following response more formal. ' \
                                               'Ensure that the reply should be in user language.' },
            { 'role' => 'user', 'content' => event['data']['content'] }
          ]
        }.to_json

        stub_request(:post, 'https://api.openai.com/v1/chat/completions')
          .with(body: request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response, headers: {})

        result = subject.perform
        expect(result).to eq({ :message => 'This is a reply from openai.' })
      end
    end

    context 'when event name is simplify' do
      let(:event) { { 'name' => 'simplify', 'data' => { 'content' => 'This is a test' } } }

      it 'returns the simplified text' do
        request_body = {
          'model' => 'gpt-4o-mini',
          'messages' => [
            { 'role' => 'system', 'content' => 'You are a helpful support agent. Please simplify the following response. ' \
                                               'Ensure that the reply should be in user language.' },
            { 'role' => 'user', 'content' => event['data']['content'] }
          ]
        }.to_json

        stub_request(:post, 'https://api.openai.com/v1/chat/completions')
          .with(body: request_body, headers: expected_headers)
          .to_return(status: 200, body: openai_response, headers: {})

        result = subject.perform
        expect(result).to eq({ :message => 'This is a reply from openai.' })
      end
    end

    context 'when testing endpoint configuration' do
    describe 'response structure' do
      let(:event) { { 'name' => 'rephrase', 'data' => { 'content' => 'test message' } } }

      context 'when CAPTAIN_OPEN_AI_ENDPOINT is not configured' do
        it 'uses default OpenAI endpoint' do
          InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.destroy

      context 'when response includes usage data' do
        before do
          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
            .with(body: anything, headers: expected_headers)
            .to_return(status: 200, body: openai_response, headers: {})
            .to_return(status: 200, body: openai_response_with_usage)
        end

          result = subject.perform
          expect(result).to eq({ :message => 'This is a reply from openai.' })
        it 'returns message, usage, and request_messages' do
          result = service.perform

          expect(result[:message]).to eq('This is a reply from openai.')
          expect(result[:usage]).to eq({
            'prompt_tokens' => 50,
            'completion_tokens' => 20,
            'total_tokens' => 70
          })
          expect(result[:request_messages]).to be_an(Array)
          expect(result[:request_messages].length).to eq(2)
        end
      end

      context 'when CAPTAIN_OPEN_AI_ENDPOINT is configured' do
      context 'when response does not include usage data' do
        before do
          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
            .with(body: anything, headers: expected_headers)
            .to_return(status: 200, body: openai_response)
        end

        it 'returns message and request_messages with nil usage' do
          result = service.perform

          expect(result[:message]).to eq('This is a reply from openai.')
          expect(result[:usage]).to be_nil
          expect(result[:request_messages]).to be_an(Array)
        end
      end
    end

    describe 'endpoint configuration' do
      let(:event) { { 'name' => 'rephrase', 'data' => { 'content' => 'test message' } } }

      shared_examples 'endpoint request' do |endpoint_url|
        it "makes request to #{endpoint_url}" do
          stub_request(:post, "#{endpoint_url}/v1/chat/completions")
            .with(body: anything, headers: expected_headers)
            .to_return(status: 200, body: openai_response)

          result = service.perform
          expect(result[:message]).to eq('This is a reply from openai.')
          expect(result[:request_messages]).to be_an(Array)
          expect(result[:usage]).to be_nil
        end
      end

      context 'without CAPTAIN_OPEN_AI_ENDPOINT configured' do
        before { InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.destroy }

        it_behaves_like 'endpoint request', 'https://api.openai.com'
      end

      context 'with CAPTAIN_OPEN_AI_ENDPOINT configured' do
        before do
          InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.destroy
          create(:installation_config, name: 'CAPTAIN_OPEN_AI_ENDPOINT', value: 'https://custom.azure.com/')
        end

        it 'uses custom endpoint' do
          stub_request(:post, 'https://custom.azure.com/v1/chat/completions')
            .with(body: anything, headers: expected_headers)
            .to_return(status: 200, body: openai_response, headers: {})

          result = subject.perform
          expect(result).to eq({ :message => 'This is a reply from openai.' })
        end
      end

      context 'when CAPTAIN_OPEN_AI_ENDPOINT has trailing slash' do
        before do
          create(:installation_config, name: 'CAPTAIN_OPEN_AI_ENDPOINT', value: 'https://custom.azure.com/')
        end

        it 'properly handles trailing slash' do
          stub_request(:post, 'https://custom.azure.com/v1/chat/completions')
            .with(body: anything, headers: expected_headers)
            .to_return(status: 200, body: openai_response, headers: {})

          result = subject.perform
          expect(result).to eq({ :message => 'This is a reply from openai.' })
        end
        it_behaves_like 'endpoint request', 'https://custom.azure.com'
      end
    end
  end