diff --git a/enterprise/lib/enterprise/integrations/openai_processor_service.rb b/enterprise/lib/enterprise/integrations/openai_processor_service.rb
index 55b7d33cd..69b2e3dfd 100644
--- a/enterprise/lib/enterprise/integrations/openai_processor_service.rb
+++ b/enterprise/lib/enterprise/integrations/openai_processor_service.rb
@@ -27,11 +27,13 @@ module Enterprise::Integrations::OpenaiProcessorService
 
     response = make_api_call(label_suggestion_body)
 
+    return response if response[:error].present?
+
     # LLMs are not deterministic, so this is bandaid solution
     # To what you ask? Sometimes, the response includes
     # "Labels:" in it's response in some format. This is a hacky way to remove it
     # TODO: Fix with with a better prompt
-    response[:message] ? response[:message].gsub(/^(label|labels):/i, '') : ''
+    { message: response[:message] ? response[:message].gsub(/^(label|labels):/i, '') : '' }
   end
 
   private
diff --git a/lib/redis/redis_keys.rb b/lib/redis/redis_keys.rb
index 3e49d87a7..396d7e50b 100644
--- a/lib/redis/redis_keys.rb
+++ b/lib/redis/redis_keys.rb
@@ -33,7 +33,7 @@ module Redis::RedisKeys
   LATEST_CHATWOOT_VERSION = 'LATEST_CHATWOOT_VERSION'.freeze
   # Check if a message create with same source-id is in progress?
   MESSAGE_SOURCE_KEY = 'MESSAGE_SOURCE_KEY::%s'.freeze
-  OPENAI_CONVERSATION_KEY = 'OPEN_AI_CONVERSATION_KEY::%s::%d::%d'.freeze
+  OPENAI_CONVERSATION_KEY = 'OPEN_AI_CONVERSATION_KEY::v1::%s::%d::%d'.freeze
 
   ## Sempahores / Locks
   # We don't want to process messages from the same sender concurrently to prevent creating double conversations
diff --git a/spec/enterprise/lib/integrations/openai/processor_service_spec.rb b/spec/enterprise/lib/integrations/openai/processor_service_spec.rb
index 2a98514cf..4707aff87 100644
--- a/spec/enterprise/lib/integrations/openai/processor_service_spec.rb
+++ b/spec/enterprise/lib/integrations/openai/processor_service_spec.rb
@@ -54,7 +54,7 @@ RSpec.describe Integrations::Openai::ProcessorService do
          .to_return(status: 200, body: openai_response, headers: {})
 
        result = subject.perform
-       expect(result).to eq('This is a reply from openai.')
+       expect(result).to eq({ :message => 'This is a reply from openai.' })
      end
 
      it 'returns empty string if openai response is blank' do
@@ -63,7 +63,7 @@
          .to_return(status: 200, body: '{}', headers: {})
 
        result = subject.perform
-       expect(result).to eq('')
+       expect(result).to eq({ :message => '' })
      end
    end