diff --git a/enterprise/lib/chat_gpt.rb b/enterprise/lib/chat_gpt.rb
index a8dbf9537..d13ec118f 100644
--- a/enterprise/lib/chat_gpt.rb
+++ b/enterprise/lib/chat_gpt.rb
@@ -8,9 +8,9 @@ class ChatGpt
     @messages = [system_message(context_sections)]
   end
 
-  def generate_response(input, previous_messages = [])
+  def generate_response(input, previous_messages = [], role = 'user')
     @messages += previous_messages
-    @messages << { 'role': 'user', 'content': input } if input.present?
+    @messages << { 'role': role, 'content': input } if input.present?
 
     response = request_gpt
     JSON.parse(response['choices'][0]['message']['content'].strip)
diff --git a/enterprise/lib/enterprise/integrations/openai_processor_service.rb b/enterprise/lib/enterprise/integrations/openai_processor_service.rb
index bf2f4ccf3..b3cf9aeee 100644
--- a/enterprise/lib/enterprise/integrations/openai_processor_service.rb
+++ b/enterprise/lib/enterprise/integrations/openai_processor_service.rb
@@ -3,6 +3,24 @@ module Enterprise::Integrations::OpenaiProcessorService
                            make_friendly make_formal simplify].freeze
   CACHEABLE_EVENTS = %w[label_suggestion].freeze
 
+  def reply_suggestion_message
+    return super unless conversation.inbox.response_bot_enabled?
+
+    messages = conversation_messages(in_array_format: true)
+    last_message = messages.pop
+
+    robin_response = ChatGpt.new(
+      Enterprise::MessageTemplates::ResponseBotService.response_sections(last_message[:content], conversation.inbox)
+    ).generate_response(
+      last_message[:content], messages, last_message[:role]
+    )
+    message_content = robin_response['response']
+    if robin_response['context_ids'].present?
+      message_content += Enterprise::MessageTemplates::ResponseBotService.generate_sources_section(robin_response['context_ids'])
+    end
+    message_content
+  end
+
   def label_suggestion_message
     payload = label_suggestion_body
     return nil if payload.blank?
@@ -19,8 +37,6 @@ module Enterprise::Integrations::OpenaiProcessorService
   private
 
   def labels_with_messages
-    conversation = find_conversation
-    return nil unless valid_conversation?(conversation)
 
     labels = hook.account.labels.pluck(:title).join(', ')
 
diff --git a/lib/integrations/openai/processor_service.rb b/lib/integrations/openai/processor_service.rb
index f71d2a7ed..1359c6aff 100644
--- a/lib/integrations/openai/processor_service.rb
+++ b/lib/integrations/openai/processor_service.rb
@@ -62,7 +62,6 @@ class Integrations::Openai::ProcessorService < Integrations::OpenaiBaseService
   end
 
   def conversation_messages(in_array_format: false)
-    conversation = find_conversation
     messages = init_messages_body(in_array_format)
 
     add_messages_until_token_limit(conversation, messages, in_array_format)
@@ -70,7 +69,7 @@ class Integrations::Openai::ProcessorService < Integrations::OpenaiBaseService
   def add_messages_until_token_limit(conversation, messages, in_array_format, start_from = 0)
     character_count = start_from
-    conversation.messages.chat.reorder('id desc').each do |message|
+    conversation.messages.where(message_type: [:incoming, :outgoing]).where(private: false).reorder('id desc').each do |message|
       character_count, message_added = add_message_if_within_limit(character_count, message, messages, in_array_format)
       break unless message_added
     end
diff --git a/lib/integrations/openai_base_service.rb b/lib/integrations/openai_base_service.rb
index 9cfdc71c3..da2878aab 100644
--- a/lib/integrations/openai_base_service.rb
+++ b/lib/integrations/openai_base_service.rb
@@ -1,8 +1,8 @@
 class Integrations::OpenaiBaseService
-  # 3.5 support 4,096 tokens
+  # 3.5 supports 16,385 tokens
   # 1 token is approx 4 characters
-  # 4,096 * 4 = 16,384 characters, sticking to 15,000 to be safe
-  TOKEN_LIMIT = 15_000
+  # 16,385 * 4 = 65,540 characters, sticking to 50,000 to be safe
+  TOKEN_LIMIT = 50_000
   API_URL = 'https://api.openai.com/v1/chat/completions'.freeze
   GPT_MODEL = 'gpt-3.5-turbo'.freeze
 
@@ -31,7 +31,6 @@ class Integrations::OpenaiBaseService
   def cache_key
     return nil unless event_is_cacheable?
 
-    conversation = find_conversation
     return nil unless conversation
 
     # since the value from cache depends on the conversation last_activity_at, it will always be fresh
@@ -52,8 +51,8 @@ class Integrations::OpenaiBaseService
     Redis::Alfred.setex(cache_key, response)
   end
 
-  def find_conversation
-    hook.account.conversations.find_by(display_id: event['data']['conversation_display_id'])
+  def conversation
+    @conversation ||= hook.account.conversations.find_by(display_id: event['data']['conversation_display_id'])
   end
 
   def valid_event_name?
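
Illustrative notes on the changes above (sketches only, not part of the diff).

generate_response now takes a third role argument that defaults to 'user', and the new reply_suggestion_message forwards the role of the conversation's last message instead of labelling every appended turn as a user turn. The snippet below is a standalone, dependency-free model of that behaviour, not the actual ChatGpt class; the history and contents are invented.

# Minimal sketch in plain Ruby: the appended turn's role is now chosen by the
# caller rather than hard-coded to 'user'.
def build_payload(input, previous_messages, role = 'user')
  messages = previous_messages.dup
  messages << { role: role, content: input } unless input.to_s.empty?
  messages
end

history = [
  { role: 'user', content: 'How do I reset my password?' },
  { role: 'assistant', content: 'Use the link on the login page.' }
]

# With the old signature this turn would always have been tagged 'user';
# now a bot-authored message can keep its 'assistant' role.
build_payload('Did the reset email arrive?', history, 'assistant')
# => [..., { role: 'assistant', content: 'Did the reset email arrive?' }]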
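
reply_suggestion_message reads two keys from the parsed ChatGpt JSON: 'response' (the suggested reply) and 'context_ids' (the sections the answer was grounded on), and appends a sources section only when context_ids is present. The snippet re-creates that assembly step in isolation; the sample values and the inline formatter are invented stand-ins for ResponseBotService.generate_sources_section.

# Standalone sketch of the assembly step; only the 'response' and 'context_ids'
# keys come from the diff, everything else here is illustrative.
def assemble_reply(robin_response)
  content = robin_response['response']
  context_ids = robin_response['context_ids']
  if context_ids && !context_ids.empty?
    # Stand-in for Enterprise::MessageTemplates::ResponseBotService.generate_sources_section
    content += "\n\nSources: #{context_ids.join(', ')}"
  end
  content
end

assemble_reply('response' => 'You can reset it from the login page.', 'context_ids' => [12, 48])
# => "You can reset it from the login page.\n\nSources: 12, 48"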
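
The remaining edits are plumbing: the per-call find_conversation lookups are replaced by a memoized conversation reader, so cache_key, conversation_messages, labels_with_messages and the new reply_suggestion_message can all reference the same record without repeating the query, and the message scope now filters out private notes explicitly before anything is sent to OpenAI. A rough illustration of the memoization effect, using an invented FakeService in place of OpenaiBaseService:

# Invented class for illustration; the begin block stands in for the find_by call.
class FakeService
  attr_reader :lookups

  def initialize
    @lookups = 0
  end

  def conversation
    @conversation ||= begin
      @lookups += 1
      { display_id: 42 }
    end
  end
end

service = FakeService.new
3.times { service.conversation }
service.lookups # => 1, the lookup would run only once per service instance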