diff --git a/enterprise/lib/enterprise/integrations/openai_processor_service.rb b/enterprise/lib/enterprise/integrations/openai_processor_service.rb
index 5a98ad4c4..594c5d806 100644
--- a/enterprise/lib/enterprise/integrations/openai_processor_service.rb
+++ b/enterprise/lib/enterprise/integrations/openai_processor_service.rb
@@ -65,7 +65,7 @@ module Enterprise::Integrations::OpenaiProcessorService
     return value_from_cache if content.blank?
 
     {
-      model: self.class::GPT_MODEL,
+      model: self.class::LABEL_SUGGESTION_MODEL,
       messages: [
         {
           role: 'system',
diff --git a/lib/integrations/openai_base_service.rb b/lib/integrations/openai_base_service.rb
index c19f605c1..116352a8f 100644
--- a/lib/integrations/openai_base_service.rb
+++ b/lib/integrations/openai_base_service.rb
@@ -7,7 +7,7 @@ class Integrations::OpenaiBaseService
   # 120000 * 4 = 480,000 characters (rounding off downwards to 400,000 to be safe)
   TOKEN_LIMIT = 400_000
   GPT_MODEL = ENV.fetch('OPENAI_GPT_MODEL', 'gpt-4o-mini').freeze
-
+  LABEL_SUGGESTION_MODEL = ENV.fetch('OPENAI_LABEL_SUGGESTION_MODEL', 'gpt-5-nano').freeze
   ALLOWED_EVENT_NAMES = %w[rephrase summarize reply_suggestion fix_spelling_grammar shorten expand make_friendly make_formal simplify].freeze
   CACHEABLE_EVENTS = %w[].freeze
 