RubyLLM bundles a static models.json that doesn't know about models released after the gem was published, so self-hosted users configuring newer models hit ModelNotFoundError. Added a rake task that refreshes the registry from models.dev and saves it to disk. ~~Called during the Docker image build so every deploy gets fresh model data. Falls back silently to the bundled registry if models.dev is unreachable.~~ The refreshed models.json file is instead committed to the repository so it is available across all deployments. --------- Co-authored-by: Sojan Jose <sojan@pepalo.com>
52 lines
1.1 KiB
Ruby
52 lines
1.1 KiB
Ruby
require 'ruby_llm'
|
|
|
|
module Llm::Config
  # Model used when callers do not specify one explicitly.
  DEFAULT_MODEL = 'gpt-4.1-mini'.freeze

  class << self
    # Whether initialize! has already run (and reset! has not been
    # called since). Defaults the flag to false on first read.
    def initialized?
      @initialized ||= false
    end

    # Applies the global RubyLLM configuration exactly once; subsequent
    # calls are no-ops until reset! clears the flag.
    def initialize!
      return if @initialized

      configure_ruby_llm
      @initialized = true
    end

    # Clears the memoized flag so the next initialize! call reconfigures
    # RubyLLM from scratch (useful in tests).
    def reset!
      @initialized = false
    end

    # Yields a RubyLLM context scoped to the given credentials, leaving
    # the global configuration untouched.
    #
    # @param api_key [String] OpenAI-compatible API key for this context
    # @param api_base [String, nil] optional endpoint override for this context
    def with_api_key(api_key, api_base: nil)
      initialize!

      scoped = RubyLLM.context do |config|
        config.openai_api_key = api_key
        config.openai_api_base = api_base
      end

      yield scoped
    end

    private

    # Global RubyLLM setup: installation-wide credentials (when present),
    # the committed model registry file, and the Rails logger.
    def configure_ruby_llm
      key = system_api_key
      endpoint = openai_endpoint

      RubyLLM.configure do |config|
        config.openai_api_key = key if key.present?
        # Strip a single trailing slash so the base URL joins cleanly.
        config.openai_api_base = endpoint.chomp('/') if endpoint.present?
        config.model_registry_file = Rails.root.join('config/llm_models.json').to_s
        config.logger = Rails.logger
      end
    end

    # Installation-wide OpenAI API key, or nil when not configured.
    def system_api_key
      installation_config_value('CAPTAIN_OPEN_AI_API_KEY')
    end

    # Installation-wide OpenAI endpoint override, or nil when not configured.
    def openai_endpoint
      installation_config_value('CAPTAIN_OPEN_AI_ENDPOINT')
    end

    # Looks up an InstallationConfig value by name; nil when the record
    # is absent.
    def installation_config_value(name)
      InstallationConfig.find_by(name: name)&.value
    end
  end
end
|