Skip to content

Commit 4290b8c

Browse files
committed
refactor(logging): Replace debug prints with logger calls in Mistral chat provider
- Updated debug output from `puts` to `RubyLLM.logger.debug` for better logging management.
- Commented out Mistral-specific skip conditions in the test suite to re-evaluate previously flaky behavior.
1 parent d2b3aa7 commit 4290b8c

File tree

4 files changed

+13
-13
lines changed

4 files changed

+13
-13
lines changed

lib/ruby_llm/providers/mistral/chat.rb

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ def render_payload(messages, tools:, temperature:, model:, stream: nil,
2525
Array(tools)
2626
end
2727

28-
puts "\n[DEBUG] Available tools: #{tools_array&.map { |t| t.name.to_s }}" if ENV["DEBUG"]
28+
RubyLLM.logger.debug "[DEBUG] Available tools: #{tools_array&.map { |t| t.name.to_s }}"
2929

3030
# Use "any" for tool_choice when tools are available
3131
effective_tool_choice = if tool_choice
@@ -36,7 +36,7 @@ def render_payload(messages, tools:, temperature:, model:, stream: nil,
3636
"none"
3737
end
3838

39-
puts "[DEBUG] Tool choice: #{effective_tool_choice.inspect}" if ENV["DEBUG"]
39+
RubyLLM.logger.debug "[DEBUG] Tool choice: #{effective_tool_choice.inspect}"
4040

4141
payload = {
4242
model: model,
@@ -55,7 +55,7 @@ def render_payload(messages, tools:, temperature:, model:, stream: nil,
5555
frequency_penalty: frequency_penalty,
5656
}.compact
5757

58-
puts "[DEBUG] Full payload: #{payload.inspect}" if ENV["DEBUG"]
58+
RubyLLM.logger.debug "[DEBUG] Full payload: #{payload.inspect}"
5959

6060
payload
6161
end
@@ -110,7 +110,7 @@ def render_tool_call(tool_call)
110110
arguments: tool_call.arguments,
111111
},
112112
}
113-
puts "[DEBUG] Rendered tool call: #{tool_call_spec.inspect}" if ENV["DEBUG"]
113+
RubyLLM.logger.debug "[DEBUG] Rendered tool call: #{tool_call_spec.inspect}"
114114
tool_call_spec
115115
end
116116

@@ -132,7 +132,7 @@ def render_tool(tool)
132132
}
133133
}
134134
}
135-
puts "[DEBUG] Rendered tool spec: #{tool_spec.inspect}" if ENV["DEBUG"]
135+
RubyLLM.logger.debug "[DEBUG] Rendered tool spec: #{tool_spec.inspect}"
136136
tool_spec
137137
end
138138

@@ -144,7 +144,7 @@ def param_schema(param)
144144
end
145145

146146
def parse_completion_response(response)
147-
puts "\n[DEBUG] Raw response: #{response.body.inspect}" if ENV["DEBUG"]
147+
RubyLLM.logger.debug "\n[DEBUG] Raw response: #{response.body.inspect}"
148148

149149
if response.body["error"]
150150
error_message = response.body.dig("error", "message")
@@ -166,10 +166,10 @@ def parse_completion_response(response)
166166
message = choice.dig("message")
167167
return unless message
168168

169-
puts "[DEBUG] Message from model: #{message.inspect}" if ENV["DEBUG"]
169+
RubyLLM.logger.debug "[DEBUG] Message from model: #{message.inspect}"
170170

171171
tool_calls = Mistral::Tools.parse_tool_calls(message["tool_calls"])
172-
puts "[DEBUG] Parsed tool calls: #{tool_calls.inspect}" if ENV["DEBUG"]
172+
RubyLLM.logger.debug "[DEBUG] Parsed tool calls: #{tool_calls.inspect}"
173173

174174
content = message["content"]
175175

spec/ruby_llm/chat_spec.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131

3232
it "#{provider}/#{model} successfully uses the system prompt" do
3333
skip 'System prompt can be flaky for Ollama models' if provider == :ollama
34-
skip 'Mistral API does not allow system messages after assistant messages' if provider == :mistral
34+
#skip 'Mistral API does not allow system messages after assistant messages' if provider == :mistral
3535
chat = RubyLLM.chat(model: model, provider: provider).with_temperature(0.0)
3636

3737
# Use a distinctive and unusual instruction that wouldn't happen naturally

spec/ruby_llm/chat_tools_spec.rb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def execute
3636
model = model_info[:model]
3737
provider = model_info[:provider]
3838
it "#{provider}/#{model} can use tools" do # rubocop:disable RSpec/MultipleExpectations
39-
skip 'Mistral does not reliably support tool usage' if provider == :mistral
39+
#skip 'Mistral does not reliably support tool usage' if provider == :mistral
4040
chat = RubyLLM.chat(model: model, provider: provider)
4141
.with_tool(Weather)
4242

@@ -69,7 +69,7 @@ def execute
6969
provider = model_info[:provider]
7070
it "#{provider}/#{model} can use tools without parameters" do
7171
skip 'Ollama models do not reliably use tools without parameters' if provider == :ollama
72-
skip 'Mistral does not reliably support tool usage' if provider == :mistral
72+
#skip 'Mistral does not reliably support tool usage' if provider == :mistral
7373
chat = RubyLLM.chat(model: model, provider: provider)
7474
.with_tool(BestLanguageToLearn)
7575
response = chat.ask("What's the best language to learn?")

spec/ruby_llm/embeddings_spec.rb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
end
2828

2929
it "#{provider}/#{model} can handle a single text with custom dimensions" do # rubocop:disable RSpec/MultipleExpectations
30-
skip "Mistral embed does not support custom dimensions" if model == "mistral-embed"
30+
skip "Mistral embed does not support custom dimensions" if provider == :mistral
3131
embedding = RubyLLM.embed(test_text, model: model, dimensions: test_dimensions)
3232
expect(embedding.vectors).to be_an(Array)
3333
expect(embedding.vectors.length).to eq(test_dimensions)
@@ -43,7 +43,7 @@
4343
end
4444

4545
it "#{provider}/#{model} can handle multiple texts with custom dimensions" do # rubocop:disable RSpec/MultipleExpectations
46-
skip "Mistral embed does not support custom dimensions" if model == "mistral-embed"
46+
skip "Mistral embed does not support custom dimensions" if provider == :mistral
4747
embeddings = RubyLLM.embed(test_texts, model: model, dimensions: test_dimensions)
4848
expect(embeddings.vectors).to be_an(Array)
4949
embeddings.vectors.each do |vector|

0 commit comments

Comments (0)