mirror of
https://github.com/alibaba/higress.git
synced 2026-05-10 22:07:48 +08:00
fix(ai-proxy): preserve empty Claude tool inputs (#3799)
Signed-off-by: Betula-L <6059935+Betula-L@users.noreply.github.com>
Co-authored-by: Betula-L <6059935+Betula-L@users.noreply.github.com>
This commit is contained in:
@@ -1506,7 +1506,7 @@ func claudeContentBlocksToBedrockContents(blocks []claudeChatMessageContent) []b
|
||||
}
|
||||
case "tool_use":
|
||||
result = append(result, bedrockMessageContent{ToolUse: &toolUseBlock{
|
||||
Input: block.Input,
|
||||
Input: claudeToolUseInput(block.Input),
|
||||
Name: block.Name,
|
||||
ToolUseId: block.Id,
|
||||
}})
|
||||
@@ -1525,6 +1525,13 @@ func claudeContentBlocksToBedrockContents(blocks []claudeChatMessageContent) []b
|
||||
return result
|
||||
}
|
||||
|
||||
// claudeToolUseInput dereferences an optional tool_use input, substituting an
// empty (non-nil) map when the pointer is nil so the field always serializes
// as a JSON object ({}) rather than null.
func claudeToolUseInput(input *map[string]interface{}) map[string]interface{} {
	if input != nil {
		return *input
	}
	return map[string]interface{}{}
}
|
||||
|
||||
func bedrockThinkingFromClaudeConfig(thinking *claudeThinkingConfig) map[string]interface{} {
|
||||
if thinking == nil || thinking.Type == "" || thinking.Type == "disabled" {
|
||||
return nil
|
||||
|
||||
@@ -251,6 +251,35 @@ func TestBedrockRequestPreservesClaudeNativeThinkingAndToolResult(t *testing.T)
|
||||
assert.Equal(t, "failed", *request.Messages[1].Content[0].ToolResult.Content[0].Text)
|
||||
}
|
||||
|
||||
// TestBedrockRequestPreservesClaudeNoArgToolUseInput verifies that a Claude
// tool_use block carrying an empty input object ({}) survives the
// Claude -> OpenAI -> Bedrock conversion chain intact: the Bedrock request
// must serialize the toolUse input as "{}" rather than dropping it or
// emitting null. Exercises the full path through ConvertClaudeRequestToOpenAI
// and buildBedrockTextGenerationRequest.
func TestBedrockRequestPreservesClaudeNoArgToolUseInput(t *testing.T) {
	provider := &bedrockProvider{}
	// Assistant message with a thinking block followed by a no-argument
	// tool call — the shape a model emits for a parameterless tool.
	openaiBody, err := (&ClaudeToOpenAIConverter{}).ConvertClaudeRequestToOpenAI([]byte(`{
		"model":"claude",
		"messages":[{
			"role":"assistant",
			"content":[
				{"type":"thinking","thinking":"reasoning","signature":"sig"},
				{"type":"tool_use","id":"toolu_1","name":"list_items","input":{}}
			]
		}]
	}`))
	require.NoError(t, err)

	var openaiRequest chatCompletionRequest
	require.NoError(t, json.Unmarshal(openaiBody, &openaiRequest))

	body, err := provider.buildBedrockTextGenerationRequest(&openaiRequest, nil)
	require.NoError(t, err)

	// Raw-body check: the empty input must appear as an object literal,
	// proving it was not omitted or rendered as null on the wire.
	require.Contains(t, string(body), `"input":{}`)
	var request bedrockTextGenRequest
	require.NoError(t, json.Unmarshal(body, &request))
	require.Len(t, request.Messages, 1)
	// Content[0] is the thinking block; Content[1] is the tool use.
	require.Len(t, request.Messages[0].Content, 2)
	require.NotNil(t, request.Messages[0].Content[1].ToolUse)
	// Empty, but present — the toolUse block kept its input map.
	assert.Empty(t, request.Messages[0].Content[1].ToolUse.Input)
}
|
||||
|
||||
func TestBedrockRequestToolResultWithTrailingTextDoesNotDuplicateToolResult(t *testing.T) {
|
||||
provider := &bedrockProvider{}
|
||||
openaiBody, err := (&ClaudeToOpenAIConverter{}).ConvertClaudeRequestToOpenAI([]byte(`{
|
||||
|
||||
@@ -70,9 +70,9 @@ type claudeChatMessageContent struct {
|
||||
Source *claudeChatMessageContentSource `json:"source,omitempty"`
|
||||
CacheControl map[string]interface{} `json:"cache_control,omitempty"`
|
||||
// Tool use fields
|
||||
Id string `json:"id,omitempty"` // For tool_use
|
||||
Name string `json:"name,omitempty"` // For tool_use
|
||||
Input map[string]interface{} `json:"input,omitempty"` // For tool_use
|
||||
Id string `json:"id,omitempty"` // For tool_use
|
||||
Name string `json:"name,omitempty"` // For tool_use
|
||||
Input *map[string]interface{} `json:"input,omitempty"` // For tool_use
|
||||
// Tool result fields
|
||||
ToolUseId string `json:"tool_use_id,omitempty"` // For tool_result
|
||||
Content *claudeChatMessageContentWr `json:"content,omitempty"` // For tool_result - can be string or array
|
||||
@@ -622,7 +622,7 @@ func (c *claudeProvider) buildClaudeTextGenRequest(origRequest *chatCompletionRe
|
||||
Type: "tool_use",
|
||||
Id: tc.Id,
|
||||
Name: tc.Function.Name,
|
||||
Input: inputMap,
|
||||
Input: &inputMap,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -383,6 +383,42 @@ func TestClaudeToOpenAIConverter_ConvertClaudeRequestToOpenAI(t *testing.T) {
|
||||
assert.Equal(t, float64(5), args["max_results"])
|
||||
})
|
||||
|
||||
t.Run("preserve_empty_tool_use_input", func(t *testing.T) {
	// A tool_use block whose "input" is an empty object must round-trip
	// through the Claude -> OpenAI conversion as "{}" — both in the raw
	// serialized body and in the parsed ClaudeContentBlocks — instead of
	// being collapsed to null/absent by omitempty semantics.
	claudeRequest := `{
		"model": "anthropic/claude-sonnet-4",
		"messages": [{
			"role": "assistant",
			"content": [{
				"type": "thinking",
				"thinking": "Need to list items.",
				"signature": "sig"
			}, {
				"type": "tool_use",
				"id": "toolu_empty",
				"name": "list_items",
				"input": {}
			}]
		}],
		"max_tokens": 1000
	}`

	result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
	require.NoError(t, err)
	// The empty object must survive serialization verbatim.
	require.Contains(t, string(result), `"input":{}`)

	var openaiRequest chatCompletionRequest
	err = json.Unmarshal(result, &openaiRequest)
	require.NoError(t, err)

	require.Len(t, openaiRequest.Messages, 1)
	assistantMsg := openaiRequest.Messages[0]
	require.Len(t, assistantMsg.ToolCalls, 1)
	// OpenAI tool-call arguments for a no-arg tool are the literal "{}".
	assert.Equal(t, "{}", assistantMsg.ToolCalls[0].Function.Arguments)
	// Block 0 is the thinking block; block 1 is the tool_use whose Input
	// pointer must be non-nil (present) yet point at an empty map.
	require.Len(t, assistantMsg.ClaudeContentBlocks, 2)
	require.NotNil(t, assistantMsg.ClaudeContentBlocks[1].Input)
	assert.Empty(t, *assistantMsg.ClaudeContentBlocks[1].Input)
})
|
||||
|
||||
t.Run("convert_tool_result_to_tool_message", func(t *testing.T) {
|
||||
// Test Claude tool_result conversion to OpenAI tool message format
|
||||
claudeRequest := `{
|
||||
|
||||
Reference in New Issue
Block a user