feat(ai-proxy): add OpenRouter provider support (#2823)

澄潭
2025-08-28 19:26:21 +08:00
committed by GitHub
parent b2ffeff7b8
commit 44c33617fa
11 changed files with 5684 additions and 184 deletions

View File

@@ -173,6 +173,10 @@ Groq 所对应的 `type` 为 `groq`。它并无特有的配置字段。
Grok 所对应的 `type` 为 `grok`。它并无特有的配置字段。
#### OpenRouter
OpenRouter 所对应的 `type` 为 `openrouter`。它并无特有的配置字段。
#### 文心一言(Baidu)
文心一言所对应的 `type` 为 `baidu`。它并无特有的配置字段。
@@ -948,6 +952,63 @@ provider:
}
```
### 使用 OpenAI 协议代理 OpenRouter 服务
**配置信息**
```yaml
provider:
  type: openrouter
  apiTokens:
    - 'YOUR_OPENROUTER_API_TOKEN'
  modelMapping:
    'gpt-4': 'openai/gpt-4-turbo-preview'
    'gpt-3.5-turbo': 'openai/gpt-3.5-turbo'
    'claude-3': 'anthropic/claude-3-opus'
    '*': 'openai/gpt-3.5-turbo'
```
**请求示例**
```json
{
  "model": "gpt-4",
  "messages": [
    {
      "role": "user",
      "content": "你好,你是谁?"
    }
  ],
  "temperature": 0.7
}
```
**响应示例**
```json
{
  "id": "gen-1234567890abcdef",
  "object": "chat.completion",
  "created": 1699123456,
  "model": "openai/gpt-4-turbo-preview",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "你好!我是一个AI助手,通过OpenRouter平台提供服务。我可以帮助回答问题、协助创作、进行对话等。有什么我可以帮助你的吗?"
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 12,
    "completion_tokens": 46,
    "total_tokens": 58
  }
}
```
### 使用自动协议兼容功能
插件现在支持自动协议检测,可以同时处理 OpenAI 和 Claude 两种协议格式的请求。
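例如,下面是一个假设的 Claude 协议格式请求示例(发送到与上文 OpenAI 协议请求相同的路由;插件会根据请求体格式自动识别协议,字段仅供演示):
```json
{
  "model": "claude-3",
  "max_tokens": 1024,
  "messages": [
    {
      "role": "user",
      "content": "你好,你是谁?"
    }
  ]
}
```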

View File

@@ -144,6 +144,10 @@ For Groq, the corresponding `type` is `groq`. It has no unique configuration fie
For Grok, the corresponding `type` is `grok`. It has no unique configuration fields.
#### OpenRouter
For OpenRouter, the corresponding `type` is `openrouter`. It has no unique configuration fields.
#### ERNIE Bot
For ERNIE Bot, the corresponding `type` is `baidu`. It has no unique configuration fields.
@@ -894,6 +898,63 @@ provider:
}
```
### Using OpenAI Protocol Proxy for OpenRouter Service
**Configuration Information**
```yaml
provider:
  type: openrouter
  apiTokens:
    - 'YOUR_OPENROUTER_API_TOKEN'
  modelMapping:
    'gpt-4': 'openai/gpt-4-turbo-preview'
    'gpt-3.5-turbo': 'openai/gpt-3.5-turbo'
    'claude-3': 'anthropic/claude-3-opus'
    '*': 'openai/gpt-3.5-turbo'
```
**Example Request**
```json
{
  "model": "gpt-4",
  "messages": [
    {
      "role": "user",
      "content": "Hello, who are you?"
    }
  ],
  "temperature": 0.7
}
```
**Example Response**
```json
{
  "id": "gen-1234567890abcdef",
  "object": "chat.completion",
  "created": 1699123456,
  "model": "openai/gpt-4-turbo-preview",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "Hello! I am an AI assistant powered by OpenRouter. I can help answer questions, assist with creative tasks, engage in conversations, and more. How can I assist you today?"
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 12,
    "completion_tokens": 35,
    "total_tokens": 47
  }
}
```
### Using Auto Protocol Compatibility
The plugin now supports automatic protocol detection, capable of handling both OpenAI and Claude protocol format requests simultaneously.
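For illustration, a hypothetical Claude-protocol request might look like the following (sent to the same route as the OpenAI-style request above; the plugin detects the protocol from the request body format, and the fields shown are for demonstration only):
```json
{
  "model": "claude-3",
  "max_tokens": 1024,
  "messages": [
    {
      "role": "user",
      "content": "Hello, who are you?"
    }
  ]
}
```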

View File

File diff suppressed because it is too large

View File

@@ -340,17 +340,20 @@ func onHttpResponseHeaders(ctx wrapper.HttpContext, pluginConfig config.PluginCo
}
util.ReplaceResponseHeaders(headers)
checkStream(ctx)
_, needHandleBody := activeProvider.(provider.TransformResponseBodyHandler)
var needHandleStreamingBody bool
_, needHandleStreamingBody = activeProvider.(provider.StreamingResponseBodyHandler)
if !needHandleStreamingBody {
_, needHandleStreamingBody = activeProvider.(provider.StreamingEventHandler)
}
if !needHandleBody && !needHandleStreamingBody {
// Check if we need to read body for Claude response conversion
needClaudeConversion, _ := ctx.GetContext("needClaudeResponseConversion").(bool)
if !needHandleBody && !needHandleStreamingBody && !needClaudeConversion {
ctx.DontReadResponseBody()
} else if !needHandleStreamingBody {
ctx.BufferResponseBody()
} else {
checkStream(ctx)
}
return types.ActionContinue
@@ -371,19 +374,12 @@ func onStreamingResponseBody(ctx wrapper.HttpContext, pluginConfig config.Plugin
apiName, _ := ctx.GetContext(provider.CtxKeyApiName).(provider.ApiName)
modifiedChunk, err := handler.OnStreamingResponseBody(ctx, apiName, chunk, isLastChunk)
if err == nil && modifiedChunk != nil {
// Check if we need to convert OpenAI stream response back to Claude format
// Only convert if we did the forward conversion (provider doesn't support Claude natively)
needClaudeConversion, _ := ctx.GetContext("needClaudeResponseConversion").(bool)
if needClaudeConversion {
converter := &provider.ClaudeToOpenAIConverter{}
claudeChunk, err := converter.ConvertOpenAIStreamResponseToClaude(ctx, modifiedChunk)
if err != nil {
log.Errorf("failed to convert streaming response to claude format: %v", err)
return modifiedChunk
}
return claudeChunk
// Convert to Claude format if needed
claudeChunk, convertErr := convertStreamingResponseToClaude(ctx, modifiedChunk)
if convertErr != nil {
return modifiedChunk
}
return modifiedChunk
return claudeChunk
}
return chunk
}
@@ -392,8 +388,8 @@ func onStreamingResponseBody(ctx wrapper.HttpContext, pluginConfig config.Plugin
events := provider.ExtractStreamingEvents(ctx, chunk)
log.Debugf("[onStreamingResponseBody] %d events received", len(events))
if len(events) == 0 {
// No events are extracted, return the original chunk
return chunk
// No events are extracted, return empty bytes slice
return []byte("")
}
var responseBuilder strings.Builder
for _, event := range events {
@@ -409,7 +405,7 @@ func onStreamingResponseBody(ctx wrapper.HttpContext, pluginConfig config.Plugin
log.Errorf("[onStreamingResponseBody] failed to process streaming event: %v\n%s", err, chunk)
return chunk
}
if outputEvents == nil || len(outputEvents) == 0 {
if len(outputEvents) == 0 {
responseBuilder.WriteString(event.ToHttpString())
} else {
for _, outputEvent := range outputEvents {
@@ -420,22 +416,37 @@ func onStreamingResponseBody(ctx wrapper.HttpContext, pluginConfig config.Plugin
result := []byte(responseBuilder.String())
// Check if we need to convert OpenAI stream response back to Claude format
// Only convert if we did the forward conversion (provider doesn't support Claude natively)
needClaudeConversion, _ := ctx.GetContext("needClaudeResponseConversion").(bool)
if needClaudeConversion {
converter := &provider.ClaudeToOpenAIConverter{}
claudeChunk, err := converter.ConvertOpenAIStreamResponseToClaude(ctx, result)
if err != nil {
log.Errorf("failed to convert streaming event response to claude format: %v", err)
return result
}
return claudeChunk
// Convert to Claude format if needed
claudeChunk, convertErr := convertStreamingResponseToClaude(ctx, result)
if convertErr != nil {
return result
}
return claudeChunk
}
// If provider doesn't implement any streaming handlers but we need Claude conversion
// First extract complete events from the chunk
events := provider.ExtractStreamingEvents(ctx, chunk)
log.Debugf("[onStreamingResponseBody] %d events received (no handler)", len(events))
if len(events) == 0 {
// No events are extracted, return empty bytes slice
return []byte("")
}
// Build response from extracted events (without handler processing)
var responseBuilder strings.Builder
for _, event := range events {
responseBuilder.WriteString(event.ToHttpString())
}
result := []byte(responseBuilder.String())
// Convert to Claude format if needed
claudeChunk, convertErr := convertStreamingResponseToClaude(ctx, result)
if convertErr != nil {
return result
}
return chunk
return claudeChunk
}
func onHttpResponseBody(ctx wrapper.HttpContext, pluginConfig config.PluginConfig, body []byte) types.Action {
@@ -448,33 +459,82 @@ func onHttpResponseBody(ctx wrapper.HttpContext, pluginConfig config.PluginConfi
log.Debugf("[onHttpResponseBody] provider=%s", activeProvider.GetProviderType())
var finalBody []byte
if handler, ok := activeProvider.(provider.TransformResponseBodyHandler); ok {
apiName, _ := ctx.GetContext(provider.CtxKeyApiName).(provider.ApiName)
body, err := handler.TransformResponseBody(ctx, apiName, body)
transformedBody, err := handler.TransformResponseBody(ctx, apiName, body)
if err != nil {
_ = util.ErrorHandler("ai-proxy.proc_resp_body_failed", fmt.Errorf("failed to process response body: %v", err))
return types.ActionContinue
}
finalBody = transformedBody
} else {
finalBody = body
}
// Check if we need to convert OpenAI response back to Claude format
// Only convert if we did the forward conversion (provider doesn't support Claude natively)
needClaudeConversion, _ := ctx.GetContext("needClaudeResponseConversion").(bool)
if needClaudeConversion {
converter := &provider.ClaudeToOpenAIConverter{}
body, err = converter.ConvertOpenAIResponseToClaude(ctx, body)
if err != nil {
_ = util.ErrorHandler("ai-proxy.convert_resp_to_claude_failed", fmt.Errorf("failed to convert response to claude format: %v", err))
return types.ActionContinue
}
}
// Convert to Claude format if needed (applies to both branches)
convertedBody, err := convertResponseBodyToClaude(ctx, finalBody)
if err != nil {
_ = util.ErrorHandler("ai-proxy.convert_resp_to_claude_failed", err)
return types.ActionContinue
}
if err = provider.ReplaceResponseBody(body); err != nil {
_ = util.ErrorHandler("ai-proxy.replace_resp_body_failed", fmt.Errorf("failed to replace response body: %v", err))
}
if err = provider.ReplaceResponseBody(convertedBody); err != nil {
_ = util.ErrorHandler("ai-proxy.replace_resp_body_failed", fmt.Errorf("failed to replace response body: %v", err))
}
return types.ActionContinue
}
// Helper function to check if Claude response conversion is needed
func needsClaudeResponseConversion(ctx wrapper.HttpContext) bool {
needClaudeConversion, _ := ctx.GetContext("needClaudeResponseConversion").(bool)
return needClaudeConversion
}
// Helper function to convert OpenAI streaming response to Claude format
func convertStreamingResponseToClaude(ctx wrapper.HttpContext, data []byte) ([]byte, error) {
if !needsClaudeResponseConversion(ctx) {
return data, nil
}
// Get or create converter instance from context to maintain state
const claudeConverterKey = "claudeConverter"
var converter *provider.ClaudeToOpenAIConverter
if converterData := ctx.GetContext(claudeConverterKey); converterData != nil {
if c, ok := converterData.(*provider.ClaudeToOpenAIConverter); ok {
converter = c
}
}
if converter == nil {
converter = &provider.ClaudeToOpenAIConverter{}
ctx.SetContext(claudeConverterKey, converter)
}
claudeChunk, err := converter.ConvertOpenAIStreamResponseToClaude(ctx, data)
if err != nil {
log.Errorf("failed to convert streaming response to claude format: %v", err)
return data, err
}
return claudeChunk, nil
}
// Helper function to convert OpenAI response body to Claude format
func convertResponseBodyToClaude(ctx wrapper.HttpContext, body []byte) ([]byte, error) {
if !needsClaudeResponseConversion(ctx) {
return body, nil
}
converter := &provider.ClaudeToOpenAIConverter{}
convertedBody, err := converter.ConvertOpenAIResponseToClaude(ctx, body)
if err != nil {
return body, fmt.Errorf("failed to convert response to claude format: %v", err)
}
return convertedBody, nil
}
func normalizeOpenAiRequestBody(body []byte) []byte {
var err error
// Default setting include_usage.

View File

@@ -36,8 +36,18 @@ type claudeToolChoice struct {
}
type claudeChatMessage struct {
Role string `json:"role"`
Content any `json:"content"`
Role string `json:"role"`
Content claudeChatMessageContentWr `json:"content"`
}
// claudeChatMessageContentWr wraps the content to handle both string and array formats
type claudeChatMessageContentWr struct {
// StringValue holds simple text content
StringValue string
// ArrayValue holds multi-modal content
ArrayValue []claudeChatMessageContent
// IsString indicates whether this is a simple string or array
IsString bool
}
type claudeChatMessageContentSource struct {
@@ -49,23 +59,154 @@ type claudeChatMessageContentSource struct {
}
type claudeChatMessageContent struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
Source *claudeChatMessageContentSource `json:"source,omitempty"`
Type string `json:"type"`
Text string `json:"text,omitempty"`
Source *claudeChatMessageContentSource `json:"source,omitempty"`
CacheControl map[string]interface{} `json:"cache_control,omitempty"`
// Tool use fields
Id string `json:"id,omitempty"` // For tool_use
Name string `json:"name,omitempty"` // For tool_use
Input map[string]interface{} `json:"input,omitempty"` // For tool_use
// Tool result fields
ToolUseId string `json:"tool_use_id,omitempty"` // For tool_result
Content string `json:"content,omitempty"` // For tool_result
}
// UnmarshalJSON implements custom JSON unmarshaling for claudeChatMessageContentWr
func (ccw *claudeChatMessageContentWr) UnmarshalJSON(data []byte) error {
// Try to unmarshal as string first
var stringValue string
if err := json.Unmarshal(data, &stringValue); err == nil {
ccw.StringValue = stringValue
ccw.IsString = true
return nil
}
// Try to unmarshal as array of content blocks
var arrayValue []claudeChatMessageContent
if err := json.Unmarshal(data, &arrayValue); err == nil {
ccw.ArrayValue = arrayValue
ccw.IsString = false
return nil
}
return fmt.Errorf("content field must be either a string or an array of content blocks")
}
// MarshalJSON implements custom JSON marshaling for claudeChatMessageContentWr
func (ccw claudeChatMessageContentWr) MarshalJSON() ([]byte, error) {
if ccw.IsString {
return json.Marshal(ccw.StringValue)
}
return json.Marshal(ccw.ArrayValue)
}
// GetStringValue returns the string representation if it's a string, empty string otherwise
func (ccw claudeChatMessageContentWr) GetStringValue() string {
if ccw.IsString {
return ccw.StringValue
}
return ""
}
// GetArrayValue returns the array representation if it's an array, empty slice otherwise
func (ccw claudeChatMessageContentWr) GetArrayValue() []claudeChatMessageContent {
if !ccw.IsString {
return ccw.ArrayValue
}
return nil
}
// NewStringContent creates a new wrapper for string content
func NewStringContent(content string) claudeChatMessageContentWr {
return claudeChatMessageContentWr{
StringValue: content,
IsString: true,
}
}
// NewArrayContent creates a new wrapper for array content
func NewArrayContent(content []claudeChatMessageContent) claudeChatMessageContentWr {
return claudeChatMessageContentWr{
ArrayValue: content,
IsString: false,
}
}
// claudeSystemPrompt represents the system field which can be either a string or an array of text blocks
type claudeSystemPrompt struct {
// Will be set to the string value if system is a simple string
StringValue string
// Will be set to the array value if system is an array of text blocks
ArrayValue []claudeTextGenContent
// Indicates which type this represents
IsArray bool
}
// UnmarshalJSON implements custom JSON unmarshaling for claudeSystemPrompt
func (csp *claudeSystemPrompt) UnmarshalJSON(data []byte) error {
// Try to unmarshal as string first
var stringValue string
if err := json.Unmarshal(data, &stringValue); err == nil {
csp.StringValue = stringValue
csp.IsArray = false
return nil
}
// Try to unmarshal as array of text blocks
var arrayValue []claudeTextGenContent
if err := json.Unmarshal(data, &arrayValue); err == nil {
csp.ArrayValue = arrayValue
csp.IsArray = true
return nil
}
return fmt.Errorf("system field must be either a string or an array of text blocks")
}
// MarshalJSON implements custom JSON marshaling for claudeSystemPrompt
func (csp claudeSystemPrompt) MarshalJSON() ([]byte, error) {
if csp.IsArray {
return json.Marshal(csp.ArrayValue)
}
return json.Marshal(csp.StringValue)
}
// String returns the string representation of the system prompt
func (csp claudeSystemPrompt) String() string {
if csp.IsArray {
// Concatenate all text blocks
var parts []string
for _, block := range csp.ArrayValue {
if block.Text != "" {
parts = append(parts, block.Text)
}
}
return strings.Join(parts, "\n")
}
return csp.StringValue
}
// claudeThinkingConfig represents the thinking configuration for Claude
type claudeThinkingConfig struct {
Type string `json:"type"`
BudgetTokens int `json:"budget_tokens,omitempty"`
}
type claudeTextGenRequest struct {
Model string `json:"model"`
Messages []claudeChatMessage `json:"messages"`
System string `json:"system,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
StopSequences []string `json:"stop_sequences,omitempty"`
Stream bool `json:"stream,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
ToolChoice *claudeToolChoice `json:"tool_choice,omitempty"`
Tools []claudeTool `json:"tools,omitempty"`
ServiceTier string `json:"service_tier,omitempty"`
Model string `json:"model"`
Messages []claudeChatMessage `json:"messages"`
System claudeSystemPrompt `json:"system,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
StopSequences []string `json:"stop_sequences,omitempty"`
Stream bool `json:"stream,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
ToolChoice *claudeToolChoice `json:"tool_choice,omitempty"`
Tools []claudeTool `json:"tools,omitempty"`
ServiceTier string `json:"service_tier,omitempty"`
Thinking *claudeThinkingConfig `json:"thinking,omitempty"`
}
type claudeTextGenResponse struct {
@@ -81,8 +222,13 @@ type claudeTextGenResponse struct {
}
type claudeTextGenContent struct {
Type string `json:"type,omitempty"`
Text string `json:"text,omitempty"`
Type string `json:"type,omitempty"`
Text string `json:"text,omitempty"`
Id string `json:"id,omitempty"` // For tool_use
Name string `json:"name,omitempty"` // For tool_use
Input map[string]interface{} `json:"input,omitempty"` // For tool_use
Signature string `json:"signature,omitempty"` // For thinking
Thinking string `json:"thinking,omitempty"` // For thinking
}
type claudeTextGenUsage struct {
@@ -99,7 +245,7 @@ type claudeTextGenError struct {
type claudeTextGenStreamResponse struct {
Type string `json:"type"`
Message *claudeTextGenResponse `json:"message,omitempty"`
Index int `json:"index,omitempty"`
Index *int `json:"index,omitempty"`
ContentBlock *claudeTextGenContent `json:"content_block,omitempty"`
Delta *claudeTextGenDelta `json:"delta,omitempty"`
Usage *claudeTextGenUsage `json:"usage,omitempty"`
@@ -107,13 +253,13 @@ type claudeTextGenStreamResponse struct {
type claudeTextGenDelta struct {
Type string `json:"type"`
Text string `json:"text"`
StopReason *string `json:"stop_reason"`
StopSequence *string `json:"stop_sequence"`
Text string `json:"text,omitempty"`
StopReason *string `json:"stop_reason,omitempty"`
StopSequence *string `json:"stop_sequence,omitempty"`
}
func (c *claudeProviderInitializer) ValidateConfig(config *ProviderConfig) error {
if config.apiTokens == nil || len(config.apiTokens) == 0 {
if len(config.apiTokens) == 0 {
return errors.New("no apiToken found in provider config")
}
return nil
@@ -255,7 +401,10 @@ func (c *claudeProvider) buildClaudeTextGenRequest(origRequest *chatCompletionRe
for _, message := range origRequest.Messages {
if message.Role == roleSystem {
claudeRequest.System = message.StringContent()
claudeRequest.System = claudeSystemPrompt{
StringValue: message.StringContent(),
IsArray: false,
}
continue
}
@@ -263,7 +412,7 @@ func (c *claudeProvider) buildClaudeTextGenRequest(origRequest *chatCompletionRe
Role: message.Role,
}
if message.IsStringContent() {
claudeMessage.Content = message.StringContent()
claudeMessage.Content = NewStringContent(message.StringContent())
} else {
chatMessageContents := make([]claudeChatMessageContent, 0)
for _, messageContent := range message.ParseContent() {
@@ -310,7 +459,7 @@ func (c *claudeProvider) buildClaudeTextGenRequest(origRequest *chatCompletionRe
continue
}
}
claudeMessage.Content = chatMessageContents
claudeMessage.Content = NewArrayContent(chatMessageContents)
}
claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
}
@@ -342,19 +491,25 @@ func (c *claudeProvider) responseClaude2OpenAI(ctx wrapper.HttpContext, origResp
FinishReason: util.Ptr(stopReasonClaude2OpenAI(origResponse.StopReason)),
}
return &chatCompletionResponse{
response := &chatCompletionResponse{
Id: origResponse.Id,
Created: time.Now().UnixMilli() / 1000,
Model: ctx.GetStringContext(ctxKeyFinalRequestModel, ""),
SystemFingerprint: "",
Object: objectChatCompletion,
Choices: []chatCompletionChoice{choice},
Usage: &usage{
}
// Include usage information if available
if origResponse.Usage.InputTokens > 0 || origResponse.Usage.OutputTokens > 0 {
response.Usage = &usage{
PromptTokens: origResponse.Usage.InputTokens,
CompletionTokens: origResponse.Usage.OutputTokens,
TotalTokens: origResponse.Usage.InputTokens + origResponse.Usage.OutputTokens,
},
}
}
return response
}
func stopReasonClaude2OpenAI(reason *string) string {
@@ -376,31 +531,47 @@ func stopReasonClaude2OpenAI(reason *string) string {
func (c *claudeProvider) streamResponseClaude2OpenAI(ctx wrapper.HttpContext, origResponse *claudeTextGenStreamResponse) *chatCompletionResponse {
switch origResponse.Type {
case "message_start":
c.messageId = origResponse.Message.Id
c.usage = usage{
PromptTokens: origResponse.Message.Usage.InputTokens,
CompletionTokens: origResponse.Message.Usage.OutputTokens,
if origResponse.Message != nil {
c.messageId = origResponse.Message.Id
c.usage = usage{
PromptTokens: origResponse.Message.Usage.InputTokens,
CompletionTokens: origResponse.Message.Usage.OutputTokens,
}
c.serviceTier = origResponse.Message.Usage.ServiceTier
}
var index int
if origResponse.Index != nil {
index = *origResponse.Index
}
c.serviceTier = origResponse.Message.Usage.ServiceTier
choice := chatCompletionChoice{
Index: origResponse.Index,
Index: index,
Delta: &chatMessage{Role: roleAssistant, Content: ""},
}
return c.createChatCompletionResponse(ctx, origResponse, choice)
case "content_block_delta":
var index int
if origResponse.Index != nil {
index = *origResponse.Index
}
choice := chatCompletionChoice{
Index: origResponse.Index,
Index: index,
Delta: &chatMessage{Content: origResponse.Delta.Text},
}
return c.createChatCompletionResponse(ctx, origResponse, choice)
case "message_delta":
c.usage.CompletionTokens += origResponse.Usage.OutputTokens
c.usage.TotalTokens = c.usage.PromptTokens + c.usage.CompletionTokens
if origResponse.Usage != nil {
c.usage.CompletionTokens += origResponse.Usage.OutputTokens
c.usage.TotalTokens = c.usage.PromptTokens + c.usage.CompletionTokens
}
var index int
if origResponse.Index != nil {
index = *origResponse.Index
}
choice := chatCompletionChoice{
Index: origResponse.Index,
Index: index,
Delta: &chatMessage{},
FinishReason: util.Ptr(stopReasonClaude2OpenAI(origResponse.Delta.StopReason)),
}
@@ -449,10 +620,17 @@ func (c *claudeProvider) insertHttpContextMessage(body []byte, content string, o
return nil, fmt.Errorf("unable to unmarshal request: %v", err)
}
if request.System == "" {
request.System = content
systemStr := request.System.String()
if systemStr == "" {
request.System = claudeSystemPrompt{
StringValue: content,
IsArray: false,
}
} else {
request.System = content + "\n" + request.System
request.System = claudeSystemPrompt{
StringValue: content + "\n" + systemStr,
IsArray: false,
}
}
return json.Marshal(request)

View File

@@ -10,10 +10,49 @@ import (
)
// ClaudeToOpenAIConverter converts Claude protocol requests to OpenAI protocol
type ClaudeToOpenAIConverter struct{}
type ClaudeToOpenAIConverter struct {
// State tracking for streaming conversion
messageStartSent bool
messageStopSent bool
messageId string
// Cache stop_reason until we get usage info
pendingStopReason *string
// Content block tracking with dynamic index allocation
nextContentIndex int
thinkingBlockIndex int
thinkingBlockStarted bool
thinkingBlockStopped bool
textBlockIndex int
textBlockStarted bool
textBlockStopped bool
toolBlockIndex int
toolBlockStarted bool
toolBlockStopped bool
// Tool call state tracking
toolCallStates map[string]*toolCallState
}
// contentConversionResult represents the result of converting Claude content to OpenAI format
type contentConversionResult struct {
textParts []string
toolCalls []toolCall
toolResults []claudeChatMessageContent
openaiContents []chatMessageContent
hasNonTextContent bool
}
// toolCallState tracks the state of a tool call during streaming
type toolCallState struct {
id string
name string
argumentsBuffer string
isComplete bool
}
// ConvertClaudeRequestToOpenAI converts a Claude chat completion request to OpenAI format
func (c *ClaudeToOpenAIConverter) ConvertClaudeRequestToOpenAI(body []byte) ([]byte, error) {
log.Debugf("[Claude->OpenAI] Original Claude request body: %s", string(body))
var claudeRequest claudeTextGenRequest
if err := json.Unmarshal(body, &claudeRequest); err != nil {
return nil, fmt.Errorf("unable to unmarshal claude request: %v", err)
@@ -31,58 +70,74 @@ func (c *ClaudeToOpenAIConverter) ConvertClaudeRequestToOpenAI(body []byte) ([]b
// Convert messages from Claude format to OpenAI format
for _, claudeMsg := range claudeRequest.Messages {
openaiMsg := chatMessage{
Role: claudeMsg.Role,
}
// Handle different content types
switch content := claudeMsg.Content.(type) {
case string:
// Handle different content types using the type-safe wrapper
if claudeMsg.Content.IsString {
// Simple text content
openaiMsg.Content = content
case []claudeChatMessageContent:
// Multi-modal content
var openaiContents []chatMessageContent
for _, claudeContent := range content {
switch claudeContent.Type {
case "text":
openaiContents = append(openaiContents, chatMessageContent{
Type: contentTypeText,
Text: claudeContent.Text,
})
case "image":
if claudeContent.Source != nil {
if claudeContent.Source.Type == "base64" {
// Convert base64 image to OpenAI format
dataUrl := fmt.Sprintf("data:%s;base64,%s", claudeContent.Source.MediaType, claudeContent.Source.Data)
openaiContents = append(openaiContents, chatMessageContent{
Type: contentTypeImageUrl,
ImageUrl: &chatMessageContentImageUrl{
Url: dataUrl,
},
})
} else if claudeContent.Source.Type == "url" {
openaiContents = append(openaiContents, chatMessageContent{
Type: contentTypeImageUrl,
ImageUrl: &chatMessageContentImageUrl{
Url: claudeContent.Source.Url,
},
})
}
openaiMsg := chatMessage{
Role: claudeMsg.Role,
Content: claudeMsg.Content.GetStringValue(),
}
openaiRequest.Messages = append(openaiRequest.Messages, openaiMsg)
} else {
// Multi-modal content - process with convertContentArray
conversionResult := c.convertContentArray(claudeMsg.Content.GetArrayValue())
// Handle tool calls if present
if len(conversionResult.toolCalls) > 0 {
// Use tool_calls format (current OpenAI standard)
openaiMsg := chatMessage{
Role: claudeMsg.Role,
ToolCalls: conversionResult.toolCalls,
}
// Add text content if present, otherwise set to null
if len(conversionResult.textParts) > 0 {
openaiMsg.Content = strings.Join(conversionResult.textParts, "\n\n")
} else {
openaiMsg.Content = nil
}
openaiRequest.Messages = append(openaiRequest.Messages, openaiMsg)
}
// Handle tool results if present
if len(conversionResult.toolResults) > 0 {
for _, toolResult := range conversionResult.toolResults {
toolMsg := chatMessage{
Role: "tool",
Content: toolResult.Content,
ToolCallId: toolResult.ToolUseId,
}
openaiRequest.Messages = append(openaiRequest.Messages, toolMsg)
}
}
openaiMsg.Content = openaiContents
}
openaiRequest.Messages = append(openaiRequest.Messages, openaiMsg)
// Handle regular content if no tool calls or tool results
if len(conversionResult.toolCalls) == 0 && len(conversionResult.toolResults) == 0 {
var content interface{}
if !conversionResult.hasNonTextContent && len(conversionResult.textParts) > 0 {
// Simple text content
content = strings.Join(conversionResult.textParts, "\n\n")
} else {
// Multi-modal content or empty content
content = conversionResult.openaiContents
}
openaiMsg := chatMessage{
Role: claudeMsg.Role,
Content: content,
}
openaiRequest.Messages = append(openaiRequest.Messages, openaiMsg)
}
}
}
// Handle system message - Claude has separate system field
if claudeRequest.System != "" {
systemStr := claudeRequest.System.String()
if systemStr != "" {
systemMsg := chatMessage{
Role: roleSystem,
Content: claudeRequest.System,
Content: systemStr,
}
// Insert system message at the beginning
openaiRequest.Messages = append([]chatMessage{systemMsg}, openaiRequest.Messages...)
@@ -119,11 +174,44 @@ func (c *ClaudeToOpenAIConverter) ConvertClaudeRequestToOpenAI(body []byte) ([]b
openaiRequest.ParallelToolCalls = !claudeRequest.ToolChoice.DisableParallelToolUse
}
return json.Marshal(openaiRequest)
// Convert thinking configuration if present
if claudeRequest.Thinking != nil {
log.Debugf("[Claude->OpenAI] Found thinking config: type=%s, budget_tokens=%d",
claudeRequest.Thinking.Type, claudeRequest.Thinking.BudgetTokens)
if claudeRequest.Thinking.Type == "enabled" {
openaiRequest.ReasoningMaxTokens = claudeRequest.Thinking.BudgetTokens
// Set ReasoningEffort based on budget_tokens
// low: <4096, medium: >=4096 and <16384, high: >=16384
if claudeRequest.Thinking.BudgetTokens < 4096 {
openaiRequest.ReasoningEffort = "low"
} else if claudeRequest.Thinking.BudgetTokens < 16384 {
openaiRequest.ReasoningEffort = "medium"
} else {
openaiRequest.ReasoningEffort = "high"
}
log.Debugf("[Claude->OpenAI] Converted thinking config: budget_tokens=%d, reasoning_effort=%s, reasoning_max_tokens=%d",
claudeRequest.Thinking.BudgetTokens, openaiRequest.ReasoningEffort, openaiRequest.ReasoningMaxTokens)
}
} else {
log.Debugf("[Claude->OpenAI] No thinking config found")
}
result, err := json.Marshal(openaiRequest)
if err != nil {
return nil, fmt.Errorf("unable to marshal openai request: %v", err)
}
log.Debugf("[Claude->OpenAI] Converted OpenAI request body: %s", string(result))
return result, nil
}
// ConvertOpenAIResponseToClaude converts an OpenAI response back to Claude format
func (c *ClaudeToOpenAIConverter) ConvertOpenAIResponseToClaude(ctx wrapper.HttpContext, body []byte) ([]byte, error) {
log.Debugf("[OpenAI->Claude] Original OpenAI response body: %s", string(body))
var openaiResponse chatCompletionResponse
if err := json.Unmarshal(body, &openaiResponse); err != nil {
return nil, fmt.Errorf("unable to unmarshal openai response: %v", err)
@@ -135,21 +223,73 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIResponseToClaude(ctx wrapper.Http
Type: "message",
Role: "assistant",
Model: openaiResponse.Model,
Usage: claudeTextGenUsage{
}
// Only include usage if it's available
if openaiResponse.Usage != nil {
claudeResponse.Usage = claudeTextGenUsage{
InputTokens: openaiResponse.Usage.PromptTokens,
OutputTokens: openaiResponse.Usage.CompletionTokens,
},
}
}
// Convert the first choice content
if len(openaiResponse.Choices) > 0 {
choice := openaiResponse.Choices[0]
if choice.Message != nil {
content := claudeTextGenContent{
Type: "text",
Text: choice.Message.StringContent(),
var contents []claudeTextGenContent
// Add reasoning content (thinking) if present - check both reasoning and reasoning_content fields
var reasoningText string
if choice.Message.Reasoning != "" {
reasoningText = choice.Message.Reasoning
} else if choice.Message.ReasoningContent != "" {
reasoningText = choice.Message.ReasoningContent
}
claudeResponse.Content = []claudeTextGenContent{content}
if reasoningText != "" {
contents = append(contents, claudeTextGenContent{
Type: "thinking",
Signature: "", // OpenAI doesn't provide signature, use empty string
Thinking: reasoningText,
})
log.Debugf("[OpenAI->Claude] Added thinking content: %s", reasoningText)
}
// Add text content if present
if choice.Message.StringContent() != "" {
contents = append(contents, claudeTextGenContent{
Type: "text",
Text: choice.Message.StringContent(),
})
}
// Add tool calls if present
if len(choice.Message.ToolCalls) > 0 {
for _, toolCall := range choice.Message.ToolCalls {
if !toolCall.Function.IsEmpty() {
// Parse arguments from JSON string to map
var input map[string]interface{}
if toolCall.Function.Arguments != "" {
if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &input); err != nil {
log.Errorf("Failed to parse tool call arguments: %v", err)
input = map[string]interface{}{}
}
} else {
input = map[string]interface{}{}
}
contents = append(contents, claudeTextGenContent{
Type: "tool_use",
Id: toolCall.Id,
Name: toolCall.Function.Name,
Input: input,
})
}
}
}
claudeResponse.Content = contents
}
// Convert finish reason
@@ -159,11 +299,24 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIResponseToClaude(ctx wrapper.Http
}
}
return json.Marshal(claudeResponse)
result, err := json.Marshal(claudeResponse)
if err != nil {
return nil, fmt.Errorf("unable to marshal claude response: %v", err)
}
log.Debugf("[OpenAI->Claude] Converted Claude response body: %s", string(result))
return result, nil
}
// ConvertOpenAIStreamResponseToClaude converts OpenAI streaming response to Claude format
func (c *ClaudeToOpenAIConverter) ConvertOpenAIStreamResponseToClaude(ctx wrapper.HttpContext, chunk []byte) ([]byte, error) {
log.Debugf("[OpenAI->Claude] Original OpenAI streaming chunk: %s", string(chunk))
// Initialize tool call states if needed
if c.toolCallStates == nil {
c.toolCallStates = make(map[string]*toolCallState)
}
// For streaming responses, we need to handle the Server-Sent Events format
lines := strings.Split(string(chunk), "\n")
var result strings.Builder
@@ -172,88 +325,413 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIStreamResponseToClaude(ctx wrappe
if strings.HasPrefix(line, "data: ") {
data := strings.TrimPrefix(line, "data: ")
// Skip [DONE] messages
// Handle [DONE] messages
if data == "[DONE]" {
log.Debugf("[OpenAI->Claude] Processing [DONE] message, finalizing stream")
// Send final content_block_stop events for any active blocks
if c.thinkingBlockStarted && !c.thinkingBlockStopped {
c.thinkingBlockStopped = true
log.Debugf("[OpenAI->Claude] Sending final thinking content_block_stop event at index %d", c.thinkingBlockIndex)
stopEvent := &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.thinkingBlockIndex,
}
stopData, _ := json.Marshal(stopEvent)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
}
if c.textBlockStarted && !c.textBlockStopped {
c.textBlockStopped = true
log.Debugf("[OpenAI->Claude] Sending final text content_block_stop event at index %d", c.textBlockIndex)
stopEvent := &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.textBlockIndex,
}
stopData, _ := json.Marshal(stopEvent)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
}
if c.toolBlockStarted && !c.toolBlockStopped {
c.toolBlockStopped = true
log.Debugf("[OpenAI->Claude] Sending final tool content_block_stop event at index %d", c.toolBlockIndex)
stopEvent := &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.toolBlockIndex,
}
stopData, _ := json.Marshal(stopEvent)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
}
// If we have a pending stop_reason but no usage, send message_delta with just stop_reason
if c.pendingStopReason != nil {
log.Debugf("[OpenAI->Claude] Sending final message_delta with pending stop_reason: %s", *c.pendingStopReason)
messageDelta := &claudeTextGenStreamResponse{
Type: "message_delta",
Delta: &claudeTextGenDelta{
Type: "message_delta",
StopReason: c.pendingStopReason,
},
}
stopData, _ := json.Marshal(messageDelta)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
c.pendingStopReason = nil
}
if c.messageStartSent && !c.messageStopSent {
c.messageStopSent = true
log.Debugf("[OpenAI->Claude] Sending final message_stop event")
messageStopEvent := &claudeTextGenStreamResponse{
Type: "message_stop",
}
stopData, _ := json.Marshal(messageStopEvent)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
}
// Reset all state for next request
c.messageStartSent = false
c.messageStopSent = false
c.messageId = ""
c.pendingStopReason = nil
c.nextContentIndex = 0
c.thinkingBlockIndex = -1
c.thinkingBlockStarted = false
c.thinkingBlockStopped = false
c.textBlockIndex = -1
c.textBlockStarted = false
c.textBlockStopped = false
c.toolBlockIndex = -1
c.toolBlockStarted = false
c.toolBlockStopped = false
c.toolCallStates = make(map[string]*toolCallState)
log.Debugf("[OpenAI->Claude] Reset converter state for next request")
continue
}
var openaiStreamResponse chatCompletionResponse
if err := json.Unmarshal([]byte(data), &openaiStreamResponse); err != nil {
log.Errorf("unable to unmarshal openai stream response: %v", err)
log.Debugf("unable to unmarshal openai stream response: %v, data: %s", err, data)
continue
}
// Convert to Claude streaming format
claudeStreamResponse := c.buildClaudeStreamResponse(ctx, &openaiStreamResponse)
if claudeStreamResponse != nil {
claudeStreamResponses := c.buildClaudeStreamResponse(ctx, &openaiStreamResponse)
log.Debugf("[OpenAI->Claude] Generated %d Claude stream events from OpenAI chunk", len(claudeStreamResponses))
for i, claudeStreamResponse := range claudeStreamResponses {
responseData, err := json.Marshal(claudeStreamResponse)
if err != nil {
log.Errorf("unable to marshal claude stream response: %v", err)
continue
}
log.Debugf("[OpenAI->Claude] Stream event [%d/%d]: %s", i+1, len(claudeStreamResponses), string(responseData))
result.WriteString(fmt.Sprintf("data: %s\n\n", responseData))
}
}
}
return []byte(result.String()), nil
claudeChunk := []byte(result.String())
log.Debugf("[OpenAI->Claude] Converted Claude streaming chunk: %s", string(claudeChunk))
return claudeChunk, nil
}
// buildClaudeStreamResponse builds a Claude streaming response from OpenAI streaming response
func (c *ClaudeToOpenAIConverter) buildClaudeStreamResponse(ctx wrapper.HttpContext, openaiResponse *chatCompletionResponse) *claudeTextGenStreamResponse {
// buildClaudeStreamResponse builds Claude streaming responses from OpenAI streaming response
func (c *ClaudeToOpenAIConverter) buildClaudeStreamResponse(ctx wrapper.HttpContext, openaiResponse *chatCompletionResponse) []*claudeTextGenStreamResponse {
if len(openaiResponse.Choices) == 0 {
log.Debugf("[OpenAI->Claude] No choices in OpenAI response, skipping")
return nil
}
choice := openaiResponse.Choices[0]
var responses []*claudeTextGenStreamResponse
// Determine the response type based on the content
if choice.Delta != nil && choice.Delta.Content != "" {
// Content delta
if deltaContent, ok := choice.Delta.Content.(string); ok {
return &claudeTextGenStreamResponse{
Type: "content_block_delta",
Index: choice.Index,
Delta: &claudeTextGenDelta{
Type: "text_delta",
Text: deltaContent,
},
// Log what we're processing
hasRole := choice.Delta != nil && choice.Delta.Role != ""
hasContent := choice.Delta != nil && choice.Delta.Content != ""
hasFinishReason := choice.FinishReason != nil
hasUsage := openaiResponse.Usage != nil
log.Debugf("[OpenAI->Claude] Processing OpenAI chunk - Role: %v, Content: %v, FinishReason: %v, Usage: %v",
hasRole, hasContent, hasFinishReason, hasUsage)
// Handle message start (only once)
// Note: OpenRouter may send multiple messages with role but empty content at the start
// We only send message_start for the first one
if choice.Delta != nil && choice.Delta.Role != "" && !c.messageStartSent {
c.messageId = openaiResponse.Id
c.messageStartSent = true
message := &claudeTextGenResponse{
Id: openaiResponse.Id,
Type: "message",
Role: "assistant",
Model: openaiResponse.Model,
Content: []claudeTextGenContent{},
}
// Only include usage if it's available
if openaiResponse.Usage != nil {
message.Usage = claudeTextGenUsage{
InputTokens: openaiResponse.Usage.PromptTokens,
OutputTokens: 0,
}
}
} else if choice.FinishReason != nil {
// Message completed
claudeFinishReason := openAIFinishReasonToClaude(*choice.FinishReason)
return &claudeTextGenStreamResponse{
Type: "message_delta",
Index: choice.Index,
responses = append(responses, &claudeTextGenStreamResponse{
Type: "message_start",
Message: message,
})
log.Debugf("[OpenAI->Claude] Generated message_start event for id: %s", openaiResponse.Id)
} else if choice.Delta != nil && choice.Delta.Role != "" && c.messageStartSent {
// Skip duplicate role messages from OpenRouter
log.Debugf("[OpenAI->Claude] Skipping duplicate role message for id: %s", openaiResponse.Id)
}
// Handle reasoning content (thinking) first - check both reasoning and reasoning_content fields
var reasoningText string
if choice.Delta != nil {
if choice.Delta.Reasoning != "" {
reasoningText = choice.Delta.Reasoning
} else if choice.Delta.ReasoningContent != "" {
reasoningText = choice.Delta.ReasoningContent
}
}
if reasoningText != "" {
log.Debugf("[OpenAI->Claude] Processing reasoning content delta: %s", reasoningText)
// Send content_block_start for thinking only once with dynamic index
if !c.thinkingBlockStarted {
c.thinkingBlockIndex = c.nextContentIndex
c.nextContentIndex++
c.thinkingBlockStarted = true
log.Debugf("[OpenAI->Claude] Generated content_block_start event for thinking at index %d", c.thinkingBlockIndex)
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_start",
Index: &c.thinkingBlockIndex,
ContentBlock: &claudeTextGenContent{
Type: "thinking",
Signature: "", // OpenAI doesn't provide signature
Thinking: "",
},
})
}
// Send content_block_delta for thinking
log.Debugf("[OpenAI->Claude] Generated content_block_delta event with thinking: %s", reasoningText)
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_delta",
Index: &c.thinkingBlockIndex,
Delta: &claudeTextGenDelta{
Type: "message_delta",
StopReason: &claudeFinishReason,
Type: "thinking_delta", // Use thinking_delta for reasoning content
Text: reasoningText,
},
})
}
// Handle content
if choice.Delta != nil && choice.Delta.Content != nil && choice.Delta.Content != "" {
deltaContent, ok := choice.Delta.Content.(string)
if !ok {
log.Debugf("[OpenAI->Claude] Content is not a string: %T", choice.Delta.Content)
return responses
}
log.Debugf("[OpenAI->Claude] Processing content delta: %s", deltaContent)
// Close thinking content block if it's still open
if c.thinkingBlockStarted && !c.thinkingBlockStopped {
c.thinkingBlockStopped = true
log.Debugf("[OpenAI->Claude] Closing thinking content block before text")
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.thinkingBlockIndex,
})
}
// Send content_block_start only once for text content with dynamic index
if !c.textBlockStarted {
c.textBlockIndex = c.nextContentIndex
c.nextContentIndex++
c.textBlockStarted = true
log.Debugf("[OpenAI->Claude] Generated content_block_start event for text at index %d", c.textBlockIndex)
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_start",
Index: &c.textBlockIndex,
ContentBlock: &claudeTextGenContent{
Type: "text",
Text: "",
},
})
}
// Send content_block_delta
log.Debugf("[OpenAI->Claude] Generated content_block_delta event with text: %s", deltaContent)
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_delta",
Index: &c.textBlockIndex,
Delta: &claudeTextGenDelta{
Type: "text_delta",
Text: deltaContent,
},
})
}
// Handle tool calls in streaming response
if choice.Delta != nil && len(choice.Delta.ToolCalls) > 0 {
for _, toolCall := range choice.Delta.ToolCalls {
if !toolCall.Function.IsEmpty() {
log.Debugf("[OpenAI->Claude] Processing tool call delta")
// Get or create tool call state
state := c.toolCallStates[toolCall.Id]
if state == nil {
state = &toolCallState{
id: toolCall.Id,
name: toolCall.Function.Name,
argumentsBuffer: "",
isComplete: false,
}
c.toolCallStates[toolCall.Id] = state
log.Debugf("[OpenAI->Claude] Created new tool call state for id: %s, name: %s", toolCall.Id, toolCall.Function.Name)
}
// Accumulate arguments
if toolCall.Function.Arguments != "" {
state.argumentsBuffer += toolCall.Function.Arguments
log.Debugf("[OpenAI->Claude] Accumulated tool arguments: %s", state.argumentsBuffer)
}
// Try to parse accumulated arguments as JSON to check if complete
var input map[string]interface{}
if state.argumentsBuffer != "" {
if err := json.Unmarshal([]byte(state.argumentsBuffer), &input); err == nil {
// Successfully parsed - arguments are complete
if !state.isComplete {
state.isComplete = true
log.Debugf("[OpenAI->Claude] Tool call arguments complete for %s: %s", state.name, state.argumentsBuffer)
// Close thinking content block if it's still open
if c.thinkingBlockStarted && !c.thinkingBlockStopped {
c.thinkingBlockStopped = true
log.Debugf("[OpenAI->Claude] Closing thinking content block before tool use")
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.thinkingBlockIndex,
})
}
// Close text content block if it's still open
if c.textBlockStarted && !c.textBlockStopped {
c.textBlockStopped = true
log.Debugf("[OpenAI->Claude] Closing text content block before tool use")
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.textBlockIndex,
})
}
// Send content_block_start for tool_use only when we have complete arguments with dynamic index
if !c.toolBlockStarted {
c.toolBlockIndex = c.nextContentIndex
c.nextContentIndex++
c.toolBlockStarted = true
log.Debugf("[OpenAI->Claude] Generated content_block_start event for tool_use at index %d", c.toolBlockIndex)
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_start",
Index: &c.toolBlockIndex,
ContentBlock: &claudeTextGenContent{
Type: "tool_use",
Id: toolCall.Id,
Name: state.name,
Input: input,
},
})
}
}
} else {
// Still accumulating arguments
log.Debugf("[OpenAI->Claude] Tool arguments not yet complete, continuing to accumulate: %v", err)
}
}
}
}
}
// Handle finish reason
if choice.FinishReason != nil {
claudeFinishReason := openAIFinishReasonToClaude(*choice.FinishReason)
log.Debugf("[OpenAI->Claude] Processing finish_reason: %s -> %s", *choice.FinishReason, claudeFinishReason)
// Send content_block_stop for any active content blocks
if c.thinkingBlockStarted && !c.thinkingBlockStopped {
c.thinkingBlockStopped = true
log.Debugf("[OpenAI->Claude] Generated thinking content_block_stop event at index %d", c.thinkingBlockIndex)
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.thinkingBlockIndex,
})
}
if c.textBlockStarted && !c.textBlockStopped {
c.textBlockStopped = true
log.Debugf("[OpenAI->Claude] Generated text content_block_stop event at index %d", c.textBlockIndex)
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.textBlockIndex,
})
}
if c.toolBlockStarted && !c.toolBlockStopped {
c.toolBlockStopped = true
log.Debugf("[OpenAI->Claude] Generated tool content_block_stop event at index %d", c.toolBlockIndex)
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_stop",
Index: &c.toolBlockIndex,
})
}
// Cache stop_reason until we get usage info (Claude protocol requires them together)
c.pendingStopReason = &claudeFinishReason
log.Debugf("[OpenAI->Claude] Cached stop_reason: %s, waiting for usage", claudeFinishReason)
}
// Handle usage information
if openaiResponse.Usage != nil && choice.FinishReason == nil {
log.Debugf("[OpenAI->Claude] Processing usage info - input: %d, output: %d",
openaiResponse.Usage.PromptTokens, openaiResponse.Usage.CompletionTokens)
// Send message_delta with both stop_reason and usage (Claude protocol requirement)
messageDelta := &claudeTextGenStreamResponse{
Type: "message_delta",
Delta: &claudeTextGenDelta{
Type: "message_delta",
},
Usage: &claudeTextGenUsage{
InputTokens: openaiResponse.Usage.PromptTokens,
OutputTokens: openaiResponse.Usage.CompletionTokens,
},
}
} else if choice.Delta != nil && choice.Delta.Role != "" {
// Message start
return &claudeTextGenStreamResponse{
Type: "message_start",
Index: choice.Index,
Message: &claudeTextGenResponse{
Id: openaiResponse.Id,
Type: "message",
Role: "assistant",
Model: openaiResponse.Model,
Usage: claudeTextGenUsage{
InputTokens: openaiResponse.Usage.PromptTokens,
OutputTokens: 0,
},
},
// Include cached stop_reason if available
if c.pendingStopReason != nil {
log.Debugf("[OpenAI->Claude] Combining cached stop_reason %s with usage", *c.pendingStopReason)
messageDelta.Delta.StopReason = c.pendingStopReason
c.pendingStopReason = nil // Clear cache
}
log.Debugf("[OpenAI->Claude] Generated message_delta event with usage and stop_reason")
responses = append(responses, messageDelta)
// Send message_stop after combined message_delta
if !c.messageStopSent {
c.messageStopSent = true
log.Debugf("[OpenAI->Claude] Generated message_stop event")
responses = append(responses, &claudeTextGenStreamResponse{
Type: "message_stop",
})
}
}
return nil
return responses
}
// openAIFinishReasonToClaude converts OpenAI finish reason to Claude format
@@ -269,3 +747,78 @@ func openAIFinishReasonToClaude(reason string) string {
return reason
}
}
// convertContentArray converts an array of Claude content to OpenAI content format
func (c *ClaudeToOpenAIConverter) convertContentArray(claudeContents []claudeChatMessageContent) *contentConversionResult {
result := &contentConversionResult{
textParts: []string{},
toolCalls: []toolCall{},
toolResults: []claudeChatMessageContent{},
openaiContents: []chatMessageContent{},
hasNonTextContent: false,
}
for _, claudeContent := range claudeContents {
switch claudeContent.Type {
case "text":
if claudeContent.Text != "" {
result.textParts = append(result.textParts, claudeContent.Text)
result.openaiContents = append(result.openaiContents, chatMessageContent{
Type: contentTypeText,
Text: claudeContent.Text,
})
}
case "image":
result.hasNonTextContent = true
if claudeContent.Source != nil {
if claudeContent.Source.Type == "base64" {
// Convert base64 image to OpenAI format
dataUrl := fmt.Sprintf("data:%s;base64,%s", claudeContent.Source.MediaType, claudeContent.Source.Data)
result.openaiContents = append(result.openaiContents, chatMessageContent{
Type: contentTypeImageUrl,
ImageUrl: &chatMessageContentImageUrl{
Url: dataUrl,
},
})
} else if claudeContent.Source.Type == "url" {
result.openaiContents = append(result.openaiContents, chatMessageContent{
Type: contentTypeImageUrl,
ImageUrl: &chatMessageContentImageUrl{
Url: claudeContent.Source.Url,
},
})
}
}
case "tool_use":
result.hasNonTextContent = true
// Convert Claude tool_use to OpenAI tool_calls format
if claudeContent.Id != "" && claudeContent.Name != "" {
// Convert input to JSON string for OpenAI format
var argumentsStr string
if claudeContent.Input != nil {
if argBytes, err := json.Marshal(claudeContent.Input); err == nil {
argumentsStr = string(argBytes)
}
}
toolCall := toolCall{
Id: claudeContent.Id,
Type: "function",
Function: functionCall{
Name: claudeContent.Name,
Arguments: argumentsStr,
},
}
result.toolCalls = append(result.toolCalls, toolCall)
log.Debugf("[Claude->OpenAI] Converted tool_use to tool_call: %s", claudeContent.Name)
}
case "tool_result":
result.hasNonTextContent = true
// Store tool results for processing
result.toolResults = append(result.toolResults, claudeContent)
log.Debugf("[Claude->OpenAI] Found tool_result for tool_use_id: %s", claudeContent.ToolUseId)
}
}
return result
}

View File

@@ -0,0 +1,727 @@
package provider
import (
"encoding/json"
"testing"
"github.com/higress-group/wasm-go/pkg/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Mock logger for testing
type mockLogger struct{}
func (m *mockLogger) Trace(msg string) {}
func (m *mockLogger) Tracef(format string, args ...interface{}) {}
func (m *mockLogger) Debug(msg string) {}
func (m *mockLogger) Debugf(format string, args ...interface{}) {}
func (m *mockLogger) Info(msg string) {}
func (m *mockLogger) Infof(format string, args ...interface{}) {}
func (m *mockLogger) Warn(msg string) {}
func (m *mockLogger) Warnf(format string, args ...interface{}) {}
func (m *mockLogger) Error(msg string) {}
func (m *mockLogger) Errorf(format string, args ...interface{}) {}
func (m *mockLogger) Critical(msg string) {}
func (m *mockLogger) Criticalf(format string, args ...interface{}) {}
func (m *mockLogger) ResetID(pluginID string) {}
func init() {
// Initialize mock logger for testing
log.SetPluginLog(&mockLogger{})
}
func TestClaudeToOpenAIConverter_ConvertClaudeRequestToOpenAI(t *testing.T) {
converter := &ClaudeToOpenAIConverter{}
t.Run("convert_multiple_text_content_blocks", func(t *testing.T) {
// Test case for the bug fix: multiple text content blocks should be merged into a single string
claudeRequest := `{
"max_tokens": 32000,
"messages": [{
"content": [{
"text": "<system-reminder>\nThis is a reminder that your todo list is currently empty. DO NOT mention this to the user explicitly because they are already aware. If you are working on tasks that would benefit from a todo list please use the TodoWrite tool to create one. If not, please feel free to ignore. Again do not mention this message to the user.</system-reminder>",
"type": "text"
}, {
"text": "<system-reminder>\nyyy</system-reminder>",
"type": "text"
}, {
"cache_control": {
"type": "ephemeral"
},
"text": "你是谁",
"type": "text"
}],
"role": "user"
}],
"metadata": {
"user_id": "user_dd3c52c1d698a4486bdef490197846b7c1f7e553202dae5763f330c35aeb9823_account__session_b2e14122-0ac6-4959-9c5d-b49ae01ccb7c"
},
"model": "anthropic/claude-sonnet-4",
"stream": true,
"system": [{
"cache_control": {
"type": "ephemeral"
},
"text": "xxx",
"type": "text"
}, {
"cache_control": {
"type": "ephemeral"
},
"text": "yyy",
"type": "text"
}],
"temperature": 1,
"stream_options": {
"include_usage": true
}
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
// Parse the result to verify the conversion
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
// Verify basic fields are converted correctly
assert.Equal(t, "anthropic/claude-sonnet-4", openaiRequest.Model)
assert.Equal(t, true, openaiRequest.Stream)
assert.Equal(t, 1.0, openaiRequest.Temperature)
assert.Equal(t, 32000, openaiRequest.MaxTokens)
// Verify messages structure
require.Len(t, openaiRequest.Messages, 2)
// First message should be system message (converted from Claude's system field)
systemMsg := openaiRequest.Messages[0]
assert.Equal(t, roleSystem, systemMsg.Role)
assert.Equal(t, "xxx\nyyy", systemMsg.Content) // Claude system uses single \n
// Second message should be user message with merged text content
userMsg := openaiRequest.Messages[1]
assert.Equal(t, "user", userMsg.Role)
// The key fix: multiple text blocks should be merged into a single string
expectedContent := "<system-reminder>\nThis is a reminder that your todo list is currently empty. DO NOT mention this to the user explicitly because they are already aware. If you are working on tasks that would benefit from a todo list please use the TodoWrite tool to create one. If not, please feel free to ignore. Again do not mention this message to the user.</system-reminder>\n\n<system-reminder>\nyyy</system-reminder>\n\n你是谁"
assert.Equal(t, expectedContent, userMsg.Content)
})
t.Run("convert_mixed_content_with_image", func(t *testing.T) {
// Test case with mixed text and image content (should remain as array)
claudeRequest := `{
"model": "claude-3-sonnet-20240229",
"messages": [{
"role": "user",
"content": [{
"type": "text",
"text": "What's in this image?"
}, {
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
}
}]
}],
"max_tokens": 1000
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
// Should have one user message
require.Len(t, openaiRequest.Messages, 1)
userMsg := openaiRequest.Messages[0]
assert.Equal(t, "user", userMsg.Role)
// Content should be an array (mixed content) - after JSON marshaling/unmarshaling it becomes []interface{}
contentArray, ok := userMsg.Content.([]interface{})
require.True(t, ok, "Content should be an array for mixed content")
require.Len(t, contentArray, 2)
// First element should be text
firstElement, ok := contentArray[0].(map[string]interface{})
require.True(t, ok)
assert.Equal(t, contentTypeText, firstElement["type"])
assert.Equal(t, "What's in this image?", firstElement["text"])
// Second element should be image
secondElement, ok := contentArray[1].(map[string]interface{})
require.True(t, ok)
assert.Equal(t, contentTypeImageUrl, secondElement["type"])
assert.NotNil(t, secondElement["image_url"])
imageUrl, ok := secondElement["image_url"].(map[string]interface{})
require.True(t, ok)
assert.Contains(t, imageUrl["url"], "data:image/jpeg;base64,")
})
t.Run("convert_simple_string_content", func(t *testing.T) {
// Test case with simple string content
claudeRequest := `{
"model": "claude-3-sonnet-20240229",
"messages": [{
"role": "user",
"content": "Hello, how are you?"
}],
"max_tokens": 1000
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
require.Len(t, openaiRequest.Messages, 1)
userMsg := openaiRequest.Messages[0]
assert.Equal(t, "user", userMsg.Role)
assert.Equal(t, "Hello, how are you?", userMsg.Content)
})
t.Run("convert_empty_content_array", func(t *testing.T) {
// Test case with empty content array
claudeRequest := `{
"model": "claude-3-sonnet-20240229",
"messages": [{
"role": "user",
"content": []
}],
"max_tokens": 1000
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
require.Len(t, openaiRequest.Messages, 1)
userMsg := openaiRequest.Messages[0]
assert.Equal(t, "user", userMsg.Role)
// An empty content array should stay an array (not a string); after the JSON round trip it becomes []interface{}
if userMsg.Content != nil {
contentArray, ok := userMsg.Content.([]interface{})
require.True(t, ok, "Empty content should be an array")
assert.Empty(t, contentArray)
} else {
// null is also acceptable for empty content
assert.Nil(t, userMsg.Content)
}
})
t.Run("convert_tool_use_to_tool_calls", func(t *testing.T) {
// Test Claude tool_use conversion to OpenAI tool_calls format
claudeRequest := `{
"model": "anthropic/claude-sonnet-4",
"messages": [{
"role": "assistant",
"content": [{
"type": "text",
"text": "I'll help you search for information."
}, {
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "web_search",
"input": {
"query": "Claude AI capabilities",
"max_results": 5
}
}]
}],
"max_tokens": 1000
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
// Should have one assistant message with tool_calls
require.Len(t, openaiRequest.Messages, 1)
assistantMsg := openaiRequest.Messages[0]
assert.Equal(t, "assistant", assistantMsg.Role)
assert.Equal(t, "I'll help you search for information.", assistantMsg.Content)
// Verify tool_calls format
require.NotNil(t, assistantMsg.ToolCalls)
require.Len(t, assistantMsg.ToolCalls, 1)
toolCall := assistantMsg.ToolCalls[0]
assert.Equal(t, "toolu_01D7FLrfh4GYq7yT1ULFeyMV", toolCall.Id)
assert.Equal(t, "function", toolCall.Type)
assert.Equal(t, "web_search", toolCall.Function.Name)
// Verify arguments are properly JSON encoded
var args map[string]interface{}
err = json.Unmarshal([]byte(toolCall.Function.Arguments), &args)
require.NoError(t, err)
assert.Equal(t, "Claude AI capabilities", args["query"])
assert.Equal(t, float64(5), args["max_results"])
})
t.Run("convert_tool_result_to_tool_message", func(t *testing.T) {
// Test Claude tool_result conversion to OpenAI tool message format
claudeRequest := `{
"model": "anthropic/claude-sonnet-4",
"messages": [{
"role": "user",
"content": [{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "Search results: Claude is an AI assistant created by Anthropic."
}]
}],
"max_tokens": 1000
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
// Should have one tool message
require.Len(t, openaiRequest.Messages, 1)
toolMsg := openaiRequest.Messages[0]
assert.Equal(t, "tool", toolMsg.Role)
assert.Equal(t, "Search results: Claude is an AI assistant created by Anthropic.", toolMsg.Content)
assert.Equal(t, "toolu_01D7FLrfh4GYq7yT1ULFeyMV", toolMsg.ToolCallId)
})
t.Run("convert_multiple_tool_calls", func(t *testing.T) {
// Test multiple tool_use in single message
claudeRequest := `{
"model": "anthropic/claude-sonnet-4",
"messages": [{
"role": "assistant",
"content": [{
"type": "tool_use",
"id": "toolu_search",
"name": "web_search",
"input": {"query": "weather"}
}, {
"type": "tool_use",
"id": "toolu_calc",
"name": "calculate",
"input": {"expression": "2+2"}
}]
}],
"max_tokens": 1000
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
// Should have one assistant message with multiple tool_calls
require.Len(t, openaiRequest.Messages, 1)
assistantMsg := openaiRequest.Messages[0]
assert.Equal(t, "assistant", assistantMsg.Role)
assert.Nil(t, assistantMsg.Content) // No text content, so should be null
// Verify multiple tool_calls
require.NotNil(t, assistantMsg.ToolCalls)
require.Len(t, assistantMsg.ToolCalls, 2)
// First tool call
assert.Equal(t, "toolu_search", assistantMsg.ToolCalls[0].Id)
assert.Equal(t, "web_search", assistantMsg.ToolCalls[0].Function.Name)
// Second tool call
assert.Equal(t, "toolu_calc", assistantMsg.ToolCalls[1].Id)
assert.Equal(t, "calculate", assistantMsg.ToolCalls[1].Function.Name)
})
t.Run("convert_multiple_tool_results", func(t *testing.T) {
// Test multiple tool_result messages
claudeRequest := `{
"model": "anthropic/claude-sonnet-4",
"messages": [{
"role": "user",
"content": [{
"type": "tool_result",
"tool_use_id": "toolu_search",
"content": "Weather: 25°C sunny"
}, {
"type": "tool_result",
"tool_use_id": "toolu_calc",
"content": "Result: 4"
}]
}],
"max_tokens": 1000
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
// Should have two tool messages
require.Len(t, openaiRequest.Messages, 2)
// First tool result
toolMsg1 := openaiRequest.Messages[0]
assert.Equal(t, "tool", toolMsg1.Role)
assert.Equal(t, "Weather: 25°C sunny", toolMsg1.Content)
assert.Equal(t, "toolu_search", toolMsg1.ToolCallId)
// Second tool result
toolMsg2 := openaiRequest.Messages[1]
assert.Equal(t, "tool", toolMsg2.Role)
assert.Equal(t, "Result: 4", toolMsg2.Content)
assert.Equal(t, "toolu_calc", toolMsg2.ToolCallId)
})
t.Run("convert_mixed_text_and_tool_use", func(t *testing.T) {
// Test message with both text and tool_use
claudeRequest := `{
"model": "anthropic/claude-sonnet-4",
"messages": [{
"role": "assistant",
"content": [{
"type": "text",
"text": "Let me search for that information and do a calculation."
}, {
"type": "tool_use",
"id": "toolu_search123",
"name": "search_database",
"input": {"table": "users", "limit": 10}
}]
}],
"max_tokens": 1000
}`
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
require.NoError(t, err)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
// Should have one assistant message with both content and tool_calls
require.Len(t, openaiRequest.Messages, 1)
assistantMsg := openaiRequest.Messages[0]
assert.Equal(t, "assistant", assistantMsg.Role)
assert.Equal(t, "Let me search for that information and do a calculation.", assistantMsg.Content)
// Should have tool_calls
require.NotNil(t, assistantMsg.ToolCalls)
require.Len(t, assistantMsg.ToolCalls, 1)
assert.Equal(t, "toolu_search123", assistantMsg.ToolCalls[0].Id)
assert.Equal(t, "search_database", assistantMsg.ToolCalls[0].Function.Name)
})
}
func TestClaudeToOpenAIConverter_ConvertOpenAIResponseToClaude(t *testing.T) {
converter := &ClaudeToOpenAIConverter{}
t.Run("convert_tool_calls_response", func(t *testing.T) {
// Test OpenAI response with tool calls conversion to Claude format
openaiResponse := `{
"id": "gen-1756214072-tVFkPBV6lxee00IqNAC5",
"provider": "Google",
"model": "anthropic/claude-sonnet-4",
"object": "chat.completion",
"created": 1756214072,
"choices": [{
"logprobs": null,
"finish_reason": "tool_calls",
"native_finish_reason": "tool_calls",
"index": 0,
"message": {
"role": "assistant",
"content": "I'll analyze the README file to understand this project's purpose.",
"refusal": null,
"reasoning": null,
"tool_calls": [{
"id": "toolu_vrtx_017ijjgx8hpigatPzzPW59Wq",
"index": 0,
"type": "function",
"function": {
"name": "Read",
"arguments": "{\"file_path\": \"/Users/zhangty/git/higress/README.md\"}"
}
}]
}
}],
"usage": {
"prompt_tokens": 14923,
"completion_tokens": 81,
"total_tokens": 15004
}
}`
result, err := converter.ConvertOpenAIResponseToClaude(nil, []byte(openaiResponse))
require.NoError(t, err)
var claudeResponse claudeTextGenResponse
err = json.Unmarshal(result, &claudeResponse)
require.NoError(t, err)
// Verify basic response fields
assert.Equal(t, "gen-1756214072-tVFkPBV6lxee00IqNAC5", claudeResponse.Id)
assert.Equal(t, "message", claudeResponse.Type)
assert.Equal(t, "assistant", claudeResponse.Role)
assert.Equal(t, "anthropic/claude-sonnet-4", claudeResponse.Model)
assert.Equal(t, "tool_use", *claudeResponse.StopReason)
// Verify usage
assert.Equal(t, 14923, claudeResponse.Usage.InputTokens)
assert.Equal(t, 81, claudeResponse.Usage.OutputTokens)
// Verify content array has both text and tool_use
require.Len(t, claudeResponse.Content, 2)
// First content should be text
textContent := claudeResponse.Content[0]
assert.Equal(t, "text", textContent.Type)
assert.Equal(t, "I'll analyze the README file to understand this project's purpose.", textContent.Text)
// Second content should be tool_use
toolContent := claudeResponse.Content[1]
assert.Equal(t, "tool_use", toolContent.Type)
assert.Equal(t, "toolu_vrtx_017ijjgx8hpigatPzzPW59Wq", toolContent.Id)
assert.Equal(t, "Read", toolContent.Name)
// Verify tool arguments
require.NotNil(t, toolContent.Input)
assert.Equal(t, "/Users/zhangty/git/higress/README.md", toolContent.Input["file_path"])
})
}
func TestClaudeToOpenAIConverter_ConvertThinkingConfig(t *testing.T) {
converter := &ClaudeToOpenAIConverter{}
tests := []struct {
name string
claudeRequest string
expectedMaxTokens int
expectedEffort string
expectThinkingConfig bool
}{
{
name: "thinking_enabled_low",
claudeRequest: `{
"model": "claude-sonnet-4",
"max_tokens": 1000,
"messages": [{"role": "user", "content": "Hello"}],
"thinking": {"type": "enabled", "budget_tokens": 2048}
}`,
expectedMaxTokens: 2048,
expectedEffort: "low",
expectThinkingConfig: true,
},
{
name: "thinking_enabled_medium",
claudeRequest: `{
"model": "claude-sonnet-4",
"max_tokens": 1000,
"messages": [{"role": "user", "content": "Hello"}],
"thinking": {"type": "enabled", "budget_tokens": 8192}
}`,
expectedMaxTokens: 8192,
expectedEffort: "medium",
expectThinkingConfig: true,
},
{
name: "thinking_enabled_high",
claudeRequest: `{
"model": "claude-sonnet-4",
"max_tokens": 1000,
"messages": [{"role": "user", "content": "Hello"}],
"thinking": {"type": "enabled", "budget_tokens": 20480}
}`,
expectedMaxTokens: 20480,
expectedEffort: "high",
expectThinkingConfig: true,
},
{
name: "thinking_disabled",
claudeRequest: `{
"model": "claude-sonnet-4",
"max_tokens": 1000,
"messages": [{"role": "user", "content": "Hello"}],
"thinking": {"type": "disabled"}
}`,
expectedMaxTokens: 0,
expectedEffort: "",
expectThinkingConfig: false,
},
{
name: "no_thinking",
claudeRequest: `{
"model": "claude-sonnet-4",
"max_tokens": 1000,
"messages": [{"role": "user", "content": "Hello"}]
}`,
expectedMaxTokens: 0,
expectedEffort: "",
expectThinkingConfig: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := converter.ConvertClaudeRequestToOpenAI([]byte(tt.claudeRequest))
assert.NoError(t, err)
assert.NotNil(t, result)
var openaiRequest chatCompletionRequest
err = json.Unmarshal(result, &openaiRequest)
assert.NoError(t, err)
if tt.expectThinkingConfig {
assert.Equal(t, tt.expectedMaxTokens, openaiRequest.ReasoningMaxTokens)
assert.Equal(t, tt.expectedEffort, openaiRequest.ReasoningEffort)
} else {
assert.Equal(t, 0, openaiRequest.ReasoningMaxTokens)
assert.Equal(t, "", openaiRequest.ReasoningEffort)
}
})
}
}
func TestClaudeToOpenAIConverter_ConvertReasoningResponseToClaude(t *testing.T) {
converter := &ClaudeToOpenAIConverter{}
tests := []struct {
name string
openaiResponse string
expectThinking bool
expectedText string
}{
{
name: "response_with_reasoning_content",
openaiResponse: `{
"id": "chatcmpl-test123",
"object": "chat.completion",
"created": 1699999999,
"model": "gpt-4o",
"choices": [{
"index": 0,
"message": {
"role": "assistant",
"content": "Based on my analysis, the answer is 42.",
"reasoning_content": "Let me think about this step by step:\n1. The question asks about the meaning of life\n2. According to Douglas Adams, the answer is 42\n3. Therefore, 42 is the correct answer"
},
"finish_reason": "stop"
}],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 20,
"total_tokens": 30
}
}`,
expectThinking: true,
expectedText: "Based on my analysis, the answer is 42.",
},
{
name: "response_with_reasoning_field",
openaiResponse: `{
"id": "chatcmpl-test789",
"object": "chat.completion",
"created": 1699999999,
"model": "gpt-4o",
"choices": [{
"index": 0,
"message": {
"role": "assistant",
"content": "Based on my analysis, the answer is 42.",
"reasoning": "Let me think about this step by step:\n1. The question asks about the meaning of life\n2. According to Douglas Adams, the answer is 42\n3. Therefore, 42 is the correct answer"
},
"finish_reason": "stop"
}],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 20,
"total_tokens": 30
}
}`,
expectThinking: true,
expectedText: "Based on my analysis, the answer is 42.",
},
{
name: "response_without_reasoning_content",
openaiResponse: `{
"id": "chatcmpl-test456",
"object": "chat.completion",
"created": 1699999999,
"model": "gpt-4o",
"choices": [{
"index": 0,
"message": {
"role": "assistant",
"content": "The answer is 42."
},
"finish_reason": "stop"
}],
"usage": {
"prompt_tokens": 5,
"completion_tokens": 10,
"total_tokens": 15
}
}`,
expectThinking: false,
expectedText: "The answer is 42.",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := converter.ConvertOpenAIResponseToClaude(nil, []byte(tt.openaiResponse))
assert.NoError(t, err)
assert.NotNil(t, result)
var claudeResponse claudeTextGenResponse
err = json.Unmarshal(result, &claudeResponse)
assert.NoError(t, err)
// Verify response structure
assert.Equal(t, "message", claudeResponse.Type)
assert.Equal(t, "assistant", claudeResponse.Role)
assert.NotEmpty(t, claudeResponse.Id) // ID should be present
if tt.expectThinking {
// Should have both thinking and text content
assert.Len(t, claudeResponse.Content, 2)
// First should be thinking
thinkingContent := claudeResponse.Content[0]
assert.Equal(t, "thinking", thinkingContent.Type)
assert.Equal(t, "", thinkingContent.Signature) // OpenAI doesn't provide signature
assert.Contains(t, thinkingContent.Thinking, "Let me think about this step by step")
// Second should be text
textContent := claudeResponse.Content[1]
assert.Equal(t, "text", textContent.Type)
assert.Equal(t, tt.expectedText, textContent.Text)
} else {
// Should only have text content
assert.Len(t, claudeResponse.Content, 1)
textContent := claudeResponse.Content[0]
assert.Equal(t, "text", textContent.Type)
assert.Equal(t, tt.expectedText, textContent.Text)
}
})
}
}

View File

@@ -8,10 +8,10 @@ import (
"strings"
"github.com/alibaba/higress/plugins/wasm-go/extensions/ai-proxy/util"
"github.com/higress-group/wasm-go/pkg/log"
"github.com/higress-group/wasm-go/pkg/wrapper"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
"github.com/higress-group/wasm-go/pkg/log"
"github.com/higress-group/wasm-go/pkg/wrapper"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)

View File

@@ -29,7 +29,12 @@ const (
reasoningEndTag = "</think>"
)
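// NonOpenAIStyleOptions carries request fields that are not part of the OpenAI
// wire format; providers are expected to translate or strip them in their
// TransformRequestBody (the OpenRouter provider rewrites reasoning_max_tokens,
// for example).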
type NonOpenAIStyleOptions struct {
ReasoningMaxTokens int `json:"reasoning_max_tokens,omitempty"`
}
type chatCompletionRequest struct {
NonOpenAIStyleOptions
Messages []chatMessage `json:"messages"`
Model string `json:"model"`
Store bool `json:"store,omitempty"`
@@ -169,7 +174,9 @@ type chatMessage struct {
Role string `json:"role,omitempty"`
Content any `json:"content,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
Reasoning string `json:"reasoning,omitempty"` // Alternative field some providers (e.g. OpenRouter) use for reasoning content in responses
ToolCalls []toolCall `json:"tool_calls,omitempty"`
FunctionCall *functionCall `json:"function_call,omitempty"` // For legacy OpenAI format
Refusal string `json:"refusal,omitempty"`
ToolCallId string `json:"tool_call_id,omitempty"`
}

View File

@@ -0,0 +1,117 @@
package provider
import (
"errors"
"net/http"
"strings"
"github.com/alibaba/higress/plugins/wasm-go/extensions/ai-proxy/util"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
"github.com/higress-group/wasm-go/pkg/wrapper"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
// openrouterProvider is the provider implementation for the OpenRouter service.
const (
openrouterDomain = "openrouter.ai"
openrouterChatCompletionPath = "/api/v1/chat/completions"
openrouterCompletionPath = "/api/v1/completions"
)
type openrouterProviderInitializer struct{}
func (o *openrouterProviderInitializer) ValidateConfig(config *ProviderConfig) error {
if len(config.apiTokens) == 0 {
return errors.New("no apiToken found in provider config")
}
return nil
}
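// DefaultCapabilities maps each supported API name to the OpenRouter path the
// request is rewritten to.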
func (o *openrouterProviderInitializer) DefaultCapabilities() map[string]string {
return map[string]string{
string(ApiNameChatCompletion): openrouterChatCompletionPath,
string(ApiNameCompletion): openrouterCompletionPath,
}
}
func (o *openrouterProviderInitializer) CreateProvider(config ProviderConfig) (Provider, error) {
config.setDefaultCapabilities(o.DefaultCapabilities())
return &openrouterProvider{
config: config,
contextCache: createContextCache(&config),
}, nil
}
type openrouterProvider struct {
config ProviderConfig
contextCache *contextCache
}
func (o *openrouterProvider) GetProviderType() string {
return providerTypeOpenRouter
}
func (o *openrouterProvider) OnRequestHeaders(ctx wrapper.HttpContext, apiName ApiName) error {
o.config.handleRequestHeaders(o, ctx, apiName)
return nil
}
func (o *openrouterProvider) OnRequestBody(ctx wrapper.HttpContext, apiName ApiName, body []byte) (types.Action, error) {
if !o.config.isSupportedAPI(apiName) {
return types.ActionContinue, errUnsupportedApiName
}
return o.config.handleRequestBody(o, o.contextCache, ctx, apiName, body)
}
func (o *openrouterProvider) TransformRequestHeaders(ctx wrapper.HttpContext, apiName ApiName, headers http.Header) {
util.OverwriteRequestPathHeaderByCapability(headers, string(apiName), o.config.capabilities)
util.OverwriteRequestHostHeader(headers, openrouterDomain)
util.OverwriteRequestAuthorizationHeader(headers, "Bearer "+o.config.GetApiTokenInUse(ctx))
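// The body may still be rewritten later (model mapping, reasoning fields), so
// drop the stale Content-Length and let the host recompute it.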
headers.Del("Content-Length")
}
func (o *openrouterProvider) TransformRequestBody(ctx wrapper.HttpContext, apiName ApiName, body []byte) ([]byte, error) {
if apiName != ApiNameChatCompletion {
return o.config.defaultTransformRequestBody(ctx, apiName, body)
}
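// A request carrying the internal flat reasoning_max_tokens field, e.g.
//   {"model": "anthropic/claude-sonnet-4", "reasoning_max_tokens": 2048, ...}
// is rewritten below into OpenRouter's nested form
//   {"model": "anthropic/claude-sonnet-4", "reasoning": {"max_tokens": 2048}, ...}
// (illustrative payloads only), then passed through the default model mapping.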
// Check if ReasoningMaxTokens exists in the request body
reasoningMaxTokens := gjson.GetBytes(body, "reasoning_max_tokens")
if !reasoningMaxTokens.Exists() || reasoningMaxTokens.Int() == 0 {
// No reasoning_max_tokens, use default transformation
return o.config.defaultTransformRequestBody(ctx, apiName, body)
}
// Clear reasoning_effort field if it exists
modifiedBody, err := sjson.DeleteBytes(body, "reasoning_effort")
if err != nil {
// If delete fails, continue with original body
modifiedBody = body
}
// Set reasoning.max_tokens to the value of reasoning_max_tokens
modifiedBody, err = sjson.SetBytes(modifiedBody, "reasoning.max_tokens", reasoningMaxTokens.Int())
if err != nil {
return nil, err
}
// Remove the original reasoning_max_tokens field
modifiedBody, err = sjson.DeleteBytes(modifiedBody, "reasoning_max_tokens")
if err != nil {
return nil, err
}
// Apply default model mapping
return o.config.defaultTransformRequestBody(ctx, apiName, modifiedBody)
}
func (o *openrouterProvider) GetApiName(path string) ApiName {
if strings.Contains(path, openrouterChatCompletionPath) {
return ApiNameChatCompletion
}
if strings.Contains(path, openrouterCompletionPath) {
return ApiNameCompletion
}
return ""
}

View File

@@ -131,6 +131,7 @@ const (
providerTypeDify = "dify"
providerTypeBedrock = "bedrock"
providerTypeVertex = "vertex"
providerTypeOpenRouter = "openrouter"
protocolOpenAI = "openai"
protocolOriginal = "original"
@@ -209,6 +210,7 @@ var (
providerTypeDify: &difyProviderInitializer{},
providerTypeBedrock: &bedrockProviderInitializer{},
providerTypeVertex: &vertexProviderInitializer{},
providerTypeOpenRouter: &openrouterProviderInitializer{},
}
)