Compare commits

...

67 Commits

Author SHA1 Message Date
johnlanni
b446651dd3 Add release notes 2026-02-22 12:31:15 +00:00
github-actions[bot]
b3fb6324a4 Add release notes (#3524)
Co-authored-by: johnlanni <6763318+johnlanni@users.noreply.github.com>
2026-02-22 20:14:09 +08:00
澄潭
8576128e4c feat(ai-statistics): add Claude/Anthropic streaming tool calls parsing support (#3523) 2026-02-21 14:14:22 +08:00
澄潭
caa5317723 feat: share hub parameter between deployments and plugins with separate namespaces (#3521) 2026-02-20 23:30:48 +08:00
澄潭
093ef9a2c0 Update index.ts 2026-02-19 12:47:34 +08:00
澄潭
9346f1340b refactor: migrate MCP SDK to main repo (#3516) 2026-02-16 23:39:18 +08:00
澄潭
87c6cc9c9f Fix model pattern for Dashscope entry 2026-02-16 22:40:37 +08:00
澄潭
ac29ba6984 Rename qwen3-coder-plus to qwen3.5-plus 2026-02-16 22:24:58 +08:00
澄潭
1c847dd553 feat(ai-proxy): strip dynamic cch field from billing header to enable caching (#3518) 2026-02-15 23:57:08 +08:00
澄潭
a07f5024a9 fix(ai-proxy): convert OpenAI tool role to Claude user role with tool_result (#3517) 2026-02-15 22:45:09 +08:00
澄潭
814c3307ba fix(ai-statistics): lightweight mode should include question and model (#3513) 2026-02-15 20:49:57 +08:00
澄潭
b76a3aca5e feat(ai-statistics): add lightweight mode with use_default_response_attributes (#3512) 2026-02-15 17:23:54 +08:00
澄潭
28df33c596 feat(ai-statistics): add system field support for Claude /v1/messages API (#3511) 2026-02-15 14:16:19 +08:00
澄潭
8e7292c42e fix(ai-proxy): fix Claude protocol conversion issues (#3510) 2026-02-15 13:52:26 +08:00
澄潭
d03932b3ea fix(ai-proxy): add streaming tool_calls support for Claude provider (#3507) 2026-02-15 08:48:20 +08:00
澄潭
5a2ff8c836 fix(ai-proxy): convert Claude tool_use stop_reason to OpenAI tool_calls format (#3506) 2026-02-14 21:52:25 +08:00
澄潭
6f8ef2ff69 fix(ai-statistics): use RuleAppend as default for streaming answer extraction (#3505) 2026-02-14 13:58:55 +08:00
澄潭
67e2913f3d fix(ai-proxy): preserve text content alongside tool_result in Claude to OpenAI conversion (#3503) 2026-02-14 12:12:07 +08:00
澄潭
e996194228 fix(ai-proxy): add missing event field in Claude streaming response (#3502) 2026-02-14 09:55:14 +08:00
澄潭
95f86d7ab5 feat(ai-proxy): add consumer affinity for stateful APIs (#3499) 2026-02-14 09:22:12 +08:00
澄潭
5d5d20df1f fix(ci): pin GitHub Actions runner to ubuntu-22.04 (#3500) 2026-02-14 07:17:10 +08:00
澄潭
1ddc07992c Update index.ts 2026-02-13 21:41:19 +08:00
澄潭
13ed2284ae fix(ai-proxy): fix claude system content null serialization (#3496) 2026-02-13 20:32:37 +08:00
澄潭
f9c7527753 Update index.ts 2026-02-13 09:40:50 +08:00
澄潭
c2be0e8c9a fix(ai-statistics): add ValueSource to built-in attributes for streaming body buffering (#3491) 2026-02-13 09:03:06 +08:00
澄潭
927fb52309 Update sync-skills-to-oss.yaml 2026-02-13 00:01:32 +08:00
澄潭
c0761c4553 Update SKILL.md 2026-02-12 23:38:07 +08:00
澄潭
4f857597da docs: optimize provider list in OpenClaw integration skill (#3490) 2026-02-12 23:36:42 +08:00
澄潭
0d45ce755f feat(skill): add z.ai domain and code plan mode options (#3489) 2026-02-12 23:21:49 +08:00
澄潭
44d688a168 feat(ai-proxy): add zhipu provider enhancements (#3488) 2026-02-12 22:19:13 +08:00
澄潭
0d9354da16 fix(skill): correct model reference prefix for higress provider (#3485) 2026-02-12 19:52:51 +08:00
澄潭
65834bff21 fix(skill): update higress-openclaw-integration to use dedicated install directory (#3484) 2026-02-12 19:39:43 +08:00
澄潭
668c2b3669 Update SKILL.md 2026-02-12 18:35:24 +08:00
澄潭
ff4de901e7 Update SKILL.md 2026-02-12 18:28:29 +08:00
澄潭
a1967adb94 fix: use absolute path for packaging skills (#3483) 2026-02-12 18:23:16 +08:00
澄潭
f6cb3031fe feat: optimize skills packaging in OSS sync workflow (#3482) 2026-02-12 18:19:57 +08:00
澄潭
d4a0665957 feat: add GitHub Action to sync skills to OSS (#3481) 2026-02-12 18:15:03 +08:00
澄潭
2c7771da42 Update index.ts 2026-02-12 16:56:23 +08:00
澄潭
75c6fbe090 Rename plugin ID from 'higress-ai-gateway' to 'higress' 2026-02-12 16:50:51 +08:00
澄潭
b153d08610 Update README.md 2026-02-12 16:50:17 +08:00
澄潭
de633d8610 Rename plugin ID from 'higress-ai-gateway' to 'higress' 2026-02-12 16:48:41 +08:00
澄潭
f2e4942f00 Update package.json 2026-02-12 16:48:26 +08:00
澄潭
1b3a8b762b docs: improve OpenClaw integration prompt for configuration updates (#3480) 2026-02-12 16:41:31 +08:00
澄潭
c885b89d03 Update SKILL.md 2026-02-12 16:24:01 +08:00
澄潭
ce4dff9887 feat(ai-proxy): convert developer role to system for unsupported providers (#3479) 2026-02-12 16:14:46 +08:00
澄潭
6935a44d53 docs: mark OpenClaw commands as interactive in SKILL.md (#3478) 2026-02-12 15:49:13 +08:00
澄潭
b33e2be5e9 Update SKILL.md 2026-02-12 15:43:01 +08:00
澄潭
d2385f1b30 fix: remove duplicate /v1 path in OpenClaw plugin baseUrl (#3477) 2026-02-12 15:40:01 +08:00
澄潭
ef5e3ee31b Update index.ts 2026-02-12 15:26:55 +08:00
澄潭
d2b0885236 Update index.ts 2026-02-12 15:07:59 +08:00
澄潭
6cb48247fd Delete compatibility information from README.md
Removed compatibility section for OpenClaw and Higress AI Gateway.
2026-02-12 14:42:26 +08:00
澄潭
773f639260 Update SKILL.md 2026-02-12 14:41:18 +08:00
澄潭
fe58ce3943 Update SKILL.md 2026-02-12 14:37:29 +08:00
澄潭
0dbc056ce9 docs: improve higress-openclaw-integration skill for better usability (#3476) 2026-02-12 14:34:03 +08:00
github-actions[bot]
3bf39b60ea Add release notes (#3468)
Co-authored-by: johnlanni <6763318+johnlanni@users.noreply.github.com>
2026-02-12 14:02:50 +08:00
澄潭
e9bb5d3255 refactor: rename skill to higress-openclaw-integration and update model configs (#3475) 2026-02-12 14:02:29 +08:00
澄潭
1f10cc293f chore: update higress-console helm dependency to 2.2.0 (#3472) 2026-02-11 17:46:53 +08:00
Kent Dong
22ae1aaf69 fix: Fix the incorrect api-version appending logic in AzureProvider (#3289) 2026-02-11 17:44:29 +08:00
Kent Dong
cd0a6116ce fix: Fix jwt-auth plugin related typos (#3291) 2026-02-11 17:43:49 +08:00
Kent Dong
de50630680 doc: Add more related repositories to README files (#3293) 2026-02-11 17:43:39 +08:00
EndlessSeeker
b3f5d42210 fix: helm pull old image tag (#3471) 2026-02-11 17:32:32 +08:00
woody
5e2892f18c fix(provider/bedrock.go): 优化工具调用消息处理逻辑 || fix(provider/bedrock.go): Optimization tool calls message processing logic (#3470) 2026-02-11 12:33:12 +08:00
澄潭
0cc92aa6b8 fix: update golang.org/x/net to v0.47.0 for hgctl build (#3469) 2026-02-10 23:21:24 +08:00
澄潭
3ac11743d6 Release 2.2.0 (#3457)
Co-authored-by: EndlessSeeker <153817598+EndlessSeeker@users.noreply.github.com>
Co-authored-by: jingze <daijingze.djz@alibaba-inc.com>
2026-02-10 21:33:23 +08:00
澄潭
cd670e957f refactor(ai-proxy): remove automatic Bash tool injection in Claude Code mode (#3462) 2026-02-07 20:24:43 +08:00
澄潭
92ece2c86d docs: add Claude Code mode to higress-clawdbot-integration skill (#3461) 2026-02-07 17:00:19 +08:00
澄潭
083bae0e73 feat(ai-proxy): add Claude Code mode support for Claude provider (#3459) 2026-02-07 15:57:19 +08:00
143 changed files with 18478 additions and 4665 deletions

View File

@@ -1,431 +0,0 @@
---
name: higress-clawdbot-integration
description: "Deploy and configure Higress AI Gateway for Clawdbot/OpenClaw integration. Use when: (1) User wants to deploy Higress AI Gateway, (2) User wants to configure Clawdbot/OpenClaw to use Higress as a model provider, (3) User mentions 'higress', 'ai gateway', 'model gateway', 'AI网关', (4) User wants to set up model routing or auto-routing, (5) User needs to manage LLM provider API keys, (6) User wants to track token usage and conversation history."
---
# Higress AI Gateway Integration
Deploy and configure Higress AI Gateway for Clawdbot/OpenClaw integration with one-click deployment, model provider configuration, auto-routing, and session monitoring.
## Prerequisites
- Docker installed and running
- Internet access to download the setup script
- LLM provider API keys (at least one)
## Workflow
### Step 1: Download Setup Script
Download the official get-ai-gateway.sh script:
```bash
curl -fsSL https://raw.githubusercontent.com/higress-group/higress-standalone/main/all-in-one/get-ai-gateway.sh -o get-ai-gateway.sh
chmod +x get-ai-gateway.sh
```
### Step 2: Gather Configuration
Ask the user for:
1. **LLM Provider API Keys** (at least one required):
**Top Commonly Used Providers:**
- Aliyun Dashscope (Qwen): `--dashscope-key`
- DeepSeek: `--deepseek-key`
- Moonshot (Kimi): `--moonshot-key`
- Zhipu AI: `--zhipuai-key`
- Minimax: `--minimax-key`
- Azure OpenAI: `--azure-key`
- AWS Bedrock: `--bedrock-key`
- Google Vertex AI: `--vertex-key`
- OpenAI: `--openai-key`
- OpenRouter: `--openrouter-key`
- Grok: `--grok-key`
See CLI Parameters Reference for complete list with model pattern options.
2. **Port Configuration** (optional):
- HTTP port: `--http-port` (default: 8080)
- HTTPS port: `--https-port` (default: 8443)
- Console port: `--console-port` (default: 8001)
3. **Auto-routing** (optional):
- Enable: `--auto-routing`
- Default model: `--auto-routing-default-model`
### Step 3: Run Setup Script
Run the script in non-interactive mode with gathered parameters:
```bash
./get-ai-gateway.sh start --non-interactive \
--dashscope-key sk-xxx \
--openai-key sk-xxx \
--auto-routing \
--auto-routing-default-model qwen-turbo
```
**Automatic Repository Selection:**
The script automatically detects your timezone and selects the geographically closest registry for both:
- **Container image** (`IMAGE_REPO`)
- **WASM plugins** (`PLUGIN_REGISTRY`)
| Region | Timezone Examples | Selected Registry |
|--------|------------------|-------------------|
| China & nearby | Asia/Shanghai, Asia/Hong_Kong, etc. | `higress-registry.cn-hangzhou.cr.aliyuncs.com` |
| Southeast Asia | Asia/Singapore, Asia/Jakarta, etc. | `higress-registry.ap-southeast-7.cr.aliyuncs.com` |
| North America | America/*, US/*, Canada/* | `higress-registry.us-west-1.cr.aliyuncs.com` |
| Others | Default fallback | `higress-registry.cn-hangzhou.cr.aliyuncs.com` |
**Manual Override (optional):**
If you want to use a specific registry:
```bash
IMAGE_REPO="higress-registry.ap-southeast-7.cr.aliyuncs.com/higress/all-in-one" \
PLUGIN_REGISTRY="higress-registry.ap-southeast-7.cr.aliyuncs.com" \
./get-ai-gateway.sh start --non-interactive \
--dashscope-key sk-xxx \
--openai-key sk-xxx
```
### Step 4: Verify Deployment
After script completion:
1. Check container is running:
```bash
docker ps --filter "name=higress-ai-gateway"
```
2. Test the gateway endpoint:
```bash
curl http://localhost:8080/v1/models
```
3. Access the console (optional):
```
http://localhost:8001
```
### Step 5: Configure Clawdbot/OpenClaw Plugin
If the user wants to use Higress with Clawdbot/OpenClaw, install the appropriate plugin:
#### Automatic Installation
Detect runtime and install the correct plugin version:
```bash
# Detect which runtime is installed
if command -v clawdbot &> /dev/null; then
RUNTIME="clawdbot"
RUNTIME_DIR="$HOME/.clawdbot"
PLUGIN_SRC="scripts/plugin-clawdbot"
elif command -v openclaw &> /dev/null; then
RUNTIME="openclaw"
RUNTIME_DIR="$HOME/.openclaw"
PLUGIN_SRC="scripts/plugin"
else
echo "Error: Neither clawdbot nor openclaw is installed"
exit 1
fi
# Install the plugin
PLUGIN_DEST="$RUNTIME_DIR/extensions/higress-ai-gateway"
echo "Installing Higress AI Gateway plugin for $RUNTIME..."
mkdir -p "$(dirname "$PLUGIN_DEST")"
[ -d "$PLUGIN_DEST" ] && rm -rf "$PLUGIN_DEST"
cp -r "$PLUGIN_SRC" "$PLUGIN_DEST"
echo "✓ Plugin installed at: $PLUGIN_DEST"
# Configure provider
echo
echo "Configuring provider..."
$RUNTIME models auth login --provider higress
```
The plugin will guide you through an interactive setup for:
1. Gateway URL (default: `http://localhost:8080`)
2. Console URL (default: `http://localhost:8001`)
3. API Key (optional for local deployments)
4. Model list (auto-detected or manually specified)
5. Auto-routing default model (if using `higress/auto`)
### Step 6: Manage API Keys (optional)
After deployment, manage API keys without redeploying:
```bash
# View configured API keys
./get-ai-gateway.sh config list
# Add or update an API key (hot-reload, no restart needed)
./get-ai-gateway.sh config add --provider <provider> --key <api-key>
# Remove an API key (hot-reload, no restart needed)
./get-ai-gateway.sh config remove --provider <provider>
```
**Note:** Changes take effect immediately via hot-reload. No container restart required.
## CLI Parameters Reference
### Basic Options
| Parameter | Description | Default |
|-----------|-------------|---------|
| `--non-interactive` | Run without prompts | - |
| `--http-port` | Gateway HTTP port | 8080 |
| `--https-port` | Gateway HTTPS port | 8443 |
| `--console-port` | Console port | 8001 |
| `--container-name` | Container name | higress-ai-gateway |
| `--data-folder` | Data folder path | ./higress |
| `--auto-routing` | Enable auto-routing feature | - |
| `--auto-routing-default-model` | Default model when no rule matches | - |
### Environment Variables
| Variable | Description | Default |
|----------|-------------|---------|
| `IMAGE_REPO` | Repository for the all-in-one container image (auto-selected based on timezone) | `higress-registry.cn-hangzhou.cr.aliyuncs.com/higress/all-in-one` |
| `PLUGIN_REGISTRY` | Registry URL for WASM plugins (auto-selected based on timezone) | `higress-registry.cn-hangzhou.cr.aliyuncs.com` |
**Auto-Selection Logic:**
The registry is automatically selected based on your timezone:
- **China & nearby** (Asia/Shanghai, etc.) → `higress-registry.cn-hangzhou.cr.aliyuncs.com`
- **Southeast Asia** (Asia/Singapore, etc.) → `higress-registry.ap-southeast-7.cr.aliyuncs.com`
- **North America** (America/*, etc.) → `higress-registry.us-west-1.cr.aliyuncs.com`
- **Others** → `higress-registry.cn-hangzhou.cr.aliyuncs.com` (default)
Both container images and WASM plugins use the same registry for consistency.
**Manual Override:**
```bash
PLUGIN_REGISTRY="higress-registry.ap-southeast-7.cr.aliyuncs.com" \
./get-ai-gateway.sh start --non-interactive ...
```
### LLM Provider API Keys
**Top Providers:**
| Parameter | Provider |
|-----------|----------|
| `--dashscope-key` | Aliyun Dashscope (Qwen) |
| `--deepseek-key` | DeepSeek |
| `--moonshot-key` | Moonshot (Kimi) |
| `--zhipuai-key` | Zhipu AI |
| `--openai-key` | OpenAI |
| `--openrouter-key` | OpenRouter |
| `--claude-key` | Claude |
| `--gemini-key` | Google Gemini |
| `--groq-key` | Groq |
**Additional Providers:**
`--doubao-key`, `--baichuan-key`, `--yi-key`, `--stepfun-key`, `--minimax-key`, `--cohere-key`, `--mistral-key`, `--github-key`, `--fireworks-key`, `--togetherai-key`, `--grok-key`, `--azure-key`, `--bedrock-key`, `--vertex-key`
## Managing Configuration
### API Keys
```bash
# List all configured API keys
./get-ai-gateway.sh config list
# Add or update an API key (hot-reload)
./get-ai-gateway.sh config add --provider deepseek --key sk-xxx
# Remove an API key (hot-reload)
./get-ai-gateway.sh config remove --provider deepseek
```
**Supported provider aliases:**
`dashscope`/`qwen`, `moonshot`/`kimi`, `zhipuai`/`zhipu`, `togetherai`/`together`
### Routing Rules
```bash
# Add a routing rule
./get-ai-gateway.sh route add --model claude-opus-4.5 --trigger "深入思考|deep thinking"
# List all rules
./get-ai-gateway.sh route list
# Remove a rule
./get-ai-gateway.sh route remove --rule-id 0
```
See [higress-auto-router](../higress-auto-router/SKILL.md) for detailed documentation.
## Access Logs
Gateway access logs are available at:
```
$DATA_FOLDER/logs/access.log
```
These logs can be used with the **agent-session-monitor** skill for token tracking and conversation analysis.
## Related Skills
- **higress-auto-router**: Configure automatic model routing using CLI commands
See: [higress-auto-router](../higress-auto-router/SKILL.md)
- **agent-session-monitor**: Monitor and track token usage across sessions
See: [agent-session-monitor](../agent-session-monitor/SKILL.md)
## Examples
### Example 1: Basic Deployment with Dashscope
**User:** 帮我部署一个Higress AI网关使用阿里云的通义千问
**Steps:**
1. Download script
2. Get Dashscope API key from user
3. Run (script auto-detects timezone and selects optimal registry):
```bash
./get-ai-gateway.sh start --non-interactive \
--dashscope-key sk-xxx
```
**Response:**
```
Auto-detected timezone: Asia/Shanghai
Selected plugin registry: higress-registry.cn-hangzhou.cr.aliyuncs.com
✅ Higress AI Gateway 部署完成!
网关地址: http://localhost:8080/v1/chat/completions
控制台: http://localhost:8001
日志目录: ./higress/logs
已配置的模型提供商:
- Aliyun Dashscope (Qwen)
测试命令:
curl 'http://localhost:8080/v1/chat/completions' \
-H 'Content-Type: application/json' \
-d '{"model": "qwen-turbo", "messages": [{"role": "user", "content": "Hello!"}]}'
```
### Example 2: Full Integration with Clawdbot
**User:** 完整配置Higress和Clawdbot的集成
**Steps:**
1. Deploy Higress AI Gateway (auto-detects timezone)
2. Install and configure Clawdbot plugin
3. Enable auto-routing
4. Set up session monitoring
**Response:**
```
Auto-detected timezone: Asia/Shanghai
Selected plugin registry: higress-registry.cn-hangzhou.cr.aliyuncs.com
✅ Higress AI Gateway 集成完成!
1. 网关已部署:
- HTTP: http://localhost:8080
- Console: http://localhost:8001
- 容器镜像: Hangzhou (自动选择)
- 插件镜像: Hangzhou (自动选择)
2. Clawdbot 插件配置:
Plugin installed at: /root/.clawdbot/extensions/higress-ai-gateway
Run: clawdbot models auth login --provider higress
3. 自动路由:
已启用,使用 model="higress/auto"
4. 会话监控:
日志路径: ./higress/logs/access.log
需要我帮你配置自动路由规则吗?
```
### Example 3: Manage API Keys
**User:** 帮我查看当前配置的API keys并添加一个DeepSeek的key
**Steps:**
1. List current API keys:
```bash
./get-ai-gateway.sh config list
```
2. Add DeepSeek API key:
```bash
./get-ai-gateway.sh config add --provider deepseek --key sk-xxx
```
**Response:**
```
当前配置的API keys:
Aliyun Dashscope (Qwen): sk-ab***ef12
OpenAI: sk-cd***gh34
Adding API key for DeepSeek...
✅ API key updated successfully!
Provider: DeepSeek
Key: sk-xx***yy56
Configuration has been hot-reloaded (no restart needed).
```
### Example 4: North America Deployment
**User:** 帮我部署Higress AI网关
**Context:** User's timezone is America/Los_Angeles
**Steps:**
1. Download script
2. Get API keys from user
3. Run (script auto-detects timezone and selects North America mirror):
```bash
./get-ai-gateway.sh start --non-interactive \
--openai-key sk-xxx \
--openrouter-key sk-xxx
```
**Response:**
```
Auto-detected timezone: America/Los_Angeles
Selected plugin registry: higress-registry.us-west-1.cr.aliyuncs.com
✅ Higress AI Gateway 部署完成!
网关地址: http://localhost:8080/v1/chat/completions
控制台: http://localhost:8001
日志目录: ./higress/logs
镜像优化:
- 容器镜像: North America (基于时区自动选择)
- 插件镜像: North America (基于时区自动选择)
已配置的模型提供商:
- OpenAI
- OpenRouter
```
## Troubleshooting
For detailed troubleshooting guides, see [TROUBLESHOOTING.md](references/TROUBLESHOOTING.md).
Common issues:
- **Container fails to start**: Check Docker status, port availability, and container logs
- **"too many open files" error**: Increase `fs.inotify.max_user_instances` to 8192
- **Gateway not responding**: Verify container status and port mapping
- **Plugin not recognized**: Check installation path and restart runtime
- **Auto-routing not working**: Verify model list and routing rules
- **Timezone detection fails**: Manually set `IMAGE_REPO` environment variable

View File

@@ -1,79 +0,0 @@
# Higress AI Gateway Plugin (Clawdbot)
Clawdbot model provider plugin for Higress AI Gateway with auto-routing support.
## What is this?
This is a TypeScript-based provider plugin that enables Clawdbot to use Higress AI Gateway as a model provider. It provides:
- **Auto-routing support**: Use `higress/auto` to intelligently route requests based on message content
- **Dynamic model discovery**: Auto-detect available models from Higress Console
- **Smart URL handling**: Automatic URL normalization and validation
- **Flexible authentication**: Support for both local and remote gateway deployments
## Files
- **index.ts**: Main plugin implementation
- **package.json**: NPM package metadata and Clawdbot extension declaration
- **clawdbot.plugin.json**: Plugin manifest for Clawdbot
## Installation
This plugin is automatically installed when you use the `higress-clawdbot-integration` skill. See the parent SKILL.md for complete installation instructions.
### Manual Installation
If you need to install manually:
```bash
# Copy plugin files
mkdir -p "$HOME/.clawdbot/extensions/higress-ai-gateway"
cp -r ./* "$HOME/.clawdbot/extensions/higress-ai-gateway/"
# Configure provider
clawdbot models auth login --provider higress
```
## Usage
After installation, configure Higress as a model provider:
```bash
clawdbot models auth login --provider higress
```
The plugin will prompt for:
1. Gateway URL (default: http://localhost:8080)
2. Console URL (default: http://localhost:8001)
3. API Key (optional for local deployments)
4. Model list (auto-detected or manually specified)
5. Auto-routing default model (if using higress/auto)
## Auto-routing
To use auto-routing, include `higress/auto` in your model list during configuration. Then use it in your conversations:
```bash
# Use auto-routing
clawdbot chat --model higress/auto "深入思考 这个问题应该怎么解决?"
# The gateway will automatically route to the appropriate model based on:
# - Message content triggers (configured via higress-auto-router skill)
# - Fallback to default model if no rule matches
```
## Related Resources
- **Parent Skill**: [higress-clawdbot-integration](../SKILL.md)
- **Auto-routing Configuration**: [higress-auto-router](../../higress-auto-router/SKILL.md)
- **Session Monitoring**: [agent-session-monitor](../../agent-session-monitor/SKILL.md)
- **Higress AI Gateway**: https://github.com/higress-group/higress-standalone
## Compatibility
- **Clawdbot**: v2.0.0+
- **Higress AI Gateway**: All versions
## License
Apache-2.0

View File

@@ -1,10 +0,0 @@
{
"id": "higress-ai-gateway",
"name": "Higress AI Gateway",
"description": "Model provider plugin for Higress AI Gateway with auto-routing support",
"providers": ["higress"],
"configSchema": {
"type": "object",
"additionalProperties": true
}
}

View File

@@ -1,284 +0,0 @@
import { emptyPluginConfigSchema } from "clawdbot/plugin-sdk";
// Default endpoints for a local all-in-one Higress deployment.
const DEFAULT_GATEWAY_URL = "http://localhost:8080";
const DEFAULT_CONSOLE_URL = "http://localhost:8001";
// Conservative per-model limits used when the gateway does not report its own.
const DEFAULT_CONTEXT_WINDOW = 128_000;
const DEFAULT_MAX_TOKENS = 8192;
// Common models that Higress AI Gateway typically supports.
// Used as the fallback suggestion list when model discovery via the
// Console API (fetchAvailableModels) returns nothing.
const DEFAULT_MODEL_IDS = [
  // Auto-routing special model
  "higress/auto",
  // OpenAI models
  "gpt-5.2",
  "gpt-5-mini",
  "gpt-5-nano",
  // Anthropic models
  "claude-opus-4.5",
  "claude-sonnet-4.5",
  "claude-haiku-4.5",
  // Qwen models
  "qwen3-turbo",
  "qwen3-plus",
  "qwen3-max",
  "qwen3-coder-480b-a35b-instruct",
  // DeepSeek models
  "deepseek-chat",
  "deepseek-reasoner",
  // Other common models
  "kimi-k2.5",
  "glm-4.7",
  "MiniMax-M2.1",
] as const;
/**
 * Normalize a user-entered base URL.
 *
 * Trims whitespace, removes every trailing slash, and guarantees exactly
 * one "/v1" suffix. A blank input falls back to DEFAULT_GATEWAY_URL
 * unchanged (note: the default is returned as-is, without "/v1").
 */
function normalizeBaseUrl(value: string): string {
  const cleaned = value.trim();
  if (!cleaned) return DEFAULT_GATEWAY_URL;
  // Drop all trailing slashes in one pass, then append "/v1" if missing.
  const base = cleaned.replace(/\/+$/, "");
  return base.endsWith("/v1") ? base : `${base}/v1`;
}
/**
 * Prompt validator: returns undefined when the normalized form of the
 * input parses as a URL, otherwise a human-readable error message.
 */
function validateUrl(value: string): string | undefined {
  try {
    // The URL constructor throws on malformed input — that is the check.
    new URL(normalizeBaseUrl(value));
    return undefined;
  } catch {
    return "Enter a valid URL";
  }
}
/**
 * Split a comma- or newline-separated model list into trimmed, unique IDs.
 * Blank entries are dropped; first-seen order is preserved.
 */
function parseModelIds(input: string): string[] {
  const seen = new Set<string>();
  for (const piece of input.split(/[\n,]/)) {
    const id = piece.trim();
    if (id) seen.add(id);
  }
  // Set iteration preserves insertion order.
  return [...seen];
}
/**
 * Build a Clawdbot model definition for one model ID.
 *
 * The special "higress/auto" ID gets a friendly display name; everything
 * else uses the raw ID. Costs are zeroed (billing happens at the gateway),
 * and context/token limits fall back to the module defaults.
 */
function buildModelDefinition(modelId: string) {
  const displayName =
    modelId === "higress/auto" ? "Higress Auto Router" : modelId;
  return {
    id: modelId,
    name: displayName,
    api: "openai-completions",
    reasoning: false,
    input: ["text", "image"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: DEFAULT_CONTEXT_WINDOW,
    maxTokens: DEFAULT_MAX_TOKENS,
  };
}
/**
 * Probe the gateway's /v1/models endpoint to confirm it is reachable.
 *
 * Callers pass a URL produced by normalizeBaseUrl, which already ends in
 * "/v1"; appending "/v1/models" blindly produced ".../v1/v1/models".
 * Strip a trailing "/v1" first so both normalized and raw URLs probe the
 * correct path.
 *
 * Returns true when the gateway answers, including with 401 — an auth
 * challenge still proves the gateway is up. Network errors and the 5 s
 * timeout both resolve to false.
 */
async function testGatewayConnection(gatewayUrl: string): Promise<boolean> {
  const base = gatewayUrl.replace(/\/v1$/, "");
  try {
    const response = await fetch(`${base}/v1/models`, {
      method: "GET",
      headers: { "Content-Type": "application/json" },
      signal: AbortSignal.timeout(5000),
    });
    return response.ok || response.status === 401; // 401 means gateway is up but needs auth
  } catch {
    return false;
  }
}
/**
 * Discover configured model names from the Higress Console route API.
 *
 * consoleUrl arrives normalized by normalizeBaseUrl and therefore already
 * ends in "/v1"; appending "/v1/ai/routes" blindly produced
 * ".../v1/v1/ai/routes". Strip a trailing "/v1" first.
 *
 * Best-effort: any network error, non-OK status, or unexpected payload
 * shape yields an empty list so the caller can fall back to
 * DEFAULT_MODEL_IDS.
 */
async function fetchAvailableModels(consoleUrl: string): Promise<string[]> {
  const base = consoleUrl.replace(/\/v1$/, "");
  try {
    // Try to get models from Higress Console API
    const response = await fetch(`${base}/v1/ai/routes`, {
      method: "GET",
      headers: { "Content-Type": "application/json" },
      signal: AbortSignal.timeout(5000),
    });
    if (response.ok) {
      const data = (await response.json()) as { data?: { model?: string }[] };
      if (data.data && Array.isArray(data.data)) {
        // Keep only routes that actually carry a string model name.
        return data.data
          .map((route: { model?: string }) => route.model)
          .filter((m): m is string => typeof m === "string");
      }
    }
  } catch {
    // Ignore errors, use defaults
  }
  return [];
}
// Clawdbot plugin definition. register() installs "higress" as a model
// provider whose single auth method runs an interactive setup flow:
// prompt for gateway/console URLs, probe the gateway, optionally discover
// models from the console, then return credential profiles plus a config
// patch wiring every model into Clawdbot's defaults.
const higressPlugin = {
  id: "higress-ai-gateway",
  name: "Higress AI Gateway",
  description: "Model provider plugin for Higress AI Gateway with auto-routing support",
  configSchema: emptyPluginConfigSchema(),
  register(api) {
    api.registerProvider({
      id: "higress",
      label: "Higress AI Gateway",
      docsPath: "/providers/models",
      aliases: ["higress-gateway", "higress-ai"],
      auth: [
        {
          id: "api-key",
          label: "API Key",
          hint: "Configure Higress AI Gateway endpoint with optional API key",
          kind: "custom",
          run: async (ctx) => {
            // Step 1: Get Gateway URL
            const gatewayUrlInput = await ctx.prompter.text({
              message: "Higress AI Gateway URL",
              initialValue: DEFAULT_GATEWAY_URL,
              validate: validateUrl,
            });
            // normalizeBaseUrl guarantees the result ends with "/v1"
            // (except for the blank-input default).
            const gatewayUrl = normalizeBaseUrl(gatewayUrlInput);
            // Step 2: Get Console URL (for auto-router configuration)
            const consoleUrlInput = await ctx.prompter.text({
              message: "Higress Console URL (for auto-router config)",
              initialValue: DEFAULT_CONSOLE_URL,
              validate: validateUrl,
            });
            const consoleUrl = normalizeBaseUrl(consoleUrlInput);
            // Step 3: Test connection (create a new spinner)
            const spin = ctx.prompter.progress("Testing gateway connection…");
            const isConnected = await testGatewayConnection(gatewayUrl);
            if (!isConnected) {
              spin.stop("Gateway connection failed");
              // Connection failure is a warning, not an abort — the user
              // may be configuring a gateway that is not up yet.
              await ctx.prompter.note(
                [
                  "Could not connect to Higress AI Gateway.",
                  "Make sure the gateway is running and the URL is correct.",
                  "",
                  `Tried: ${gatewayUrl}/v1/models`,
                ].join("\n"),
                "Connection Warning",
              );
            } else {
              spin.stop("Gateway connected");
            }
            // Step 4: Get API Key (optional for local gateway)
            const apiKeyInput = await ctx.prompter.text({
              message: "API Key (leave empty if not required)",
              initialValue: "",
            }) || '';
            // "higress-local" is a sentinel meaning "no real key" — it also
            // selects the "local" profile ID and disables the auth header.
            const apiKey = apiKeyInput.trim() || "higress-local";
            // Step 5: Fetch available models (create a new spinner)
            const spin2 = ctx.prompter.progress("Fetching available models…");
            const fetchedModels = await fetchAvailableModels(consoleUrl);
            const defaultModels = fetchedModels.length > 0
              ? ["higress/auto", ...fetchedModels]
              : DEFAULT_MODEL_IDS;
            spin2.stop();
            // Step 6: Let user customize model list
            const modelInput = await ctx.prompter.text({
              message: "Model IDs (comma-separated, higress/auto enables auto-routing)",
              // Only the first 10 suggestions are shown to keep the prompt short.
              initialValue: defaultModels.slice(0, 10).join(", "),
              validate: (value) =>
                parseModelIds(value).length > 0 ? undefined : "Enter at least one model id",
            });
            const modelIds = parseModelIds(modelInput);
            const hasAutoModel = modelIds.includes("higress/auto");
            // FIX: Avoid double prefix - if modelId already starts with provider, don't add prefix again
            const defaultModelId = hasAutoModel
              ? "higress/auto"
              : (modelIds[0] ?? "qwen-turbo");
            const defaultModelRef = defaultModelId.startsWith("higress/")
              ? defaultModelId
              : `higress/${defaultModelId}`;
            // Step 7: Configure default model for auto-routing
            let autoRoutingDefaultModel = "qwen-turbo";
            if (hasAutoModel) {
              const autoRoutingModelInput = await ctx.prompter.text({
                message: "Default model for auto-routing (when no rule matches)",
                initialValue: "qwen-turbo",
              });
              autoRoutingDefaultModel = autoRoutingModelInput.trim(); // FIX: Add trim() here
            }
            return {
              profiles: [
                {
                  profileId: `higress:${apiKey === "higress-local" ? "local" : "default"}`,
                  credential: {
                    type: "token",
                    provider: "higress",
                    token: apiKey,
                  },
                },
              ],
              configPatch: {
                models: {
                  providers: {
                    higress: {
                      // NOTE(review): gatewayUrl already ends in "/v1"
                      // (normalizeBaseUrl), so this looks like it produces
                      // ".../v1/v1" — confirm against the gateway's routing.
                      baseUrl: `${gatewayUrl}/v1`,
                      apiKey: apiKey,
                      api: "openai-completions",
                      // Only send the auth header when a real key was given.
                      authHeader: apiKey !== "higress-local",
                      models: modelIds.map((modelId) => buildModelDefinition(modelId)),
                    },
                  },
                },
                agents: {
                  defaults: {
                    models: Object.fromEntries(
                      modelIds.map((modelId) => {
                        // FIX: Avoid double prefix - only add provider prefix if not already present
                        const modelRef = modelId.startsWith("higress/")
                          ? modelId
                          : `higress/${modelId}`;
                        return [modelRef, {}];
                      }),
                    ),
                  },
                },
                plugins: {
                  entries: {
                    "higress-ai-gateway": {
                      enabled: true,
                      config: {
                        gatewayUrl,
                        consoleUrl,
                        autoRoutingDefaultModel,
                      },
                    },
                  },
                },
              },
              defaultModel: defaultModelRef,
              notes: [
                "Higress AI Gateway is now configured as a model provider.",
                hasAutoModel
                  ? `Auto-routing enabled: use model "higress/auto" to route based on message content.`
                  : "Add 'higress/auto' to models to enable auto-routing.",
                `Gateway endpoint: ${gatewayUrl}/v1/chat/completions`,
                `Console: ${consoleUrl}`,
                "",
                "🎯 Recommended Skills (install via Clawdbot conversation):",
                "",
                "1. Auto-Routing Skill:",
                "   Configure automatic model routing based on message content",
                "   https://github.com/alibaba/higress/tree/main/.claude/skills/higress-auto-router",
                '   Say: "Install higress-auto-router skill"',
                "",
                "2. Agent Session Monitor Skill:",
                "   Track token usage and monitor conversation history",
                "   https://github.com/alibaba/higress/tree/main/.claude/skills/agent-session-monitor",
                '   Say: "Install agent-session-monitor skill"',
              ],
            };
          },
        },
      ],
    });
  },
};
export default higressPlugin;

View File

@@ -1,22 +0,0 @@
{
"name": "@higress/higress-ai-gateway",
"version": "1.0.0",
"description": "Higress AI Gateway model provider plugin for Clawdbot with auto-routing support",
"main": "index.ts",
"clawdbot": {
"extensions": ["./index.ts"]
},
"keywords": [
"clawdbot",
"higress",
"ai-gateway",
"model-router",
"auto-routing"
],
"author": "Higress Team",
"license": "Apache-2.0",
"repository": {
"type": "git",
"url": "https://github.com/alibaba/higress"
}
}

View File

@@ -1,92 +0,0 @@
# Higress AI Gateway Plugin
OpenClaw/Clawdbot model provider plugin for Higress AI Gateway with auto-routing support.
## What is this?
This is a TypeScript-based provider plugin that enables Clawdbot and OpenClaw to use Higress AI Gateway as a model provider. It provides:
- **Auto-routing support**: Use `higress/auto` to intelligently route requests based on message content
- **Dynamic model discovery**: Auto-detect available models from Higress Console
- **Smart URL handling**: Automatic URL normalization and validation
- **Flexible authentication**: Support for both local and remote gateway deployments
## Files
- **index.ts**: Main plugin implementation
- **package.json**: NPM package metadata and OpenClaw extension declaration
- **openclaw.plugin.json**: Plugin manifest for OpenClaw
## Installation
This plugin is automatically installed when you use the `higress-clawdbot-integration` skill. See the parent SKILL.md for complete installation instructions.
### Manual Installation
If you need to install manually:
```bash
# Detect runtime
if command -v clawdbot &> /dev/null; then
RUNTIME_DIR="$HOME/.clawdbot"
elif command -v openclaw &> /dev/null; then
RUNTIME_DIR="$HOME/.openclaw"
else
echo "Error: Neither clawdbot nor openclaw is installed"
exit 1
fi
# Copy plugin files
mkdir -p "$RUNTIME_DIR/extensions/higress-ai-gateway"
cp -r ./* "$RUNTIME_DIR/extensions/higress-ai-gateway/"
# Configure provider
clawdbot models auth login --provider higress
# or
openclaw models auth login --provider higress
```
## Usage
After installation, configure Higress as a model provider:
```bash
clawdbot models auth login --provider higress
```
The plugin will prompt for:
1. Gateway URL (default: http://localhost:8080)
2. Console URL (default: http://localhost:8001)
3. API Key (optional for local deployments)
4. Model list (auto-detected or manually specified)
5. Auto-routing default model (if using higress/auto)
## Auto-routing
To use auto-routing, include `higress/auto` in your model list during configuration. Then use it in your conversations:
```bash
# Use auto-routing
clawdbot chat --model higress/auto "深入思考 这个问题应该怎么解决?"
# The gateway will automatically route to the appropriate model based on:
# - Message content triggers (configured via higress-auto-router skill)
# - Fallback to default model if no rule matches
```
## Related Resources
- **Parent Skill**: [higress-clawdbot-integration](../SKILL.md)
- **Auto-routing Configuration**: [higress-auto-router](../../higress-auto-router/SKILL.md)
- **Session Monitoring**: [agent-session-monitor](../../agent-session-monitor/SKILL.md)
- **Higress AI Gateway**: https://github.com/higress-group/higress-standalone
## Compatibility
- **OpenClaw**: v2.0.0+
- **Clawdbot**: v2.0.0+
- **Higress AI Gateway**: All versions
## License
Apache-2.0

View File

@@ -0,0 +1,259 @@
---
name: higress-openclaw-integration
description: "Deploy and configure Higress AI Gateway for OpenClaw integration. Use when: (1) User wants to deploy Higress AI Gateway, (2) User wants to configure OpenClaw to use more model providers, (3) User mentions 'higress', 'ai gateway', 'model gateway', 'AI网关', (4) User wants to set up model routing or auto-routing, (5) User needs to manage LLM provider API keys."
---
# Higress AI Gateway Integration
Deploy Higress AI Gateway and configure OpenClaw to use it as a unified model provider.
## Quick Start
### Step 1: Collect Information from User
**Ask the user for the following information upfront:**
1. **Which LLM provider(s) to use?** (at least one required)
**Commonly Used Providers:**
| Provider | Parameter | Notes |
|----------|-----------|-------|
| 智谱 / z.ai | `--zhipuai-key` | Models: glm-*, Code Plan mode enabled by default |
| Claude Code | `--claude-code-key` | **Requires OAuth token from `claude setup-token`** |
| Moonshot (Kimi) | `--moonshot-key` | Models: moonshot-*, kimi-* |
| Minimax | `--minimax-key` | Models: abab-* |
| 阿里云通义千问 (Dashscope) | `--dashscope-key` | Models: qwen* |
| OpenAI | `--openai-key` | Models: gpt-*, o1-*, o3-* |
| DeepSeek | `--deepseek-key` | Models: deepseek-* |
| Grok | `--grok-key` | Models: grok-* |
**Other Providers:**
| Provider | Parameter | Notes |
|----------|-----------|-------|
| Claude | `--claude-key` | Models: claude-* |
| Google Gemini | `--gemini-key` | Models: gemini-* |
| OpenRouter | `--openrouter-key` | Supports all models (catch-all) |
| Groq | `--groq-key` | Fast inference |
| Doubao (豆包) | `--doubao-key` | Models: doubao-* |
| Mistral | `--mistral-key` | Models: mistral-* |
| Baichuan (百川) | `--baichuan-key` | Models: Baichuan* |
| 01.AI (Yi) | `--yi-key` | Models: yi-* |
| Stepfun (阶跃星辰) | `--stepfun-key` | Models: step-* |
| Cohere | `--cohere-key` | Models: command* |
| Fireworks AI | `--fireworks-key` | - |
| Together AI | `--togetherai-key` | - |
| GitHub Models | `--github-key` | - |
**Cloud Providers (require additional config):**
- Azure OpenAI: `--azure-key` (requires service URL)
- AWS Bedrock: `--bedrock-key` (requires region and access key)
- Google Vertex AI: `--vertex-key` (requires project ID and region)
**Brand Name Display (z.ai / 智谱):**
- If user communicates in Chinese: display as "智谱"
- If user communicates in English: display as "z.ai"
2. **Enable auto-routing?** (recommended)
- If yes: `--auto-routing --auto-routing-default-model <model-name>`
- Auto-routing allows using `model="higress/auto"` to automatically route requests based on message content
3. **Custom ports?** (optional, defaults: HTTP=8080, HTTPS=8443, Console=8001)
### Step 2: Deploy Gateway
**Auto-detect region for z.ai / 智谱 domain configuration:**
When user selects z.ai / 智谱 provider, detect their region:
```bash
# Run region detection script (scripts/detect-region.sh relative to skill directory)
REGION=$(bash scripts/detect-region.sh)
# Output: "china" or "international"
```
**Based on detection result:**
- If `REGION="china"`: use default domain `open.bigmodel.cn`, no extra parameter needed
- If `REGION="international"`: automatically add `--zhipuai-domain api.z.ai` to deployment command
**After deployment (for international users):**
Notify user in English: "The z.ai endpoint domain has been set to api.z.ai. If you want to change it, let me know and I can update the configuration."
```bash
# Create installation directory
mkdir -p higress-install
cd higress-install
# Download script (if not exists)
curl -fsSL https://higress.ai/ai-gateway/install.sh -o get-ai-gateway.sh
chmod +x get-ai-gateway.sh
# Deploy with user's configuration
# For z.ai / 智谱: always include --zhipuai-code-plan-mode
# For non-China users: include --zhipuai-domain api.z.ai
./get-ai-gateway.sh start --non-interactive \
--<provider>-key <api-key> \
[--auto-routing --auto-routing-default-model <model>]
```
**z.ai / 智谱 Options:**
| Option | Description |
|--------|-------------|
| `--zhipuai-code-plan-mode` | Enable Code Plan mode (enabled by default) |
| `--zhipuai-domain <domain>` | Custom domain, default: `open.bigmodel.cn` (China), `api.z.ai` (international) |
**Example (China user):**
```bash
./get-ai-gateway.sh start --non-interactive \
--zhipuai-key sk-xxx \
--zhipuai-code-plan-mode \
--auto-routing \
--auto-routing-default-model glm-5
```
**Example (International user):**
```bash
./get-ai-gateway.sh start --non-interactive \
--zhipuai-key sk-xxx \
--zhipuai-domain api.z.ai \
--zhipuai-code-plan-mode \
--auto-routing \
--auto-routing-default-model glm-5
```
### Step 3: Install OpenClaw Plugin
Install the Higress provider plugin for OpenClaw:
```bash
# Copy plugin files (PLUGIN_SRC is relative to skill directory: scripts/plugin)
PLUGIN_SRC="scripts/plugin"
PLUGIN_DEST="$HOME/.openclaw/extensions/higress"
mkdir -p "$PLUGIN_DEST"
cp -r "$PLUGIN_SRC"/* "$PLUGIN_DEST/"
```
**Tell user to run the following commands manually in their terminal (interactive commands, cannot be executed by AI agent):**
```bash
# Step 1: Enable the plugin
openclaw plugins enable higress
# Step 2: Configure provider (interactive - will prompt for Gateway URL, API Key, models, etc.)
openclaw models auth login --provider higress --set-default
# Step 3: Restart OpenClaw gateway to apply changes
openclaw gateway restart
```
The `openclaw models auth login` command will interactively prompt for:
1. Gateway URL (default: `http://localhost:8080`)
2. Console URL (default: `http://localhost:8001`)
3. API Key (optional for local deployments)
4. Model list (auto-detected or manually specified)
5. Auto-routing default model (if using `higress/auto`)
After configuration and restart, Higress models are available in OpenClaw with `higress/` prefix (e.g., `higress/glm-5`, `higress/auto`).
**Future Configuration Updates (No Restart Needed)**
After the initial setup, you can manage your configuration through conversation with OpenClaw:
- **Add New Providers**: Add new LLM providers (e.g., DeepSeek, OpenAI, Claude) and their models dynamically.
- **Update API Keys**: Update existing provider API keys without service restart.
- **Configure Auto-routing**: If you've set up multiple models, ask OpenClaw to configure auto-routing rules. Requests will be intelligently routed based on your message content, using the most suitable model automatically.
All configuration changes are hot-loaded through Higress — no `openclaw gateway restart` required. Iterate on your model provider setup dynamically without service interruption!
## Post-Deployment Management
### Add/Update API Keys (Hot-reload)
```bash
./get-ai-gateway.sh config add --provider <provider> --key <api-key>
./get-ai-gateway.sh config list
./get-ai-gateway.sh config remove --provider <provider>
```
Provider aliases: `dashscope`/`qwen`, `moonshot`/`kimi`, `zhipuai`/`zhipu`
### Update z.ai Domain (Hot-reload)
If user wants to change the z.ai domain after deployment:
```bash
# Update domain configuration
./get-ai-gateway.sh config add --provider zhipuai --extra-config "zhipuDomain=api.z.ai"
# Or revert to China endpoint
./get-ai-gateway.sh config add --provider zhipuai --extra-config "zhipuDomain=open.bigmodel.cn"
```
### Add Routing Rules (for auto-routing)
```bash
# Add rule: route to specific model when message starts with trigger
./get-ai-gateway.sh route add --model <model> --trigger "keyword1|keyword2"
# Examples
./get-ai-gateway.sh route add --model glm-4-flash --trigger "quick|fast"
./get-ai-gateway.sh route add --model claude-opus-4 --trigger "think|complex"
./get-ai-gateway.sh route add --model deepseek-coder --trigger "code|debug"
# List/remove rules
./get-ai-gateway.sh route list
./get-ai-gateway.sh route remove --rule-id 0
```
### Stop/Delete Gateway
```bash
./get-ai-gateway.sh stop
./get-ai-gateway.sh delete
```
## Endpoints
| Endpoint | URL |
|----------|-----|
| Chat Completions | http://localhost:8080/v1/chat/completions |
| Console | http://localhost:8001 |
| Logs | `./higress-install/logs/access.log` |
## Testing
```bash
# Test with specific model
curl 'http://localhost:8080/v1/chat/completions' \
-H 'Content-Type: application/json' \
-d '{"model": "<model-name>", "messages": [{"role": "user", "content": "Hello"}]}'
# Test auto-routing (if enabled)
curl 'http://localhost:8080/v1/chat/completions' \
-H 'Content-Type: application/json' \
-d '{"model": "higress/auto", "messages": [{"role": "user", "content": "What is AI?"}]}'
```
## Troubleshooting
| Issue | Solution |
|-------|----------|
| Container fails to start | Check `docker logs higress-ai-gateway` |
| Port already in use | Use `--http-port`, `--console-port` to change ports |
| API key error | Run `./get-ai-gateway.sh config list` to verify keys |
| Auto-routing not working | Ensure `--auto-routing` was set during deployment |
| Slow image download | Script auto-selects nearest registry based on timezone |
## Important Notes
1. **Claude Code Mode**: Requires OAuth token from `claude setup-token` command, not a regular API key
2. **z.ai Code Plan Mode**: Enabled by default, uses `/api/coding/paas/v4/chat/completions` endpoint, optimized for coding tasks
3. **z.ai Domain Selection**:
- China users: `open.bigmodel.cn` (default)
- International users: `api.z.ai` (auto-detected based on timezone)
- Users can update domain anytime after deployment
4. **Auto-routing**: Must be enabled during initial deployment (`--auto-routing`); routing rules can be added later
5. **OpenClaw Integration**: The `openclaw models auth login` and `openclaw gateway restart` commands are **interactive** and must be run by the user manually in their terminal
6. **Hot-reload**: API key changes take effect immediately; no container restart needed

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# detect-region.sh — guess whether the user is in the China region from the
# system timezone, so deployment can pick the matching z.ai domain.
#
# Output (stdout): "china" or "international"
#
# Timezone sources, tried in order:
#   1. /etc/timezone                      (Debian/Ubuntu)
#   2. timedatectl                        (systemd-based distros)
#   3. /etc/localtime symlink target      (macOS and some others)
#   4. "Unknown" fallback                 (treated as international)
TIMEZONE=$(cat /etc/timezone 2>/dev/null \
  || timedatectl show --property=Timezone --value 2>/dev/null \
  || readlink /etc/localtime 2>/dev/null | sed 's|.*/zoneinfo/||' \
  || echo "Unknown")

# Match every IANA zone belonging to the China region (including Hong Kong and
# Macau). The previous check only recognized Asia/Shanghai and Asia/Hong_Kong;
# Asia/Chongqing, Asia/Harbin, Asia/Kashgar and Asia/Urumqi are also valid
# (current or legacy) China zones that systems may report.
case "$TIMEZONE" in
  Asia/Shanghai|Asia/Hong_Kong|Asia/Macau|Asia/Chongqing|Asia/Harbin|Asia/Kashgar|Asia/Urumqi|*China*|*Beijing*)
    echo "china"
    ;;
  *)
    echo "international"
    ;;
esac

View File

@@ -0,0 +1,61 @@
# Higress AI Gateway Plugin
OpenClaw model provider plugin for Higress AI Gateway with auto-routing support.
## What is this?
This is a TypeScript-based provider plugin that enables OpenClaw to use Higress AI Gateway as a model provider. It provides:
- **Auto-routing support**: Use `higress/auto` to intelligently route requests based on message content
- **Dynamic model discovery**: Auto-detect available models from Higress Console
- **Smart URL handling**: Automatic URL normalization and validation
- **Flexible authentication**: Support for both local and remote gateway deployments
## Files
- **index.ts**: Main plugin implementation
- **package.json**: NPM package metadata and OpenClaw extension declaration
- **openclaw.plugin.json**: Plugin manifest for OpenClaw
## Installation
This plugin is automatically installed when you use the `higress-openclaw-integration` skill. See parent SKILL.md for complete installation instructions.
### Manual Installation
If you need to install manually:
```bash
# Copy plugin files
mkdir -p "$HOME/.openclaw/extensions/higress"
cp -r ./* "$HOME/.openclaw/extensions/higress/"
# Configure provider
openclaw plugins enable higress
openclaw models auth login --provider higress
```
## Usage
After installation, configure Higress as a model provider:
```bash
openclaw models auth login --provider higress
```
The plugin will prompt for:
1. Gateway URL (default: http://localhost:8080)
2. Console URL (default: http://localhost:8001)
3. API Key (optional for local deployments)
4. Model list (auto-detected or manually specified)
5. Auto-routing default model (if using higress/auto)
## Related Resources
- **Parent Skill**: [higress-openclaw-integration](../SKILL.md)
- **Auto-routing Configuration**: [higress-auto-router](../../higress-auto-router/SKILL.md)
## License
Apache-2.0

View File

@@ -2,33 +2,47 @@ import { emptyPluginConfigSchema } from "openclaw/plugin-sdk";
const DEFAULT_GATEWAY_URL = "http://localhost:8080";
const DEFAULT_CONSOLE_URL = "http://localhost:8001";
const DEFAULT_CONTEXT_WINDOW = 128_000;
const DEFAULT_MAX_TOKENS = 8192;
// Model-specific context window and max tokens configurations
const MODEL_CONFIG: Record<string, { contextWindow: number; maxTokens: number }> = {
"gpt-5.3-codex": { contextWindow: 400_000, maxTokens: 128_000 },
"gpt-5-mini": { contextWindow: 400_000, maxTokens: 128_000 },
"gpt-5-nano": { contextWindow: 400_000, maxTokens: 128_000 },
"claude-opus-4-6": { contextWindow: 1_000_000, maxTokens: 128_000 },
"claude-sonnet-4-6": { contextWindow: 1_000_000, maxTokens: 64_000 },
"claude-haiku-4-5": { contextWindow: 200_000, maxTokens: 64_000 },
"qwen3.5-plus": { contextWindow: 960_000, maxTokens: 64_000 },
"deepseek-chat": { contextWindow: 256_000, maxTokens: 128_000 },
"deepseek-reasoner": { contextWindow: 256_000, maxTokens: 128_000 },
"kimi-k2.5": { contextWindow: 256_000, maxTokens: 128_000 },
"glm-5": { contextWindow: 200_000, maxTokens: 128_000 },
"MiniMax-M2.5": { contextWindow: 200_000, maxTokens: 128_000 },
};
// Default values for unknown models
const DEFAULT_CONTEXT_WINDOW = 200_000;
const DEFAULT_MAX_TOKENS = 128_000;
// Common models that Higress AI Gateway typically supports
const DEFAULT_MODEL_IDS = [
// Auto-routing special model
"higress/auto",
// Commonly used models
"kimi-k2.5",
"glm-5",
"MiniMax-M2.5",
"qwen3.5-plus",
// Anthropic models
"claude-opus-4-6",
"claude-sonnet-4-6",
"claude-haiku-4-5",
// OpenAI models
"gpt-5.2",
"gpt-5.3-codex",
"gpt-5-mini",
"gpt-5-nano",
// Anthropic models
"claude-opus-4.5",
"claude-sonnet-4.5",
"claude-haiku-4.5",
// Qwen models
"qwen3-turbo",
"qwen3-plus",
"qwen3-max",
"qwen3-coder-480b-a35b-instruct",
// DeepSeek models
"deepseek-chat",
"deepseek-reasoner",
// Other common models
"kimi-k2.5",
"glm-4.7",
"MiniMax-M2.1",
"deepseek-reasoner",
] as const;
function normalizeBaseUrl(value: string): string {
@@ -60,26 +74,33 @@ function parseModelIds(input: string): string[] {
function buildModelDefinition(modelId: string) {
const isAutoModel = modelId === "higress/auto";
const config = MODEL_CONFIG[modelId] || { contextWindow: DEFAULT_CONTEXT_WINDOW, maxTokens: DEFAULT_MAX_TOKENS };
return {
id: modelId,
name: isAutoModel ? "Higress Auto Router" : modelId,
api: "openai-completions",
reasoning: false,
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: DEFAULT_CONTEXT_WINDOW,
maxTokens: DEFAULT_MAX_TOKENS,
contextWindow: config.contextWindow,
maxTokens: config.maxTokens,
};
}
async function testGatewayConnection(gatewayUrl: string): Promise<boolean> {
try {
const response = await fetch(`${gatewayUrl}/v1/models`, {
method: "GET",
// gatewayUrl already ends with /v1 from normalizeBaseUrl()
// Use chat/completions endpoint with empty body to test connection
// Higress doesn't support /models endpoint
const response = await fetch(`${gatewayUrl}/chat/completions`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({}),
signal: AbortSignal.timeout(5000),
});
return response.ok || response.status === 401; // 401 means gateway is up but needs auth
// Any response (including 400/401/422) means gateway is reachable
return true;
} catch {
return false;
}
@@ -108,7 +129,7 @@ async function fetchAvailableModels(consoleUrl: string): Promise<string[]> {
}
const higressPlugin = {
id: "higress-ai-gateway",
id: "higress",
name: "Higress AI Gateway",
description: "Model provider plugin for Higress AI Gateway with auto-routing support",
configSchema: emptyPluginConfigSchema(),
@@ -150,8 +171,6 @@ const higressPlugin = {
[
"Could not connect to Higress AI Gateway.",
"Make sure the gateway is running and the URL is correct.",
"",
`Tried: ${gatewayUrl}/v1/models`,
].join("\n"),
"Connection Warning",
);
@@ -184,21 +203,19 @@ const higressPlugin = {
const modelIds = parseModelIds(modelInput);
const hasAutoModel = modelIds.includes("higress/auto");
// FIX: Avoid double prefix - if modelId already starts with provider, don't add prefix again
const defaultModelId = hasAutoModel
? "higress/auto"
: (modelIds[0] ?? "qwen-turbo");
const defaultModelRef = defaultModelId.startsWith("higress/")
? defaultModelId
: `higress/${defaultModelId}`;
// Always add higress/ provider prefix to create model reference
const defaultModelId = hasAutoModel
? "higress/auto"
: (modelIds[0] ?? "glm-5");
const defaultModelRef = `higress/${defaultModelId}`;
// Step 7: Configure default model for auto-routing
let autoRoutingDefaultModel = "qwen-turbo";
let autoRoutingDefaultModel = "glm-5";
if (hasAutoModel) {
const autoRoutingModelInput = await ctx.prompter.text({
message: "Default model for auto-routing (when no rule matches)",
initialValue: "qwen-turbo",
initialValue: "glm-5",
});
autoRoutingDefaultModel = autoRoutingModelInput.trim(); // FIX: Add trim() here
}
@@ -218,7 +235,8 @@ const higressPlugin = {
models: {
providers: {
higress: {
baseUrl: `${gatewayUrl}/v1`,
// gatewayUrl already ends with /v1 from normalizeBaseUrl()
baseUrl: gatewayUrl,
apiKey: apiKey,
api: "openai-completions",
authHeader: apiKey !== "higress-local",
@@ -230,10 +248,8 @@ const higressPlugin = {
defaults: {
models: Object.fromEntries(
modelIds.map((modelId) => {
// FIX: Avoid double prefix - only add provider prefix if not already present
const modelRef = modelId.startsWith("higress/")
? modelId
: `higress/${modelId}`;
// Always add higress/ provider prefix to create model reference
const modelRef = `higress/${modelId}`;
return [modelRef, {}];
}),
),
@@ -241,7 +257,7 @@ const higressPlugin = {
},
plugins: {
entries: {
"higress-ai-gateway": {
"higress": {
enabled: true,
config: {
gatewayUrl,
@@ -258,20 +274,22 @@ const higressPlugin = {
hasAutoModel
? `Auto-routing enabled: use model "higress/auto" to route based on message content.`
: "Add 'higress/auto' to models to enable auto-routing.",
`Gateway endpoint: ${gatewayUrl}/v1/chat/completions`,
// gatewayUrl already ends with /v1 from normalizeBaseUrl()
`Gateway endpoint: ${gatewayUrl}/chat/completions`,
`Console: ${consoleUrl}`,
"",
"🎯 Recommended Skills (install via Clawdbot conversation):",
"💡 Future Configuration Updates (No Restart Needed):",
" • Add New Providers: Add LLM providers (DeepSeek, OpenAI, Claude, etc.) dynamically.",
" • Update API Keys: Update existing provider keys without restart.",
" • Configure Auto-Routing: Ask OpenClaw to set up intelligent routing rules.",
" All changes hot-load via Higress — no gateway restart required!",
"",
"🎯 Recommended Skills (install via OpenClaw conversation):",
"",
"1. Auto-Routing Skill:",
" Configure automatic model routing based on message content",
" https://github.com/alibaba/higress/tree/main/.claude/skills/higress-auto-router",
' Say: "Install higress-auto-router skill"',
"",
"2. Agent Session Monitor Skill:",
" Track token usage and monitor conversation history",
" https://github.com/alibaba/higress/tree/main/.claude/skills/agent-session-monitor",
' Say: "Install agent-session-monitor skill"',
],
};
},

View File

@@ -1,5 +1,5 @@
{
"id": "higress-ai-gateway",
"id": "higress",
"name": "Higress AI Gateway",
"description": "Model provider plugin for Higress AI Gateway with auto-routing support",
"providers": ["higress"],

View File

@@ -1,5 +1,5 @@
{
"name": "@higress/higress-ai-gateway",
"name": "@higress/higress",
"version": "1.0.0",
"description": "Higress AI Gateway model provider plugin for OpenClaw with auto-routing support",
"main": "index.ts",

View File

@@ -19,7 +19,7 @@ on:
jobs:
lint:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
@@ -30,7 +30,7 @@ jobs:
# - run: make lint
higress-wasmplugin-test:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
strategy:
matrix:
# TODO(Xunzhuo): Enable C WASM Filters in CI
@@ -38,6 +38,18 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Disable containerd image store
run: |
sudo bash -c 'cat > /etc/docker/daemon.json << EOF
{
"features": {
"containerd-snapshotter": false
}
}
EOF'
sudo systemctl restart docker
docker info -f '{{ .DriverStatus }}'
- name: Free Up GitHub Actions Ubuntu Runner Disk Space 🔧
uses: jlumbroso/free-disk-space@main
with:
@@ -79,7 +91,7 @@ jobs:
command: GOPROXY="https://proxy.golang.org,direct" PLUGIN_TYPE=${{ matrix.wasmPluginType }} make higress-wasmplugin-test
publish:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
needs: [higress-wasmplugin-test]
steps:
- uses: actions/checkout@v4

View File

@@ -10,7 +10,7 @@ env:
GO_VERSION: 1.24
jobs:
lint:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
@@ -21,7 +21,7 @@ jobs:
# - run: make lint
coverage-test:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
@@ -57,7 +57,7 @@ jobs:
build:
# The type of runner that the job will run on
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
needs: [lint, coverage-test]
steps:
- name: "Checkout ${{ github.ref }}"
@@ -91,17 +91,29 @@ jobs:
path: out/
gateway-conformance-test:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
needs: [build]
steps:
- uses: actions/checkout@v3
higress-conformance-test:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
needs: [build]
steps:
- uses: actions/checkout@v4
- name: Disable containerd image store
run: |
sudo bash -c 'cat > /etc/docker/daemon.json << EOF
{
"features": {
"containerd-snapshotter": false
}
}
EOF'
sudo systemctl restart docker
docker info -f '{{ .DriverStatus }}'
- name: Free Up GitHub Actions Ubuntu Runner Disk Space 🔧
uses: jlumbroso/free-disk-space@main
with:
@@ -139,7 +151,7 @@ jobs:
run: GOPROXY="https://proxy.golang.org,direct" make higress-conformance-test
publish:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
needs: [higress-conformance-test, gateway-conformance-test]
steps:
- uses: actions/checkout@v4

View File

@@ -0,0 +1,50 @@
# Sync Claude skills and the AI gateway install script to Alibaba Cloud OSS.
name: Sync Skills to OSS
# Trigger on pushes to main that touch any skill, or run manually.
on:
  push:
    branches:
      - main
    paths:
      - '.claude/skills/**'
  workflow_dispatch: ~
jobs:
  sync-skills-to-oss:
    runs-on: ubuntu-latest
    # The "oss" environment provides the OSS access-key secrets used below.
    environment:
      name: oss
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      # Fetch the standalone install script so it is published alongside the skills.
      - name: Download AI Gateway Install Script
        run: |
          wget -O install.sh https://raw.githubusercontent.com/higress-group/higress-standalone/main/all-in-one/get-ai-gateway.sh
          chmod +x install.sh
      # Zip each skill directory into packaged-skills/<skill-name>.zip.
      - name: Package Skills
        run: |
          mkdir -p packaged-skills
          for skill_dir in .claude/skills/*/; do
            if [ -d "$skill_dir" ]; then
              skill_name=$(basename "$skill_dir")
              echo "Packaging $skill_name..."
              (cd "$skill_dir" && zip -r "$GITHUB_WORKSPACE/packaged-skills/${skill_name}.zip" .)
            fi
          done
      # Upload packaged skills to oss://higress-ai/skills/ (-u: only copy if newer).
      - name: Sync Skills to OSS
        uses: go-choppy/ossutil-github-action@master
        with:
          ossArgs: 'cp -r -u packaged-skills/ oss://higress-ai/skills/'
          accessKey: ${{ secrets.ACCESS_KEYID }}
          accessSecret: ${{ secrets.ACCESS_KEYSECRET }}
          endpoint: oss-cn-hongkong.aliyuncs.com
      # Upload the install script to oss://higress-ai/ai-gateway/install.sh.
      - name: Sync Install Script to OSS
        uses: go-choppy/ossutil-github-action@master
        with:
          ossArgs: 'cp -u install.sh oss://higress-ai/ai-gateway/install.sh'
          accessKey: ${{ secrets.ACCESS_KEYID }}
          accessSecret: ${{ secrets.ACCESS_KEYSECRET }}
          endpoint: oss-cn-hongkong.aliyuncs.com

6
.gitmodules vendored
View File

@@ -21,15 +21,15 @@
[submodule "istio/proxy"]
path = istio/proxy
url = https://github.com/higress-group/proxy
branch = istio-1.19
branch = envoy-1.36
shallow = true
[submodule "envoy/go-control-plane"]
path = envoy/go-control-plane
url = https://github.com/higress-group/go-control-plane
branch = istio-1.27
branch = envoy-1.36
shallow = true
[submodule "envoy/envoy"]
path = envoy/envoy
url = https://github.com/higress-group/envoy
branch = envoy-1.27
branch = envoy-1.36
shallow = true

View File

@@ -146,7 +146,7 @@ docker-buildx-push: clean-env docker.higress-buildx
export PARENT_GIT_TAG:=$(shell cat VERSION)
export PARENT_GIT_REVISION:=$(TAG)
export ENVOY_PACKAGE_URL_PATTERN?=https://github.com/higress-group/proxy/releases/download/v2.2.0/envoy-symbol-ARCH.tar.gz
export ENVOY_PACKAGE_URL_PATTERN?=https://github.com/higress-group/proxy/releases/download/v2.2.1/envoy-symbol-ARCH.tar.gz
build-envoy: prebuild
./tools/hack/build-envoy.sh
@@ -200,8 +200,8 @@ install: pre-install
helm install higress helm/higress -n higress-system --create-namespace --set 'global.local=true'
HIGRESS_LATEST_IMAGE_TAG ?= latest
ENVOY_LATEST_IMAGE_TAG ?= cdf0f16bf622102f89a0d0257834f43f502e4b99
ISTIO_LATEST_IMAGE_TAG ?= a7525f292c38d7d3380f3ce7ee971ad6e3c46adf
ENVOY_LATEST_IMAGE_TAG ?= ca6ff3a92e3fa592bff706894b22e0509a69757b
ISTIO_LATEST_IMAGE_TAG ?= c482b42b9a14885bd6692c6abd01345d50a372f7
install-dev: pre-install
helm install higress helm/core -n higress-system --create-namespace --set 'controller.tag=$(TAG)' --set 'gateway.replicas=1' --set 'pilot.tag=$(ISTIO_LATEST_IMAGE_TAG)' --set 'gateway.tag=$(ENVOY_LATEST_IMAGE_TAG)' --set 'global.local=true'

View File

@@ -86,6 +86,18 @@ Port descriptions:
>
> **Southeast Asia**: `higress-registry.ap-southeast-7.cr.aliyuncs.com`
> **For Kubernetes deployments**, you can configure the `global.hub` parameter in Helm values to use a mirror registry closer to your region. This applies to both Higress component images and built-in Wasm plugin images:
>
> ```bash
> # Example: Using North America mirror
> helm install higress -n higress-system higress.io/higress --set global.hub=higress-registry.us-west-1.cr.aliyuncs.com --create-namespace
> ```
>
> Available mirror registries:
> - **China (Hangzhou)**: `higress-registry.cn-hangzhou.cr.aliyuncs.com` (default)
> - **North America**: `higress-registry.us-west-1.cr.aliyuncs.com`
> - **Southeast Asia**: `higress-registry.ap-southeast-7.cr.aliyuncs.com`
For other installation methods such as Helm deployment under K8s, please refer to the official [Quick Start documentation](https://higress.io/en-us/docs/user/quickstart).
If you are deploying on the cloud, it is recommended to use the [Enterprise Edition](https://www.aliyun.com/product/apigateway?spm=higress-github.topbar.0.0.0)
@@ -182,6 +194,8 @@ Higress would not be possible without the valuable open-source work of projects
- Higress Console: https://github.com/higress-group/higress-console
- Higress Standalone: https://github.com/higress-group/higress-standalone
- Higress Plugin Server: https://github.com/higress-group/plugin-server
- Higress Wasm Plugin Golang SDK: https://github.com/higress-group/wasm-go
### Contributors

View File

@@ -208,6 +208,8 @@ WeChat公式アカウント
- Higressコンソール:https://github.com/higress-group/higress-console
- Higressスタンドアロン版:https://github.com/higress-group/higress-standalone
- Higress Plugin Server:https://github.com/higress-group/plugin-server
- Higress Wasm Plugin Golang SDK:https://github.com/higress-group/wasm-go
### 貢献者

View File

@@ -80,6 +80,24 @@ docker run -d --rm --name higress-ai -v ${PWD}:/data \
**Higress 的所有 Docker 镜像都一直使用自己独享的仓库,不受 Docker Hub 境内访问受限的影响**
> 如果从 `higress-registry.cn-hangzhou.cr.aliyuncs.com` 拉取镜像超时,可以尝试使用以下镜像加速源:
>
> **北美**: `higress-registry.us-west-1.cr.aliyuncs.com`
>
> **东南亚**: `higress-registry.ap-southeast-7.cr.aliyuncs.com`
> **K8s 部署时**,可以通过 Helm values 配置 `global.hub` 参数来使用距离部署区域更近的镜像仓库,该参数会同时应用于 Higress 组件镜像和内置 Wasm 插件镜像:
>
> ```bash
> # 示例:使用北美镜像源
> helm install higress -n higress-system higress.io/higress --set global.hub=higress-registry.us-west-1.cr.aliyuncs.com --create-namespace
> ```
>
> 可用镜像仓库:
> - **中国(杭州)**: `higress-registry.cn-hangzhou.cr.aliyuncs.com`(默认)
> - **北美**: `higress-registry.us-west-1.cr.aliyuncs.com`
> - **东南亚**: `higress-registry.ap-southeast-7.cr.aliyuncs.com`
K8s 下使用 Helm 部署等其他安装方式可以参考官网 [Quick Start 文档](https://higress.cn/docs/latest/user/quickstart/)。
如果您是在云上部署,推荐使用[企业版](https://www.aliyun.com/product/apigateway?spm=higress-github.topbar.0.0.0)
@@ -221,6 +239,8 @@ K8s 下使用 Helm 部署等其他安装方式可以参考官网 [Quick Start
- Higress 控制台:https://github.com/higress-group/higress-console
- Higress独立运行版:https://github.com/higress-group/higress-standalone
- Higress 插件服务器:https://github.com/higress-group/plugin-server
- Higress Wasm 插件 Golang SDK:https://github.com/higress-group/wasm-go
### 贡献者

View File

@@ -1 +1 @@
v2.1.9
v2.2.0

45
go.mod
View File

@@ -20,7 +20,7 @@ require (
github.com/caddyserver/certmagic v0.21.3
github.com/dubbogo/go-zookeeper v1.0.4-0.20211212162352-f9d2183d89d5
github.com/dubbogo/gost v1.13.1
github.com/envoyproxy/go-control-plane/envoy v1.35.0
github.com/envoyproxy/go-control-plane/envoy v1.36.0
github.com/go-errors/errors v1.5.1
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.4
@@ -38,10 +38,10 @@ require (
github.com/tidwall/gjson v1.17.0
go.uber.org/atomic v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/net v0.44.0
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
google.golang.org/grpc v1.76.0
google.golang.org/protobuf v1.36.10
golang.org/x/net v0.47.0
google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda
google.golang.org/grpc v1.78.0
google.golang.org/protobuf v1.36.11
istio.io/api v1.27.1-0.20250820125923-f5a5d3a605a9
istio.io/client-go v1.27.1-0.20250820130622-12f6d11feb40
istio.io/istio v0.0.0
@@ -65,7 +65,7 @@ require (
cloud.google.com/go v0.120.0 // indirect
cloud.google.com/go/auth v0.16.5 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.8.4 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
cloud.google.com/go/logging v1.13.0 // indirect
cloud.google.com/go/longrunning v0.6.7 // indirect
dario.cat/mergo v1.0.2 // indirect
@@ -103,11 +103,10 @@ require (
github.com/buger/jsonparser v1.1.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/clbanning/mxj v1.8.4 // indirect
github.com/clbanning/mxj/v2 v2.5.5 // indirect
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
github.com/coreos/go-oidc/v3 v3.14.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -117,23 +116,23 @@ require (
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/envoyproxy/go-control-plane v0.13.4 // indirect
github.com/envoyproxy/go-control-plane v0.14.0 // indirect
github.com/envoyproxy/go-control-plane/contrib v0.0.0-20251016030003-90eca0228178 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.2 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.2 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.1 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/mock v1.7.0-rc.1 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
@@ -220,7 +219,7 @@ require (
github.com/yl2chen/cidranger v1.0.2 // indirect
github.com/zeebo/blake3 v0.2.3 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
@@ -231,24 +230,24 @@ require (
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.42.0 // indirect
golang.org/x/crypto v0.44.0 // indirect
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a // indirect
golang.org/x/mod v0.28.0 // indirect
golang.org/x/oauth2 v0.31.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/term v0.35.0 // indirect
golang.org/x/text v0.29.0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.13.0 // indirect
golang.org/x/tools v0.37.0 // indirect
golang.org/x/tools v0.38.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
google.golang.org/api v0.250.0 // indirect
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/gcfg.v1 v1.2.3 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

1897
go.sum
View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 2.1.9
appVersion: 2.2.0
description: Helm chart for deploying higress gateways
icon: https://higress.io/img/higress_logo_small.png
home: http://higress.io/
@@ -15,4 +15,4 @@ dependencies:
repository: "file://../redis"
version: 0.0.1
type: application
version: 2.1.9
version: 2.2.0

View File

@@ -23,7 +23,7 @@ spec:
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.global.hub }}/{{ .Values.redis.image | default "redis-stack-server" }}:{{ .Values.redis.tag | default .Chart.AppVersion }}"
image: "{{ .Values.global.hub }}/higress/{{ .Values.redis.image | default "redis-stack-server" }}:{{ .Values.redis.tag | default .Chart.AppVersion }}"
{{- if .Values.global.imagePullPolicy }}
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
{{- end }}

View File

@@ -39,7 +39,7 @@ template:
{{- end }}
containers:
- name: higress-gateway
image: "{{ .Values.gateway.hub | default .Values.global.hub }}/{{ .Values.gateway.image | default "gateway" }}:{{ .Values.gateway.tag | default .Chart.AppVersion }}"
image: "{{ .Values.gateway.hub | default .Values.global.hub }}/higress/{{ .Values.gateway.image | default "gateway" }}:{{ .Values.gateway.tag | default .Chart.AppVersion }}"
args:
- proxy
- router
@@ -123,6 +123,8 @@ template:
- name: LITE_METRICS
value: "on"
{{- end }}
- name: ISTIO_DELTA_XDS
value: "{{ .Values.global.enableDeltaXDS }}"
{{- if include "skywalking.enabled" . }}
- name: ISTIO_BOOTSTRAP_OVERRIDE
value: /etc/istio/custom-bootstrap/custom_bootstrap.json
@@ -203,7 +205,7 @@ template:
{{- if $o11y.enabled }}
{{- $config := $o11y.promtail }}
- name: promtail
image: {{ $config.image.repository | default (printf "%s/promtail" .Values.global.hub) }}:{{ $config.image.tag }}
image: {{ $config.image.repository | default (printf "%s/higress/promtail" .Values.global.hub) }}:{{ $config.image.tag }}
imagePullPolicy: IfNotPresent
args:
- -config.file=/etc/promtail/promtail.yaml

View File

@@ -144,3 +144,7 @@ rules:
- apiGroups: [""]
verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ]
resources: [ "serviceaccounts"]
# istio leader election need
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "update", "patch", "create"]

View File

@@ -38,7 +38,7 @@ spec:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.controller.securityContext | nindent 12 }}
image: "{{ .Values.controller.hub | default .Values.global.hub }}/{{ .Values.controller.image | default "higress" }}:{{ .Values.controller.tag | default .Chart.AppVersion }}"
image: "{{ .Values.controller.hub | default .Values.global.hub }}/higress/{{ .Values.controller.image | default "higress" }}:{{ .Values.controller.tag | default .Chart.AppVersion }}"
args:
- "serve"
- --gatewaySelectorKey=higress
@@ -104,7 +104,7 @@ spec:
- name: log
mountPath: /var/log
- name: discovery
image: "{{ .Values.pilot.hub | default .Values.global.hub }}/{{ .Values.pilot.image | default "pilot" }}:{{ .Values.pilot.tag | default .Chart.AppVersion }}"
image: "{{ .Values.pilot.hub | default .Values.global.hub }}/higress/{{ .Values.pilot.image | default "pilot" }}:{{ .Values.pilot.tag | default .Chart.AppVersion }}"
{{- if .Values.global.imagePullPolicy }}
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
{{- end }}
@@ -173,6 +173,8 @@ spec:
value: "{{ .Values.global.xdsMaxRecvMsgSize }}"
- name: ENBALE_SCOPED_RDS
value: "{{ .Values.global.enableSRDS }}"
- name: ISTIO_DELTA_XDS
value: "{{ .Values.global.enableDeltaXDS }}"
- name: ON_DEMAND_RDS
value: "{{ .Values.global.onDemandRDS }}"
- name: HOST_RDS_MERGE_SUBSET

View File

@@ -23,7 +23,7 @@ spec:
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: {{ .Values.pluginServer.hub | default .Values.global.hub }}/{{ .Values.pluginServer.image | default "plugin-server" }}:{{ .Values.pluginServer.tag | default "1.0.0" }}
image: {{ .Values.pluginServer.hub | default .Values.global.hub }}/higress/{{ .Values.pluginServer.image | default "plugin-server" }}:{{ .Values.pluginServer.tag | default "1.0.0" }}
{{- if .Values.global.imagePullPolicy }}
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
{{- end }}

View File

@@ -9,6 +9,8 @@ global:
xdsMaxRecvMsgSize: "104857600"
defaultUpstreamConcurrencyThreshold: 10000
enableSRDS: true
# -- Whether to enable Istio delta xDS, default is true.
enableDeltaXDS: true
# -- Whether to enable Redis(redis-stack-server) for Higress, default is false.
enableRedis: false
enablePluginServer: false
@@ -68,10 +70,14 @@ global:
# cpu: 100m
# memory: 128Mi
# -- Default hub for Istio images.
# Releases are published to docker hub under 'istio' project.
# Dev builds from prow are on gcr.io
hub: higress-registry.cn-hangzhou.cr.aliyuncs.com/higress
# -- Default hub (registry) for Higress images.
# For Higress deployments, images are pulled from: {hub}/higress/{image}
# For built-in plugins, images are pulled from: {hub}/{pluginNamespace}/{plugin-name}
# Change this to use a mirror registry closer to your deployment region for faster image pulls.
hub: higress-registry.cn-hangzhou.cr.aliyuncs.com
# -- Namespace for built-in plugin images. Default is "plugins".
# Used by higress-console to configure plugin image path.
pluginNamespace: "plugins"
# -- Specify image pull policy if default behavior isn't desired.
# Default behavior: latest images will be Always else IfNotPresent.

View File

@@ -1,9 +1,9 @@
dependencies:
- name: higress-core
repository: file://../core
version: 2.1.9
version: 2.2.0
- name: higress-console
repository: https://higress.io/helm-charts/
version: 2.1.9
digest: sha256:d696af6726b40219cc16e7cf8de7400101479dfbd8deb3101d7ee736415b9875
generated: "2025-11-13T16:33:49.721553+08:00"
version: 2.2.0
digest: sha256:2cb148fa6d52856344e1905d3fea018466c2feb52013e08997c2d5c7d50f2e5d
generated: "2026-02-11T17:45:59.187965929+08:00"

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 2.1.9
appVersion: 2.2.0
description: Helm chart for deploying Higress gateways
icon: https://higress.io/img/higress_logo_small.png
home: http://higress.io/
@@ -12,9 +12,9 @@ sources:
dependencies:
- name: higress-core
repository: "file://../core"
version: 2.1.9
version: 2.2.0
- name: higress-console
repository: "https://higress.io/helm-charts/"
version: 2.1.9
version: 2.2.0
type: application
version: 2.1.9
version: 2.2.0

View File

@@ -163,6 +163,7 @@ The command removes all the Kubernetes components associated with the chart and
| global.defaultResources | object | `{"requests":{"cpu":"10m"}}` | A minimal set of requested resources to applied to all deployments so that Horizontal Pod Autoscaler will be able to function (if set). Each component can overwrite these default values by adding its own resources block in the relevant section below and setting the desired resources values. |
| global.defaultUpstreamConcurrencyThreshold | int | `10000` | |
| global.disableAlpnH2 | bool | `false` | Whether to disable HTTP/2 in ALPN |
| global.enableDeltaXDS | bool | `true` | Whether to enable Istio delta xDS, default is true. |
| global.enableGatewayAPI | bool | `true` | If true, Higress Controller will monitor Gateway API resources as well |
| global.enableH3 | bool | `false` | |
| global.enableIPv6 | bool | `false` | |
@@ -177,7 +178,7 @@ The command removes all the Kubernetes components associated with the chart and
| global.enableStatus | bool | `true` | If true, Higress Controller will update the status field of Ingress resources. When migrating from Nginx Ingress, in order to avoid status field of Ingress objects being overwritten, this parameter needs to be set to false, so Higress won't write the entry IP to the status field of the corresponding Ingress object. |
| global.externalIstiod | bool | `false` | Configure a remote cluster data plane controlled by an external istiod. When set to true, istiod is not deployed locally and only a subset of the other discovery charts are enabled. |
| global.hostRDSMergeSubset | bool | `false` | |
| global.hub | string | `"higress-registry.cn-hangzhou.cr.aliyuncs.com/higress"` | Default hub for Istio images. Releases are published to docker hub under 'istio' project. Dev builds from prow are on gcr.io |
| global.hub | string | `"higress-registry.cn-hangzhou.cr.aliyuncs.com"` | Default hub (registry) for Higress images. For Higress deployments, images are pulled from: {hub}/higress/{image} For built-in plugins, images are pulled from: {hub}/{pluginNamespace}/{plugin-name} Change this to use a mirror registry closer to your deployment region for faster image pulls. |
| global.imagePullPolicy | string | `""` | Specify image pull policy if default behavior isn't desired. Default behavior: latest images will be Always else IfNotPresent. |
| global.imagePullSecrets | list | `[]` | ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing) ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects. Must be set for any cluster configured with private docker registry. |
| global.ingressClass | string | `"higress"` | IngressClass filters which ingress resources the higress controller watches. The default ingress class is higress. There are some special cases for special ingress class. 1. When the ingress class is set as nginx, the higress controller will watch ingress resources with the nginx ingress class or without any ingress class. 2. When the ingress class is set empty, the higress controller will watch all ingress resources in the k8s cluster. |
@@ -202,6 +203,7 @@ The command removes all the Kubernetes components associated with the chart and
| global.onlyPushRouteCluster | bool | `true` | |
| global.operatorManageWebhooks | bool | `false` | Configure whether Operator manages webhook configurations. The current behavior of Istiod is to manage its own webhook configurations. When this option is set as true, Istio Operator, instead of webhooks, manages the webhook configurations. When this option is set as false, webhooks manage their own webhook configurations. |
| global.pilotCertProvider | string | `"istiod"` | Configure the certificate provider for control plane communication. Currently, two providers are supported: "kubernetes" and "istiod". As some platforms may not have kubernetes signing APIs, Istiod is the default |
| global.pluginNamespace | string | `"plugins"` | Namespace for built-in plugin images. Default is "plugins". Used by higress-console to configure plugin image path. |
| global.priorityClassName | string | `""` | Kubernetes >=v1.11.0 will create two PriorityClass, including system-cluster-critical and system-node-critical, it is better to configure this in order to make sure your Istio pods will not be killed because of low priority class. Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass for more detail. |
| global.proxy.autoInject | string | `"enabled"` | This controls the 'policy' in the sidecar injector. |
| global.proxy.clusterDomain | string | `"cluster.local"` | CAUTION: It is important to ensure that all Istio helm charts specify the same clusterDomain value cluster domain. Default value is "cluster.local". |

View File

@@ -64,16 +64,16 @@ require (
github.com/containerd/platforms v0.2.1 // indirect
github.com/containerd/ttrpc v1.2.7 // indirect
github.com/envoyproxy/go-control-plane/contrib v0.0.0-20251016030003-90eca0228178 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.2 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240409071808-615f978279ca // indirect
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
@@ -111,10 +111,9 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect
github.com/buger/goterm v1.0.4 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.3 // indirect
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/containerd v1.7.27 // indirect
github.com/containerd/continuity v0.4.4 // indirect
@@ -132,7 +131,7 @@ require (
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -152,7 +151,7 @@ require (
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/mock v1.7.0-rc.1 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.26.0 // indirect
@@ -162,7 +161,6 @@ require (
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
@@ -231,7 +229,6 @@ require (
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/prometheus/prometheus v0.307.1 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rubenv/sql-migrate v1.8.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -268,26 +265,26 @@ require (
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.42.0 // indirect
golang.org/x/crypto v0.44.0 // indirect
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a // indirect
golang.org/x/mod v0.28.0 // indirect
golang.org/x/net v0.44.0 // indirect
golang.org/x/oauth2 v0.31.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/term v0.35.0 // indirect
golang.org/x/text v0.29.0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.13.0 // indirect
golang.org/x/tools v0.37.0 // indirect
golang.org/x/tools v0.38.0 // indirect
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9 // indirect
google.golang.org/grpc v1.76.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect
google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
istio.io/api v1.27.1-0.20250820125923-f5a5d3a605a9 // indirect

View File

File diff suppressed because it is too large Load Diff

View File

@@ -224,6 +224,17 @@ Anthropic Claude 所对应的 `type` 为 `claude`。它特有的配置字段如
| 名称 | 数据类型 | 填写要求 | 默认值 | 描述 |
| --------------- | -------- | -------- | ------ | ----------------------------------------- |
| `claudeVersion` | string | 可选 | - | Claude 服务的 API 版本,默认为 2023-06-01 |
| `claudeCodeMode` | boolean | 可选 | false | 启用 Claude Code 模式,用于支持 Claude Code OAuth 令牌认证。启用后将伪装成 Claude Code 客户端发起请求 |
**Claude Code 模式说明**
启用 `claudeCodeMode: true` 时,插件将:
- 使用 Bearer Token 认证替代 x-api-key(适配 Claude Code OAuth 令牌)
- 设置 Claude Code 特定的请求头(user-agent、x-app、anthropic-beta)
- 为请求 URL 添加 `?beta=true` 查询参数
- 自动注入 Claude Code 的系统提示词(如未提供)
这允许在 Higress 中直接使用 Claude Code 的 OAuth Token 进行身份验证。
#### Ollama
@@ -1211,6 +1222,44 @@ URL: `http://your-domain/v1/messages`
}
```
### 使用 Claude Code 模式
Claude Code 是 Anthropic 提供的官方 CLI 工具。通过启用 `claudeCodeMode`,可以使用 Claude Code 的 OAuth Token 进行身份验证:
**配置信息**
```yaml
provider:
type: claude
apiTokens:
- 'sk-ant-oat01-xxxxx' # Claude Code OAuth Token
claudeCodeMode: true # 启用 Claude Code 模式
```
启用此模式后,插件将自动:
- 使用 Bearer Token 认证(而非 x-api-key)
- 设置 Claude Code 特定的请求头和查询参数
- 注入 Claude Code 的系统提示词(如未提供)
**请求示例**
```json
{
"model": "claude-sonnet-4-5-20250929",
"max_tokens": 8192,
"messages": [
{
"role": "user",
"content": "List files in current directory"
}
]
}
```
插件将自动转换为适合 Claude Code 的请求格式,包括:
- 添加系统提示词:`"You are Claude Code, Anthropic's official CLI for Claude."`
- 设置适当的认证和请求头
### 使用智能协议转换
当目标供应商不原生支持 Claude 协议时,插件会自动进行协议转换:

View File

@@ -185,11 +185,22 @@ For MiniMax, the corresponding `type` is `minimax`. Its unique configuration fie
#### Anthropic Claude
For Anthropic Claude, the corresponding `type` is `claude`. Its unique configuration field is:
For Anthropic Claude, the corresponding `type` is `claude`. Its unique configuration fields are:
| Name | Data Type | Filling Requirements | Default Value | Description |
|------------|-------------|----------------------|---------------|---------------------------------------------------------------------------------------------------------------|
| `claudeVersion` | string | Optional | - | The version of the Claude service's API, default is 2023-06-01. |
| `claudeCodeMode` | boolean | Optional | false | Enable Claude Code mode for OAuth token authentication. When enabled, requests will be formatted as Claude Code client requests. |
**Claude Code Mode**
When `claudeCodeMode: true` is enabled, the plugin will:
- Use Bearer Token authentication instead of x-api-key (compatible with Claude Code OAuth tokens)
- Set Claude Code-specific request headers (user-agent, x-app, anthropic-beta)
- Add `?beta=true` query parameter to request URLs
- Automatically inject Claude Code system prompt if not provided
This enables direct use of Claude Code OAuth tokens for authentication in Higress.
#### Ollama
@@ -1148,6 +1159,44 @@ Both protocol formats will return responses in their respective formats:
}
```
### Using Claude Code Mode
Claude Code is Anthropic's official CLI tool. By enabling `claudeCodeMode`, you can authenticate using Claude Code OAuth tokens:
**Configuration Information**
```yaml
provider:
type: claude
apiTokens:
- "sk-ant-oat01-xxxxx" # Claude Code OAuth Token
claudeCodeMode: true # Enable Claude Code mode
```
Once this mode is enabled, the plugin will automatically:
- Use Bearer Token authentication (instead of x-api-key)
- Set Claude Code-specific request headers and query parameters
- Inject Claude Code system prompt if not provided
**Request Example**
```json
{
"model": "claude-sonnet-4-5-20250929",
"max_tokens": 8192,
"messages": [
{
"role": "user",
"content": "List files in current directory"
}
]
}
```
The plugin will automatically transform the request into Claude Code format, including:
- Adding system prompt: `"You are Claude Code, Anthropic's official CLI for Claude."`
- Setting appropriate authentication and request headers
### Using Intelligent Protocol Conversion
When the target provider doesn't natively support Claude protocol, the plugin automatically performs protocol conversion:

View File

@@ -149,4 +149,16 @@ func TestBedrock(t *testing.T) {
test.RunBedrockOnHttpRequestBodyTests(t)
test.RunBedrockOnHttpResponseHeadersTests(t)
test.RunBedrockOnHttpResponseBodyTests(t)
test.RunBedrockToolCallTests(t)
}
func TestClaude(t *testing.T) {
test.RunClaudeParseConfigTests(t)
test.RunClaudeOnHttpRequestHeadersTests(t)
test.RunClaudeOnHttpRequestBodyTests(t)
}
func TestConsumerAffinity(t *testing.T) {
test.RunConsumerAffinityParseConfigTests(t)
test.RunConsumerAffinityOnHttpRequestHeadersTests(t)
}

View File

@@ -206,7 +206,16 @@ func (m *azureProvider) transformRequestPath(ctx wrapper.HttpContext, apiName Ap
path = strings.ReplaceAll(path, pathAzureModelPlaceholder, model)
log.Debugf("azureProvider: model replaced path: %s", path)
}
path = path + "?" + m.serviceUrl.RawQuery
if !strings.Contains(path, "?") {
// No query string yet
path = path + "?" + m.serviceUrl.RawQuery
} else if strings.HasSuffix(path, "?") {
// Ends with "?" and has no query parameter
path = path + m.serviceUrl.RawQuery
} else {
// Has other query parameters
path = path + "&" + m.serviceUrl.RawQuery
}
log.Debugf("azureProvider: final path: %s", path)
return path

View File

@@ -769,7 +769,15 @@ func (b *bedrockProvider) buildBedrockTextGenerationRequest(origRequest *chatCom
case roleSystem:
systemMessages = append(systemMessages, systemContentBlock{Text: msg.StringContent()})
case roleTool:
messages = append(messages, chatToolMessage2BedrockMessage(msg))
toolResultContent := chatToolMessage2BedrockToolResultContent(msg)
if len(messages) > 0 && messages[len(messages)-1].Role == roleUser && messages[len(messages)-1].Content[0].ToolResult != nil {
messages[len(messages)-1].Content = append(messages[len(messages)-1].Content, toolResultContent)
} else {
messages = append(messages, bedrockMessage{
Role: roleUser,
Content: []bedrockMessageContent{toolResultContent},
})
}
default:
messages = append(messages, chatMessage2BedrockMessage(msg))
}
@@ -1060,7 +1068,7 @@ type tokenUsage struct {
TotalTokens int `json:"totalTokens"`
}
func chatToolMessage2BedrockMessage(chatMessage chatMessage) bedrockMessage {
func chatToolMessage2BedrockToolResultContent(chatMessage chatMessage) bedrockMessageContent {
toolResultContent := &toolResultBlock{}
toolResultContent.ToolUseId = chatMessage.ToolCallId
if text, ok := chatMessage.Content.(string); ok {
@@ -1083,29 +1091,29 @@ func chatToolMessage2BedrockMessage(chatMessage chatMessage) bedrockMessage {
} else {
log.Warnf("the content type is not supported, current content is %v", chatMessage.Content)
}
return bedrockMessage{
Role: roleUser,
Content: []bedrockMessageContent{
{
ToolResult: toolResultContent,
},
},
return bedrockMessageContent{
ToolResult: toolResultContent,
}
}
func chatMessage2BedrockMessage(chatMessage chatMessage) bedrockMessage {
var result bedrockMessage
if len(chatMessage.ToolCalls) > 0 {
contents := make([]bedrockMessageContent, 0, len(chatMessage.ToolCalls))
for _, toolCall := range chatMessage.ToolCalls {
params := map[string]interface{}{}
json.Unmarshal([]byte(toolCall.Function.Arguments), &params)
contents = append(contents, bedrockMessageContent{
ToolUse: &toolUseBlock{
Input: params,
Name: toolCall.Function.Name,
ToolUseId: toolCall.Id,
},
})
}
result = bedrockMessage{
Role: chatMessage.Role,
Content: []bedrockMessageContent{{}},
}
params := map[string]interface{}{}
json.Unmarshal([]byte(chatMessage.ToolCalls[0].Function.Arguments), &params)
result.Content[0].ToolUse = &toolUseBlock{
Input: params,
Name: chatMessage.ToolCalls[0].Function.Name,
ToolUseId: chatMessage.ToolCalls[0].Id,
Content: contents,
}
} else if chatMessage.IsStringContent() {
result = bedrockMessage{

View File

@@ -19,6 +19,11 @@ const (
claudeDomain = "api.anthropic.com"
claudeDefaultVersion = "2023-06-01"
claudeDefaultMaxTokens = 4096
// Claude Code mode constants
claudeCodeUserAgent = "claude-cli/2.1.2 (external, cli)"
claudeCodeBetaFeatures = "oauth-2025-04-20,interleaved-thinking-2025-05-14,claude-code-20250219"
claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude."
)
type claudeProviderInitializer struct{}
@@ -68,8 +73,8 @@ type claudeChatMessageContent struct {
Name string `json:"name,omitempty"` // For tool_use
Input map[string]interface{} `json:"input,omitempty"` // For tool_use
// Tool result fields
ToolUseId string `json:"tool_use_id,omitempty"` // For tool_result
Content claudeChatMessageContentWr `json:"content,omitempty"` // For tool_result - can be string or array
ToolUseId string `json:"tool_use_id,omitempty"` // For tool_result
Content *claudeChatMessageContentWr `json:"content,omitempty"` // For tool_result - can be string or array
}
// UnmarshalJSON implements custom JSON unmarshaling for claudeChatMessageContentWr
@@ -232,13 +237,13 @@ type claudeTextGenResponse struct {
}
type claudeTextGenContent struct {
Type string `json:"type,omitempty"`
Text string `json:"text,omitempty"`
Id string `json:"id,omitempty"` // For tool_use
Name string `json:"name,omitempty"` // For tool_use
Input map[string]interface{} `json:"input,omitempty"` // For tool_use
Signature string `json:"signature,omitempty"` // For thinking
Thinking string `json:"thinking,omitempty"` // For thinking
Type string `json:"type,omitempty"`
Text *string `json:"text,omitempty"` // Use pointer: empty string outputs "text":"", nil omits field
Id string `json:"id,omitempty"` // For tool_use
Name string `json:"name,omitempty"` // For tool_use
Input *map[string]interface{} `json:"input,omitempty"` // Use pointer: empty map outputs "input":{}, nil omits field
Signature *string `json:"signature,omitempty"` // For thinking - use pointer for empty string output
Thinking *string `json:"thinking,omitempty"` // For thinking - use pointer for empty string output
}
type claudeTextGenUsage struct {
@@ -264,11 +269,12 @@ type claudeTextGenStreamResponse struct {
}
type claudeTextGenDelta struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
PartialJson string `json:"partial_json,omitempty"`
StopReason *string `json:"stop_reason,omitempty"`
StopSequence *string `json:"stop_sequence,omitempty"`
Type string `json:"type,omitempty"`
Text string `json:"text,omitempty"`
Thinking string `json:"thinking,omitempty"`
PartialJson string `json:"partial_json,omitempty"`
StopReason *string `json:"stop_reason,omitempty"`
StopSequence json.RawMessage `json:"stop_sequence,omitempty"` // Use RawMessage to output explicit null
}
func (c *claudeProviderInitializer) ValidateConfig(config *ProviderConfig) error {
@@ -319,13 +325,36 @@ func (c *claudeProvider) TransformRequestHeaders(ctx wrapper.HttpContext, apiNam
util.OverwriteRequestPathHeaderByCapability(headers, string(apiName), c.config.capabilities)
util.OverwriteRequestHostHeader(headers, claudeDomain)
headers.Set("x-api-key", c.config.GetApiTokenInUse(ctx))
if c.config.apiVersion == "" {
c.config.apiVersion = claudeDefaultVersion
}
headers.Set("anthropic-version", c.config.apiVersion)
// Check if Claude Code mode is enabled
if c.config.claudeCodeMode {
// Claude Code mode: use OAuth token with Bearer authorization
token := c.config.GetApiTokenInUse(ctx)
headers.Set("authorization", "Bearer "+token)
headers.Del("x-api-key")
// Set Claude Code specific headers
headers.Set("user-agent", claudeCodeUserAgent)
headers.Set("x-app", "cli")
headers.Set("anthropic-beta", claudeCodeBetaFeatures)
// Add ?beta=true query parameter to the path
currentPath := headers.Get(":path")
if currentPath != "" && !strings.Contains(currentPath, "beta=true") {
if strings.Contains(currentPath, "?") {
headers.Set(":path", currentPath+"&beta=true")
} else {
headers.Set(":path", currentPath+"?beta=true")
}
}
} else {
// Standard mode: use x-api-key
headers.Set("x-api-key", c.config.GetApiTokenInUse(ctx))
}
}
func (c *claudeProvider) OnRequestBody(ctx wrapper.HttpContext, apiName ApiName, body []byte) (types.Action, error) {
@@ -413,18 +442,158 @@ func (c *claudeProvider) buildClaudeTextGenRequest(origRequest *chatCompletionRe
claudeRequest.MaxTokens = claudeDefaultMaxTokens
}
// Convert OpenAI reasoning parameters to Claude thinking configuration
if origRequest.ReasoningEffort != "" || origRequest.ReasoningMaxTokens > 0 {
var budgetTokens int
if origRequest.ReasoningMaxTokens > 0 {
budgetTokens = origRequest.ReasoningMaxTokens
} else {
// Convert reasoning_effort to budget_tokens
switch origRequest.ReasoningEffort {
case "low":
budgetTokens = 1024 // Minimum required by Claude
case "medium":
budgetTokens = 8192
case "high":
budgetTokens = 16384
default:
budgetTokens = 8192 // Default to medium
}
}
// Ensure minimum budget_tokens requirement
if budgetTokens < 1024 {
budgetTokens = 1024
}
claudeRequest.Thinking = &claudeThinkingConfig{
Type: "enabled",
BudgetTokens: budgetTokens,
}
}
// Track if system message exists in original request
hasSystemMessage := false
for _, message := range origRequest.Messages {
if message.Role == roleSystem {
claudeRequest.System = &claudeSystemPrompt{
StringValue: message.StringContent(),
IsArray: false,
hasSystemMessage = true
// In Claude Code mode, use array format with cache_control
if c.config.claudeCodeMode {
claudeRequest.System = &claudeSystemPrompt{
ArrayValue: []claudeChatMessageContent{
{
Type: contentTypeText,
Text: message.StringContent(),
CacheControl: map[string]interface{}{
"type": "ephemeral",
},
},
},
IsArray: true,
}
} else {
claudeRequest.System = &claudeSystemPrompt{
StringValue: message.StringContent(),
IsArray: false,
}
}
continue
}
// Handle OpenAI "tool" role messages - convert to Claude "user" role with tool_result content
if message.Role == roleTool {
toolResultContent := claudeChatMessageContent{
Type: "tool_result",
ToolUseId: message.ToolCallId,
}
// Tool result content can be string or array
if message.IsStringContent() {
toolResultContent.Content = &claudeChatMessageContentWr{
StringValue: message.StringContent(),
IsString: true,
}
} else {
// For array content, extract text parts
var textParts []string
for _, part := range message.ParseContent() {
if part.Type == contentTypeText {
textParts = append(textParts, part.Text)
}
}
toolResultContent.Content = &claudeChatMessageContentWr{
StringValue: strings.Join(textParts, "\n"),
IsString: true,
}
}
// Check if the last message is a user message with tool_result, merge if so
if len(claudeRequest.Messages) > 0 {
lastMsg := &claudeRequest.Messages[len(claudeRequest.Messages)-1]
if lastMsg.Role == roleUser && !lastMsg.Content.IsString {
// Check if last message contains tool_result
hasToolResult := false
for _, content := range lastMsg.Content.ArrayValue {
if content.Type == "tool_result" {
hasToolResult = true
break
}
}
if hasToolResult {
// Merge with existing tool_result message
lastMsg.Content.ArrayValue = append(lastMsg.Content.ArrayValue, toolResultContent)
continue
}
}
}
// Create new user message with tool_result
claudeMessage := claudeChatMessage{
Role: roleUser,
Content: NewArrayContent([]claudeChatMessageContent{toolResultContent}),
}
claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
continue
}
claudeMessage := claudeChatMessage{
Role: message.Role,
}
// Handle assistant messages with tool_calls - convert to Claude tool_use content blocks
if message.Role == roleAssistant && len(message.ToolCalls) > 0 {
chatMessageContents := make([]claudeChatMessageContent, 0)
// Add text content if present
if message.IsStringContent() && message.StringContent() != "" {
chatMessageContents = append(chatMessageContents, claudeChatMessageContent{
Type: contentTypeText,
Text: message.StringContent(),
})
}
// Convert tool_calls to tool_use content blocks
for _, tc := range message.ToolCalls {
var inputMap map[string]interface{}
if tc.Function.Arguments != "" {
if err := json.Unmarshal([]byte(tc.Function.Arguments), &inputMap); err != nil {
log.Errorf("failed to parse tool call arguments: %v", err)
inputMap = make(map[string]interface{})
}
} else {
inputMap = make(map[string]interface{})
}
chatMessageContents = append(chatMessageContents, claudeChatMessageContent{
Type: "tool_use",
Id: tc.Id,
Name: tc.Function.Name,
Input: inputMap,
})
}
claudeMessage.Content = NewArrayContent(chatMessageContents)
claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
continue
}
if message.IsStringContent() {
claudeMessage.Content = NewStringContent(message.StringContent())
} else {
@@ -478,6 +647,22 @@ func (c *claudeProvider) buildClaudeTextGenRequest(origRequest *chatCompletionRe
claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
}
// In Claude Code mode, add default system prompt if not present
if c.config.claudeCodeMode && !hasSystemMessage {
claudeRequest.System = &claudeSystemPrompt{
ArrayValue: []claudeChatMessageContent{
{
Type: contentTypeText,
Text: claudeCodeSystemPrompt,
CacheControl: map[string]interface{}{
"type": "ephemeral",
},
},
},
IsArray: true,
}
}
for _, tool := range origRequest.Tools {
claudeTool := claudeTool{
Name: tool.Function.Name,
@@ -499,9 +684,41 @@ func (c *claudeProvider) buildClaudeTextGenRequest(origRequest *chatCompletionRe
}
func (c *claudeProvider) responseClaude2OpenAI(ctx wrapper.HttpContext, origResponse *claudeTextGenResponse) *chatCompletionResponse {
// Extract text content, thinking content, and tool calls from Claude response
var textContent string
var reasoningContent string
var toolCalls []toolCall
for _, content := range origResponse.Content {
switch content.Type {
case contentTypeText:
if content.Text != nil {
textContent = *content.Text
}
case "thinking":
if content.Thinking != nil {
reasoningContent = *content.Thinking
}
case "tool_use":
var args []byte
if content.Input != nil {
args, _ = json.Marshal(*content.Input)
} else {
args = []byte("{}")
}
toolCalls = append(toolCalls, toolCall{
Id: content.Id,
Type: "function",
Function: functionCall{
Name: content.Name,
Arguments: string(args),
},
})
}
}
choice := chatCompletionChoice{
Index: 0,
Message: &chatMessage{Role: roleAssistant, Content: origResponse.Content[0].Text},
Message: &chatMessage{Role: roleAssistant, Content: textContent, ReasoningContent: reasoningContent, ToolCalls: toolCalls},
FinishReason: util.Ptr(stopReasonClaude2OpenAI(origResponse.StopReason)),
}
@@ -537,6 +754,8 @@ func stopReasonClaude2OpenAI(reason *string) string {
return finishReasonStop
case "max_tokens":
return finishReasonLength
case "tool_use":
return finishReasonToolCall
default:
return *reason
}
@@ -563,11 +782,64 @@ func (c *claudeProvider) streamResponseClaude2OpenAI(ctx wrapper.HttpContext, or
}
return c.createChatCompletionResponse(ctx, origResponse, choice)
case "content_block_start":
// Handle tool_use content block start
if origResponse.ContentBlock != nil && origResponse.ContentBlock.Type == "tool_use" {
var index int
if origResponse.Index != nil {
index = *origResponse.Index
}
choice := chatCompletionChoice{
Index: index,
Delta: &chatMessage{
ToolCalls: []toolCall{
{
Index: index,
Id: origResponse.ContentBlock.Id,
Type: "function",
Function: functionCall{
Name: origResponse.ContentBlock.Name,
Arguments: "",
},
},
},
},
}
return c.createChatCompletionResponse(ctx, origResponse, choice)
}
return nil
case "content_block_delta":
var index int
if origResponse.Index != nil {
index = *origResponse.Index
}
// Handle tool_use input_json_delta
if origResponse.Delta != nil && origResponse.Delta.Type == "input_json_delta" {
choice := chatCompletionChoice{
Index: index,
Delta: &chatMessage{
ToolCalls: []toolCall{
{
Index: index,
Function: functionCall{
Arguments: origResponse.Delta.PartialJson,
},
},
},
},
}
return c.createChatCompletionResponse(ctx, origResponse, choice)
}
// Handle thinking_delta
if origResponse.Delta != nil && origResponse.Delta.Type == "thinking_delta" {
choice := chatCompletionChoice{
Index: index,
Delta: &chatMessage{Reasoning: origResponse.Delta.Thinking},
}
return c.createChatCompletionResponse(ctx, origResponse, choice)
}
// Handle text_delta
choice := chatCompletionChoice{
Index: index,
Delta: &chatMessage{Content: origResponse.Delta.Text},
@@ -604,7 +876,7 @@ func (c *claudeProvider) streamResponseClaude2OpenAI(ctx wrapper.HttpContext, or
TotalTokens: c.usage.TotalTokens,
},
}
case "content_block_stop", "ping", "content_block_start":
case "content_block_stop", "ping":
log.Debugf("skip processing response type: %s", origResponse.Type)
return nil
default:

View File

@@ -0,0 +1,421 @@
package provider
import (
"encoding/json"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestClaudeProviderInitializer_ValidateConfig verifies that config validation
// accepts a non-empty apiTokens list and rejects a nil or empty one.
func TestClaudeProviderInitializer_ValidateConfig(t *testing.T) {
	initializer := &claudeProviderInitializer{}

	cases := []struct {
		name      string
		tokens    []string
		wantError bool
	}{
		{"valid_config_with_api_tokens", []string{"test-token"}, false},
		{"invalid_config_without_api_tokens", nil, true},
		{"invalid_config_with_empty_api_tokens", []string{}, true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := initializer.ValidateConfig(&ProviderConfig{apiTokens: tc.tokens})
			if tc.wantError {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), "no apiToken found in provider config")
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
// TestClaudeProviderInitializer_DefaultCapabilities checks the default
// API-name-to-path mapping exposed by the Claude provider initializer.
func TestClaudeProviderInitializer_DefaultCapabilities(t *testing.T) {
	got := (&claudeProviderInitializer{}).DefaultCapabilities()

	want := map[string]string{
		string(ApiNameChatCompletion):    PathAnthropicMessages,
		string(ApiNameCompletion):        PathAnthropicComplete,
		string(ApiNameAnthropicMessages): PathAnthropicMessages,
		string(ApiNameEmbeddings):        PathOpenAIEmbeddings,
		string(ApiNameModels):            PathOpenAIModels,
	}
	assert.Equal(t, want, got)
}
// TestClaudeProviderInitializer_CreateProvider ensures CreateProvider returns
// a *claudeProvider carrying the configured API tokens.
func TestClaudeProviderInitializer_CreateProvider(t *testing.T) {
	cfg := ProviderConfig{apiTokens: []string{"test-token"}}

	provider, err := (&claudeProviderInitializer{}).CreateProvider(cfg)
	require.NoError(t, err)
	require.NotNil(t, provider)
	assert.Equal(t, providerTypeClaude, provider.GetProviderType())

	cp, ok := provider.(*claudeProvider)
	require.True(t, ok)
	assert.NotNil(t, cp.config.apiTokens)
	assert.Equal(t, []string{"test-token"}, cp.config.apiTokens)
}
// TestClaudeProvider_GetProviderType verifies that the provider reports the
// claude provider type.
func TestClaudeProvider_GetProviderType(t *testing.T) {
	p := &claudeProvider{
		config:       ProviderConfig{apiTokens: []string{"test-token"}},
		contextCache: createContextCache(&ProviderConfig{}),
	}

	assert.Equal(t, providerTypeClaude, p.GetProviderType())
}
// Note: TransformRequestHeaders tests are skipped because they require WASM runtime
// The header transformation logic is tested via integration tests instead.
// Here we test the helper functions and logic that can be unit tested.
// TestClaudeCodeMode_HeaderLogic exercises the path-rewriting logic used in
// Claude Code mode: a beta=true query parameter is appended exactly once,
// joined with "?" or "&" depending on whether the path already carries a
// query string. The original test duplicated this branching verbatim in three
// subtests; it is now factored into a single local helper.
func TestClaudeCodeMode_HeaderLogic(t *testing.T) {
	// addBetaParam mirrors the transformation performed in
	// TransformRequestHeaders (which needs the WASM runtime and is therefore
	// covered by integration tests instead of unit tests).
	addBetaParam := func(path string) string {
		if path == "" || strings.Contains(path, "beta=true") {
			return path
		}
		if strings.Contains(path, "?") {
			return path + "&beta=true"
		}
		return path + "?beta=true"
	}

	t.Run("adds_beta_query_param_to_path_without_query", func(t *testing.T) {
		assert.Equal(t, "/v1/messages?beta=true", addBetaParam("/v1/messages"))
	})
	t.Run("adds_beta_query_param_to_path_with_existing_query", func(t *testing.T) {
		assert.Equal(t, "/v1/messages?foo=bar&beta=true", addBetaParam("/v1/messages?foo=bar"))
	})
	t.Run("does_not_duplicate_beta_param", func(t *testing.T) {
		assert.Equal(t, "/v1/messages?beta=true", addBetaParam("/v1/messages?beta=true"))
	})
	t.Run("bearer_token_format", func(t *testing.T) {
		token := "sk-ant-oat01-oauth-token"
		assert.Equal(t, "Bearer sk-ant-oat01-oauth-token", "Bearer "+token)
	})
}
// TestClaudeProvider_BuildClaudeTextGenRequest_StandardMode covers request
// building with claudeCodeMode disabled: no defaults are injected, and an
// explicit system message is carried over in plain string form.
func TestClaudeProvider_BuildClaudeTextGenRequest_StandardMode(t *testing.T) {
	p := &claudeProvider{config: ProviderConfig{claudeCodeMode: false}}

	t.Run("builds_request_without_injecting_defaults", func(t *testing.T) {
		out := p.buildClaudeTextGenRequest(&chatCompletionRequest{
			Model:     "claude-sonnet-4-5-20250929",
			MaxTokens: 8192,
			Stream:    true,
			Messages:  []chatMessage{{Role: roleUser, Content: "Hello"}},
		})

		// Neither a default system prompt nor tools should be added.
		assert.Nil(t, out.System)
		assert.Empty(t, out.Tools)
	})

	t.Run("preserves_existing_system_message", func(t *testing.T) {
		out := p.buildClaudeTextGenRequest(&chatCompletionRequest{
			Model:     "claude-sonnet-4-5-20250929",
			MaxTokens: 8192,
			Messages: []chatMessage{
				{Role: roleSystem, Content: "You are a helpful assistant."},
				{Role: roleUser, Content: "Hello"},
			},
		})

		// System prompt kept verbatim, in string (non-array) form.
		assert.NotNil(t, out.System)
		assert.False(t, out.System.IsArray)
		assert.Equal(t, "You are a helpful assistant.", out.System.StringValue)
	})
}
// TestClaudeProvider_BuildClaudeTextGenRequest_ClaudeCodeMode covers request
// building with claudeCodeMode enabled: the default Claude Code system prompt
// is injected when none is supplied, and a user-provided system message is
// converted to array form with an ephemeral cache_control marker.
func TestClaudeProvider_BuildClaudeTextGenRequest_ClaudeCodeMode(t *testing.T) {
	p := &claudeProvider{config: ProviderConfig{claudeCodeMode: true}}

	t.Run("injects_default_system_prompt_when_missing", func(t *testing.T) {
		out := p.buildClaudeTextGenRequest(&chatCompletionRequest{
			Model:     "claude-sonnet-4-5-20250929",
			MaxTokens: 8192,
			Stream:    true,
			Messages:  []chatMessage{{Role: roleUser, Content: "List files"}},
		})

		// Default Claude Code system prompt injected in array form.
		require.NotNil(t, out.System)
		assert.True(t, out.System.IsArray)
		require.Len(t, out.System.ArrayValue, 1)

		block := out.System.ArrayValue[0]
		assert.Equal(t, claudeCodeSystemPrompt, block.Text)
		assert.Equal(t, contentTypeText, block.Type)
		// cache_control must be present and ephemeral so the prompt is cacheable.
		assert.NotNil(t, block.CacheControl)
		assert.Equal(t, "ephemeral", block.CacheControl["type"])
	})

	t.Run("preserves_existing_system_message_with_cache_control", func(t *testing.T) {
		out := p.buildClaudeTextGenRequest(&chatCompletionRequest{
			Model:     "claude-sonnet-4-5-20250929",
			MaxTokens: 8192,
			Messages: []chatMessage{
				{Role: roleSystem, Content: "Custom system prompt"},
				{Role: roleUser, Content: "Hello"},
			},
		})

		// The custom prompt survives, but converted to array form with cache_control.
		require.NotNil(t, out.System)
		assert.True(t, out.System.IsArray)
		require.Len(t, out.System.ArrayValue, 1)
		assert.Equal(t, "Custom system prompt", out.System.ArrayValue[0].Text)
		assert.NotNil(t, out.System.ArrayValue[0].CacheControl)
		assert.Equal(t, "ephemeral", out.System.ArrayValue[0].CacheControl["type"])
	})

	t.Run("full_request_transformation", func(t *testing.T) {
		out := p.buildClaudeTextGenRequest(&chatCompletionRequest{
			Model:       "claude-sonnet-4-5-20250929",
			MaxTokens:   8192,
			Stream:      true,
			Temperature: 1.0,
			Messages:    []chatMessage{{Role: roleUser, Content: "List files in current directory"}},
		})

		// Scalar fields carry over unchanged.
		assert.Equal(t, "claude-sonnet-4-5-20250929", out.Model)
		assert.Equal(t, 8192, out.MaxTokens)
		assert.True(t, out.Stream)
		assert.Equal(t, 1.0, out.Temperature)

		// Default system prompt injected in array form.
		require.NotNil(t, out.System)
		assert.True(t, out.System.IsArray)
		assert.Equal(t, claudeCodeSystemPrompt, out.System.ArrayValue[0].Text)

		// Single user message preserved; no tools injected by default.
		require.Len(t, out.Messages, 1)
		assert.Equal(t, roleUser, out.Messages[0].Role)
		assert.Empty(t, out.Tools)

		// The resulting request must serialize cleanly to JSON.
		encoded, err := json.Marshal(out)
		require.NoError(t, err)
		assert.NotEmpty(t, encoded)
	})
}
// Note: TransformRequestBody tests are skipped because they require WASM runtime
// The request body transformation is tested indirectly through buildClaudeTextGenRequest tests
// Test constants
// TestClaudeConstants pins the provider's wire-level constants so accidental
// edits are caught by CI.
func TestClaudeConstants(t *testing.T) {
	stringCases := []struct {
		want string
		got  string
	}{
		{"api.anthropic.com", claudeDomain},
		{"2023-06-01", claudeDefaultVersion},
		{"claude", providerTypeClaude},
		// Claude Code mode constants
		{"claude-cli/2.1.2 (external, cli)", claudeCodeUserAgent},
		{"oauth-2025-04-20,interleaved-thinking-2025-05-14,claude-code-20250219", claudeCodeBetaFeatures},
		{"You are Claude Code, Anthropic's official CLI for Claude.", claudeCodeSystemPrompt},
	}
	for _, tc := range stringCases {
		assert.Equal(t, tc.want, tc.got)
	}
	assert.Equal(t, 4096, claudeDefaultMaxTokens)
}
// TestClaudeProvider_GetApiName maps request paths to their API names,
// including the empty-name fallback for unknown paths.
func TestClaudeProvider_GetApiName(t *testing.T) {
	p := &claudeProvider{}

	t.Run("messages_path", func(t *testing.T) {
		// Both the bare path and a prefixed variant resolve to chat completion.
		for _, path := range []string{"/v1/messages", "/api/v1/messages"} {
			assert.Equal(t, ApiNameChatCompletion, p.GetApiName(path))
		}
	})
	t.Run("complete_path", func(t *testing.T) {
		assert.Equal(t, ApiNameCompletion, p.GetApiName("/v1/complete"))
	})
	t.Run("models_path", func(t *testing.T) {
		assert.Equal(t, ApiNameModels, p.GetApiName("/v1/models"))
	})
	t.Run("embeddings_path", func(t *testing.T) {
		assert.Equal(t, ApiNameEmbeddings, p.GetApiName("/v1/embeddings"))
	})
	t.Run("unknown_path", func(t *testing.T) {
		assert.Equal(t, ApiName(""), p.GetApiName("/unknown"))
	})
}
// TestClaudeProvider_BuildClaudeTextGenRequest_ToolRoleConversion verifies the
// OpenAI->Claude conversion of tool-related messages: assistant tool_calls
// become tool_use content blocks, "tool" role messages become user messages
// carrying tool_result blocks, and consecutive tool results are merged into a
// single user message.
func TestClaudeProvider_BuildClaudeTextGenRequest_ToolRoleConversion(t *testing.T) {
	// Standard mode: no Claude Code defaults interfere with the conversion.
	provider := &claudeProvider{
		config: ProviderConfig{
			claudeCodeMode: false,
		},
	}
	t.Run("converts_single_tool_role_to_user_with_tool_result", func(t *testing.T) {
		request := &chatCompletionRequest{
			Model:     "claude-sonnet-4-5-20250929",
			MaxTokens: 1024,
			Messages: []chatMessage{
				{Role: roleUser, Content: "What's the weather?"},
				{Role: roleAssistant, Content: nil, ToolCalls: []toolCall{
					{Id: "call_123", Type: "function", Function: functionCall{Name: "get_weather", Arguments: `{"city": "Beijing"}`}},
				}},
				{Role: roleTool, ToolCallId: "call_123", Content: "Sunny, 25°C"},
			},
		}
		claudeReq := provider.buildClaudeTextGenRequest(request)
		// Should have 3 messages: user, assistant with tool_use, user with tool_result
		require.Len(t, claudeReq.Messages, 3)
		// First message should be user
		assert.Equal(t, roleUser, claudeReq.Messages[0].Role)
		// Second message should be assistant with tool_use
		assert.Equal(t, roleAssistant, claudeReq.Messages[1].Role)
		require.False(t, claudeReq.Messages[1].Content.IsString)
		require.Len(t, claudeReq.Messages[1].Content.ArrayValue, 1)
		assert.Equal(t, "tool_use", claudeReq.Messages[1].Content.ArrayValue[0].Type)
		assert.Equal(t, "call_123", claudeReq.Messages[1].Content.ArrayValue[0].Id)
		assert.Equal(t, "get_weather", claudeReq.Messages[1].Content.ArrayValue[0].Name)
		// Third message should be user with tool_result
		assert.Equal(t, roleUser, claudeReq.Messages[2].Role)
		require.False(t, claudeReq.Messages[2].Content.IsString)
		require.Len(t, claudeReq.Messages[2].Content.ArrayValue, 1)
		assert.Equal(t, "tool_result", claudeReq.Messages[2].Content.ArrayValue[0].Type)
		// tool_use_id must link the result back to the originating call.
		assert.Equal(t, "call_123", claudeReq.Messages[2].Content.ArrayValue[0].ToolUseId)
	})
	t.Run("merges_multiple_tool_results_into_single_user_message", func(t *testing.T) {
		request := &chatCompletionRequest{
			Model:     "claude-sonnet-4-5-20250929",
			MaxTokens: 1024,
			Messages: []chatMessage{
				{Role: roleUser, Content: "What's the weather and time?"},
				{Role: roleAssistant, Content: nil, ToolCalls: []toolCall{
					{Id: "call_1", Type: "function", Function: functionCall{Name: "get_weather", Arguments: `{"city": "Beijing"}`}},
					{Id: "call_2", Type: "function", Function: functionCall{Name: "get_time", Arguments: `{"timezone": "Asia/Shanghai"}`}},
				}},
				{Role: roleTool, ToolCallId: "call_1", Content: "Sunny, 25°C"},
				{Role: roleTool, ToolCallId: "call_2", Content: "3:00 PM"},
			},
		}
		claudeReq := provider.buildClaudeTextGenRequest(request)
		// Should have 3 messages: user, assistant with 2 tool_use, user with 2 tool_results
		require.Len(t, claudeReq.Messages, 3)
		// Assistant message should have 2 tool_use blocks
		require.Len(t, claudeReq.Messages[1].Content.ArrayValue, 2)
		assert.Equal(t, "tool_use", claudeReq.Messages[1].Content.ArrayValue[0].Type)
		assert.Equal(t, "tool_use", claudeReq.Messages[1].Content.ArrayValue[1].Type)
		// User message should have 2 tool_result blocks merged
		assert.Equal(t, roleUser, claudeReq.Messages[2].Role)
		require.Len(t, claudeReq.Messages[2].Content.ArrayValue, 2)
		assert.Equal(t, "tool_result", claudeReq.Messages[2].Content.ArrayValue[0].Type)
		assert.Equal(t, "call_1", claudeReq.Messages[2].Content.ArrayValue[0].ToolUseId)
		assert.Equal(t, "tool_result", claudeReq.Messages[2].Content.ArrayValue[1].Type)
		assert.Equal(t, "call_2", claudeReq.Messages[2].Content.ArrayValue[1].ToolUseId)
	})
	t.Run("handles_assistant_tool_calls_with_text_content", func(t *testing.T) {
		request := &chatCompletionRequest{
			Model:     "claude-sonnet-4-5-20250929",
			MaxTokens: 1024,
			Messages: []chatMessage{
				{Role: roleUser, Content: "What's the weather?"},
				{Role: roleAssistant, Content: "Let me check the weather for you.", ToolCalls: []toolCall{
					{Id: "call_123", Type: "function", Function: functionCall{Name: "get_weather", Arguments: `{"city": "Beijing"}`}},
				}},
			},
		}
		claudeReq := provider.buildClaudeTextGenRequest(request)
		require.Len(t, claudeReq.Messages, 2)
		// Assistant message should have both text and tool_use
		assert.Equal(t, roleAssistant, claudeReq.Messages[1].Role)
		require.False(t, claudeReq.Messages[1].Content.IsString)
		require.Len(t, claudeReq.Messages[1].Content.ArrayValue, 2)
		// Text content is preserved ahead of the tool_use block.
		assert.Equal(t, contentTypeText, claudeReq.Messages[1].Content.ArrayValue[0].Type)
		assert.Equal(t, "Let me check the weather for you.", claudeReq.Messages[1].Content.ArrayValue[0].Text)
		assert.Equal(t, "tool_use", claudeReq.Messages[1].Content.ArrayValue[1].Type)
	})
}

View File

@@ -119,6 +119,15 @@ func (c *ClaudeToOpenAIConverter) ConvertClaudeRequestToOpenAI(body []byte) ([]b
}
openaiRequest.Messages = append(openaiRequest.Messages, toolMsg)
}
// Also add text content if present alongside tool results
// This handles cases like: [tool_result, tool_result, text]
if len(conversionResult.textParts) > 0 {
textMsg := chatMessage{
Role: claudeMsg.Role,
Content: strings.Join(conversionResult.textParts, "\n\n"),
}
openaiRequest.Messages = append(openaiRequest.Messages, textMsg)
}
}
// Handle regular content if no tool calls or tool results
@@ -136,7 +145,8 @@ func (c *ClaudeToOpenAIConverter) ConvertClaudeRequestToOpenAI(body []byte) ([]b
if claudeRequest.System != nil {
systemMsg := chatMessage{Role: roleSystem}
if !claudeRequest.System.IsArray {
systemMsg.Content = claudeRequest.System.StringValue
// Strip dynamic cch field from billing header to enable caching
systemMsg.Content = stripCchFromBillingHeader(claudeRequest.System.StringValue)
} else {
conversionResult := c.convertContentArray(claudeRequest.System.ArrayValue)
systemMsg.Content = conversionResult.openaiContents
@@ -183,6 +193,7 @@ func (c *ClaudeToOpenAIConverter) ConvertClaudeRequestToOpenAI(body []byte) ([]b
if claudeRequest.Thinking.Type == "enabled" {
openaiRequest.ReasoningMaxTokens = claudeRequest.Thinking.BudgetTokens
openaiRequest.Thinking = &thinkingParam{Type: "enabled", BudgetToken: claudeRequest.Thinking.BudgetTokens}
// Set ReasoningEffort based on budget_tokens
// low: <4096, medium: >=4096 and <16384, high: >=16384
@@ -198,7 +209,10 @@ func (c *ClaudeToOpenAIConverter) ConvertClaudeRequestToOpenAI(body []byte) ([]b
claudeRequest.Thinking.BudgetTokens, openaiRequest.ReasoningEffort, openaiRequest.ReasoningMaxTokens)
}
} else {
log.Debugf("[Claude->OpenAI] No thinking config found")
// Explicitly disable thinking when not configured in Claude request
// This prevents providers like ZhipuAI from enabling thinking by default
openaiRequest.Thinking = &thinkingParam{Type: "disabled"}
log.Debugf("[Claude->OpenAI] No thinking config found, explicitly disabled")
}
result, err := json.Marshal(openaiRequest)
@@ -253,19 +267,21 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIResponseToClaude(ctx wrapper.Http
}
if reasoningText != "" {
emptySignature := ""
contents = append(contents, claudeTextGenContent{
Type: "thinking",
Signature: "", // OpenAI doesn't provide signature, use empty string
Thinking: reasoningText,
Signature: &emptySignature, // Use pointer for empty string
Thinking: &reasoningText,
})
log.Debugf("[OpenAI->Claude] Added thinking content: %s", reasoningText)
}
// Add text content if present
if choice.Message.StringContent() != "" {
textContent := choice.Message.StringContent()
contents = append(contents, claudeTextGenContent{
Type: "text",
Text: choice.Message.StringContent(),
Text: &textContent,
})
}
@@ -288,7 +304,7 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIResponseToClaude(ctx wrapper.Http
Type: "tool_use",
Id: toolCall.Id,
Name: toolCall.Function.Name,
Input: input,
Input: &input,
})
}
}
@@ -338,7 +354,7 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIStreamResponseToClaude(ctx wrappe
Index: &c.thinkingBlockIndex,
}
stopData, _ := json.Marshal(stopEvent)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
result.WriteString(fmt.Sprintf("event: %s\ndata: %s\n\n", stopEvent.Type, stopData))
}
if c.textBlockStarted && !c.textBlockStopped {
c.textBlockStopped = true
@@ -348,7 +364,7 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIStreamResponseToClaude(ctx wrappe
Index: &c.textBlockIndex,
}
stopData, _ := json.Marshal(stopEvent)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
result.WriteString(fmt.Sprintf("event: %s\ndata: %s\n\n", stopEvent.Type, stopData))
}
// Send final content_block_stop events for any remaining unclosed tool calls
for index, toolCall := range c.toolCallStates {
@@ -360,7 +376,7 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIStreamResponseToClaude(ctx wrappe
Index: &toolCall.claudeContentIndex,
}
stopData, _ := json.Marshal(stopEvent)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
result.WriteString(fmt.Sprintf("event: %s\ndata: %s\n\n", stopEvent.Type, stopData))
}
}
@@ -370,12 +386,12 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIStreamResponseToClaude(ctx wrappe
messageDelta := &claudeTextGenStreamResponse{
Type: "message_delta",
Delta: &claudeTextGenDelta{
Type: "message_delta",
StopReason: c.pendingStopReason,
StopReason: c.pendingStopReason,
StopSequence: json.RawMessage("null"),
},
}
stopData, _ := json.Marshal(messageDelta)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
result.WriteString(fmt.Sprintf("event: %s\ndata: %s\n\n", messageDelta.Type, stopData))
c.pendingStopReason = nil
}
@@ -386,7 +402,7 @@ func (c *ClaudeToOpenAIConverter) ConvertOpenAIStreamResponseToClaude(ctx wrappe
Type: "message_stop",
}
stopData, _ := json.Marshal(messageStopEvent)
result.WriteString(fmt.Sprintf("data: %s\n\n", stopData))
result.WriteString(fmt.Sprintf("event: %s\ndata: %s\n\n", messageStopEvent.Type, stopData))
}
// Reset all state for next request
@@ -515,13 +531,14 @@ func (c *ClaudeToOpenAIConverter) buildClaudeStreamResponse(ctx wrapper.HttpCont
c.nextContentIndex++
c.thinkingBlockStarted = true
log.Debugf("[OpenAI->Claude] Generated content_block_start event for thinking at index %d", c.thinkingBlockIndex)
emptyStr := ""
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_start",
Index: &c.thinkingBlockIndex,
ContentBlock: &claudeTextGenContent{
Type: "thinking",
Signature: "", // OpenAI doesn't provide signature
Thinking: "",
Signature: &emptyStr, // Use pointer for empty string output
Thinking: &emptyStr, // Use pointer for empty string output
},
})
}
@@ -532,8 +549,8 @@ func (c *ClaudeToOpenAIConverter) buildClaudeStreamResponse(ctx wrapper.HttpCont
Type: "content_block_delta",
Index: &c.thinkingBlockIndex,
Delta: &claudeTextGenDelta{
Type: "thinking_delta", // Use thinking_delta for reasoning content
Text: reasoningText,
Type: "thinking_delta",
Thinking: reasoningText, // Use Thinking field, not Text
},
})
}
@@ -564,12 +581,13 @@ func (c *ClaudeToOpenAIConverter) buildClaudeStreamResponse(ctx wrapper.HttpCont
c.nextContentIndex++
c.textBlockStarted = true
log.Debugf("[OpenAI->Claude] Generated content_block_start event for text at index %d", c.textBlockIndex)
emptyText := ""
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_start",
Index: &c.textBlockIndex,
ContentBlock: &claudeTextGenContent{
Type: "text",
Text: "",
Text: &emptyText,
},
})
}
@@ -588,6 +606,30 @@ func (c *ClaudeToOpenAIConverter) buildClaudeStreamResponse(ctx wrapper.HttpCont
// Handle tool calls in streaming response
if choice.Delta != nil && len(choice.Delta.ToolCalls) > 0 {
// Ensure message_start is sent before any content blocks
if !c.messageStartSent {
c.messageId = openaiResponse.Id
c.messageStartSent = true
message := &claudeTextGenResponse{
Id: openaiResponse.Id,
Type: "message",
Role: "assistant",
Model: openaiResponse.Model,
Content: []claudeTextGenContent{},
}
if openaiResponse.Usage != nil {
message.Usage = claudeTextGenUsage{
InputTokens: openaiResponse.Usage.PromptTokens,
OutputTokens: 0,
}
}
responses = append(responses, &claudeTextGenStreamResponse{
Type: "message_start",
Message: message,
})
log.Debugf("[OpenAI->Claude] Generated message_start event before tool calls for id: %s", openaiResponse.Id)
}
// Initialize toolCallStates if needed
if c.toolCallStates == nil {
c.toolCallStates = make(map[int]*toolCallInfo)
@@ -722,7 +764,9 @@ func (c *ClaudeToOpenAIConverter) buildClaudeStreamResponse(ctx wrapper.HttpCont
}
// Handle usage information
if openaiResponse.Usage != nil && choice.FinishReason == nil {
// Note: Some providers may send usage in the same chunk as finish_reason,
// so we check for usage regardless of whether finish_reason is present
if openaiResponse.Usage != nil {
log.Debugf("[OpenAI->Claude] Processing usage info - input: %d, output: %d",
openaiResponse.Usage.PromptTokens, openaiResponse.Usage.CompletionTokens)
@@ -730,7 +774,7 @@ func (c *ClaudeToOpenAIConverter) buildClaudeStreamResponse(ctx wrapper.HttpCont
messageDelta := &claudeTextGenStreamResponse{
Type: "message_delta",
Delta: &claudeTextGenDelta{
Type: "message_delta",
StopSequence: json.RawMessage("null"), // Explicit null per Claude spec
},
Usage: &claudeTextGenUsage{
InputTokens: openaiResponse.Usage.PromptTokens,
@@ -789,10 +833,12 @@ func (c *ClaudeToOpenAIConverter) convertContentArray(claudeContents []claudeCha
switch claudeContent.Type {
case "text":
if claudeContent.Text != "" {
result.textParts = append(result.textParts, claudeContent.Text)
// Strip dynamic cch field from billing header to enable caching
processedText := stripCchFromBillingHeader(claudeContent.Text)
result.textParts = append(result.textParts, processedText)
result.openaiContents = append(result.openaiContents, chatMessageContent{
Type: contentTypeText,
Text: claudeContent.Text,
Text: processedText,
CacheControl: claudeContent.CacheControl,
})
}
@@ -884,6 +930,7 @@ func (c *ClaudeToOpenAIConverter) startToolCall(toolState *toolCallInfo) []*clau
toolState.claudeContentIndex, toolState.id, toolState.name)
// Send content_block_start
emptyInput := map[string]interface{}{}
responses = append(responses, &claudeTextGenStreamResponse{
Type: "content_block_start",
Index: &toolState.claudeContentIndex,
@@ -891,7 +938,7 @@ func (c *ClaudeToOpenAIConverter) startToolCall(toolState *toolCallInfo) []*clau
Type: "tool_use",
Id: toolState.id,
Name: toolState.name,
Input: map[string]interface{}{}, // Empty input as per Claude spec
Input: &emptyInput, // Empty input as per Claude spec
},
})
@@ -910,3 +957,42 @@ func (c *ClaudeToOpenAIConverter) startToolCall(toolState *toolCallInfo) []*clau
return responses
}
// stripCchFromBillingHeader removes the dynamic cch field from
// x-anthropic-billing-header text to enable caching: the cch value changes on
// every request, which would otherwise break prompt caching.
//
// Example:
//
//	in:  "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode; cch=abc123;"
//	out: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode;"
func stripCchFromBillingHeader(text string) string {
	const billingHeaderPrefix = "x-anthropic-billing-header:"

	// Anything other than a billing header passes through untouched.
	if !strings.HasPrefix(text, billingHeaderPrefix) {
		return text
	}

	// Repeatedly drop "; cch=<value>" segments. A value runs up to the next
	// semicolon; when cch is the final field, the tail is removed entirely.
	out := text
	for {
		idx := strings.Index(out, "; cch=")
		if idx == -1 {
			return out
		}
		valueStart := idx + len("; ")
		if rel := strings.Index(out[valueStart:], ";"); rel >= 0 {
			// Keep the terminating semicolon so remaining fields stay delimited.
			out = out[:idx] + out[valueStart+rel:]
		} else {
			// cch is the last field: cut from the separator to end of string.
			out = out[:idx]
		}
	}
}

View File

@@ -388,6 +388,7 @@ func TestClaudeToOpenAIConverter_ConvertClaudeRequestToOpenAI(t *testing.T) {
t.Run("convert_tool_result_with_actual_error_data", func(t *testing.T) {
// Test using the actual JSON data from the error log to ensure our fix works
// This tests the fix for issue #3344 - text content alongside tool_result should be preserved
claudeRequest := `{
"model": "anthropic/claude-sonnet-4",
"messages": [{
@@ -415,14 +416,20 @@ func TestClaudeToOpenAIConverter_ConvertClaudeRequestToOpenAI(t *testing.T) {
err = json.Unmarshal(result, &openaiRequest)
require.NoError(t, err)
// Should have one tool message (the text content is included in the same message array)
require.Len(t, openaiRequest.Messages, 1)
// Should have two messages: tool message + user message with text content
// This is the fix for issue #3344 - text content alongside tool_result is preserved
require.Len(t, openaiRequest.Messages, 2)
// Should be tool message
// First should be tool message
toolMsg := openaiRequest.Messages[0]
assert.Equal(t, "tool", toolMsg.Role)
assert.Contains(t, toolMsg.Content, "three.js")
assert.Equal(t, "toolu_vrtx_01UbCfwoTgoDBqbYEwkVaxd5", toolMsg.ToolCallId)
// Second should be user message with text content
userMsg := openaiRequest.Messages[1]
assert.Equal(t, "user", userMsg.Role)
assert.Equal(t, "继续", userMsg.Content)
})
t.Run("convert_multiple_tool_calls", func(t *testing.T) {
@@ -617,7 +624,7 @@ func TestClaudeToOpenAIConverter_ConvertOpenAIResponseToClaude(t *testing.T) {
// First content should be text
textContent := claudeResponse.Content[0]
assert.Equal(t, "text", textContent.Type)
assert.Equal(t, "I'll analyze the README file to understand this project's purpose.", textContent.Text)
assert.Equal(t, "I'll analyze the README file to understand this project's purpose.", *textContent.Text)
// Second content should be tool_use
toolContent := claudeResponse.Content[1]
@@ -627,7 +634,7 @@ func TestClaudeToOpenAIConverter_ConvertOpenAIResponseToClaude(t *testing.T) {
// Verify tool arguments
require.NotNil(t, toolContent.Input)
assert.Equal(t, "/Users/zhangty/git/higress/README.md", toolContent.Input["file_path"])
assert.Equal(t, "/Users/zhangty/git/higress/README.md", (*toolContent.Input)["file_path"])
})
}
@@ -830,21 +837,147 @@ func TestClaudeToOpenAIConverter_ConvertReasoningResponseToClaude(t *testing.T)
// First should be thinking
thinkingContent := claudeResponse.Content[0]
assert.Equal(t, "thinking", thinkingContent.Type)
assert.Equal(t, "", thinkingContent.Signature) // OpenAI doesn't provide signature
assert.Contains(t, thinkingContent.Thinking, "Let me think about this step by step")
require.NotNil(t, thinkingContent.Signature)
assert.Equal(t, "", *thinkingContent.Signature) // OpenAI doesn't provide signature
require.NotNil(t, thinkingContent.Thinking)
assert.Contains(t, *thinkingContent.Thinking, "Let me think about this step by step")
// Second should be text
textContent := claudeResponse.Content[1]
assert.Equal(t, "text", textContent.Type)
assert.Equal(t, tt.expectedText, textContent.Text)
require.NotNil(t, textContent.Text)
assert.Equal(t, tt.expectedText, *textContent.Text)
} else {
// Should only have text content
assert.Len(t, claudeResponse.Content, 1)
textContent := claudeResponse.Content[0]
assert.Equal(t, "text", textContent.Type)
assert.Equal(t, tt.expectedText, textContent.Text)
require.NotNil(t, textContent.Text)
assert.Equal(t, tt.expectedText, *textContent.Text)
}
})
}
}
// TestClaudeToOpenAIConverter_StripCchFromSystemMessage verifies that
// converting a Claude request to OpenAI format strips the dynamic cch
// billing field from system text blocks, while leaving ordinary system
// prompts untouched.
func TestClaudeToOpenAIConverter_StripCchFromSystemMessage(t *testing.T) {
	converter := &ClaudeToOpenAIConverter{}

	t.Run("string_system_with_billing_header", func(t *testing.T) {
		// Test that cch field is stripped from string format system message
		// (the "system" field here uses the array-of-text-blocks shape).
		claudeRequest := `{
"model": "claude-sonnet-4",
"max_tokens": 1024,
"system": [
{
"type": "text",
"text": "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode; cch=abc123;"
}
],
"messages": [{
"role": "user",
"content": "Hello"
}]
}`
		result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
		require.NoError(t, err)
		var openaiRequest chatCompletionRequest
		err = json.Unmarshal(result, &openaiRequest)
		require.NoError(t, err)
		// Expect system message + user message after conversion.
		require.Len(t, openaiRequest.Messages, 2)
		// First message should be system with cch stripped
		systemMsg := openaiRequest.Messages[0]
		assert.Equal(t, "system", systemMsg.Role)
		// The system content should have cch removed; the array-of-blocks
		// structure is preserved through the conversion.
		contentArray, ok := systemMsg.Content.([]interface{})
		require.True(t, ok, "System content should be an array")
		require.Len(t, contentArray, 1)
		contentMap, ok := contentArray[0].(map[string]interface{})
		require.True(t, ok)
		assert.Equal(t, "text", contentMap["type"])
		assert.Equal(t, "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode;", contentMap["text"])
		assert.NotContains(t, contentMap["text"], "cch=")
	})

	t.Run("plain_string_system_unchanged", func(t *testing.T) {
		// Test that normal system messages are not modified
		claudeRequest := `{
"model": "claude-sonnet-4",
"max_tokens": 1024,
"system": "You are a helpful assistant.",
"messages": [{
"role": "user",
"content": "Hello"
}]
}`
		result, err := converter.ConvertClaudeRequestToOpenAI([]byte(claudeRequest))
		require.NoError(t, err)
		var openaiRequest chatCompletionRequest
		err = json.Unmarshal(result, &openaiRequest)
		require.NoError(t, err)
		// First message should be system with original content
		systemMsg := openaiRequest.Messages[0]
		assert.Equal(t, "system", systemMsg.Role)
		assert.Equal(t, "You are a helpful assistant.", systemMsg.Content)
	})
}
func TestStripCchFromBillingHeader(t *testing.T) {
tests := []struct {
name string
input string
expected string
}{
{
name: "billing header with cch at end",
input: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode; cch=abc123;",
expected: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode;",
},
{
name: "billing header with cch at end without trailing semicolon",
input: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode; cch=abc123",
expected: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode",
},
{
name: "billing header with cch in middle",
input: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cch=abc123; cc_entrypoint=claude-vscode;",
expected: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode;",
},
{
name: "billing header without cch",
input: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode;",
expected: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode;",
},
{
name: "non-billing header text unchanged",
input: "This is a normal system prompt",
expected: "This is a normal system prompt",
},
{
name: "empty string unchanged",
input: "",
expected: "",
},
{
name: "billing header with multiple cch fields",
input: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cch=first; cc_entrypoint=claude-vscode; cch=second;",
expected: "x-anthropic-billing-header: cc_version=2.1.37.3a3; cc_entrypoint=claude-vscode;",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := stripCchFromBillingHeader(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}

View File

@@ -605,7 +605,7 @@ func (c *ProviderConfig) SetApiTokenInUse(ctx wrapper.HttpContext) {
if c.isFailoverEnabled() {
apiToken = c.GetGlobalRandomToken()
} else {
apiToken = c.GetRandomToken()
apiToken = c.GetOrSetTokenWithContext(ctx)
}
log.Debugf("Use apiToken %s to send request", apiToken)
ctx.SetContext(c.failover.ctxApiTokenInUse, apiToken)

View File

@@ -30,7 +30,13 @@ const (
)
type NonOpenAIStyleOptions struct {
ReasoningMaxTokens int `json:"reasoning_max_tokens,omitempty"`
ReasoningMaxTokens int `json:"reasoning_max_tokens,omitempty"`
Thinking *thinkingParam `json:"thinking,omitempty"`
}
// thinkingParam is the provider-native "thinking" request option, e.g.
// {"type": "enabled", "budget_token": 1024}.
type thinkingParam struct {
	// Type toggles thinking mode, e.g. "enabled".
	Type string `json:"type,omitempty"`
	// BudgetToken caps the thinking token budget.
	// NOTE(review): Anthropic's extended-thinking API spells this field
	// "budget_tokens" (plural) — confirm which wire name the target
	// provider actually expects before relying on this tag.
	BudgetToken int `json:"budget_token,omitempty"`
}
type chatCompletionRequest struct {

View File

@@ -2,8 +2,10 @@ package provider
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"hash/fnv"
"math/rand"
"net/http"
"path"
@@ -151,6 +153,7 @@ const (
protocolOriginal = "original"
roleSystem = "system"
roleDeveloper = "developer"
roleAssistant = "assistant"
roleUser = "user"
roleTool = "tool"
@@ -193,6 +196,12 @@ type providerInitializer interface {
var (
errUnsupportedApiName = errors.New("unsupported API name")
// Providers that support the "developer" role. Other providers will have "developer" roles converted to "system".
developerRoleSupportedProviders = map[string]bool{
providerTypeOpenAI: true,
providerTypeAzure: true,
}
providerInitializers = map[string]providerInitializer{
providerTypeMoonshot: &moonshotProviderInitializer{},
providerTypeAzure: &azureProviderInitializer{},
@@ -442,6 +451,15 @@ type ProviderConfig struct {
// @Title zh-CN 豆包服务域名
// @Description zh-CN 仅适用于豆包服务,默认转发域名为 ark.cn-beijing.volces.com
doubaoDomain string `required:"false" yaml:"doubaoDomain" json:"doubaoDomain"`
// @Title zh-CN Claude Code 模式
// @Description zh-CN 仅适用于Claude服务。启用后将伪装成Claude Code客户端发起请求支持使用Claude Code的OAuth Token进行认证。
claudeCodeMode bool `required:"false" yaml:"claudeCodeMode" json:"claudeCodeMode"`
// @Title zh-CN 智谱AI服务域名
// @Description zh-CN 仅适用于智谱AI服务。默认为 open.bigmodel.cn中国可配置为 api.z.ai国际
zhipuDomain string `required:"false" yaml:"zhipuDomain" json:"zhipuDomain"`
// @Title zh-CN 智谱AI Code Plan 模式
// @Description zh-CN 仅适用于智谱AI服务。启用后将使用 /api/coding/paas/v4/chat/completions 接口
zhipuCodePlanMode bool `required:"false" yaml:"zhipuCodePlanMode" json:"zhipuCodePlanMode"`
}
func (c *ProviderConfig) GetId() string {
@@ -646,6 +664,9 @@ func (c *ProviderConfig) FromJson(json gjson.Result) {
c.vllmServerHost = json.Get("vllmServerHost").String()
c.vllmCustomUrl = json.Get("vllmCustomUrl").String()
c.doubaoDomain = json.Get("doubaoDomain").String()
c.claudeCodeMode = json.Get("claudeCodeMode").Bool()
c.zhipuDomain = json.Get("zhipuDomain").String()
c.zhipuCodePlanMode = json.Get("zhipuCodePlanMode").Bool()
c.contextCleanupCommands = make([]string, 0)
for _, cmd := range json.Get("contextCleanupCommands").Array() {
if cmd.String() != "" {
@@ -686,12 +707,45 @@ func (c *ProviderConfig) Validate() error {
// GetOrSetTokenWithContext returns the API token bound to the current
// request context, selecting one via selectApiToken on first use and caching
// it so every later stage of the same request sees the same token.
// (Fix: removed a leftover ctxApiKey = c.GetRandomToken() assignment that was
// immediately overwritten by the selectApiToken result.)
func (c *ProviderConfig) GetOrSetTokenWithContext(ctx wrapper.HttpContext) string {
	ctxApiKey := ctx.GetContext(ctxKeyApiKey)
	if ctxApiKey == nil {
		token := c.selectApiToken(ctx)
		ctxApiKey = token
		ctx.SetContext(ctxKeyApiKey, ctxApiKey)
	}
	return ctxApiKey.(string)
}
// selectApiToken picks the API token to use for the current request.
// Stateful APIs are pinned to a consumer-affine token when a consumer
// identity is available; everything else falls back to a random token.
func (c *ProviderConfig) selectApiToken(ctx wrapper.HttpContext) string {
	var apiName string
	if raw := ctx.GetContext(CtxKeyApiName); raw != nil {
		// The context stores an ApiName value; normalize it to a string.
		apiName = string(raw.(ApiName))
	}
	if isStatefulAPI(apiName) {
		if consumer := c.getConsumerFromContext(ctx); consumer != "" {
			return c.GetTokenWithConsumerAffinity(ctx, consumer)
		}
	}
	// No affinity requirement (or no consumer identity): random selection.
	return c.GetRandomToken()
}
// getConsumerFromContext returns the consumer identity attached to the
// current request via the x-mse-consumer header, or "" when it is absent
// or unreadable.
func (c *ProviderConfig) getConsumerFromContext(ctx wrapper.HttpContext) string {
	consumer, err := proxywasm.GetHttpRequestHeader("x-mse-consumer")
	if err != nil || consumer == "" {
		return ""
	}
	return consumer
}
func (c *ProviderConfig) GetRandomToken() string {
apiTokens := c.apiTokens
count := len(apiTokens)
@@ -705,6 +759,50 @@ func (c *ProviderConfig) GetRandomToken() string {
}
}
// isStatefulAPI reports whether apiName identifies a stateful API — one whose
// calls depend on server-side state created by earlier calls (uploaded files,
// batches, fine-tuning jobs, response chains). Such requests should be routed
// to the same upstream credential consistently so that state stays visible.
func isStatefulAPI(apiName string) bool {
	// A switch avoids rebuilding a constant lookup map on every call.
	switch apiName {
	case string(ApiNameResponses), // Response API - uses previous_response_id
		string(ApiNameFiles),                    // Files API - maintains file state
		string(ApiNameRetrieveFile),             // File retrieval - depends on file upload
		string(ApiNameRetrieveFileContent),      // File content - depends on file upload
		string(ApiNameBatches),                  // Batch API - maintains batch state
		string(ApiNameRetrieveBatch),            // Batch status - depends on batch creation
		string(ApiNameCancelBatch),              // Batch operations - depends on batch state
		string(ApiNameFineTuningJobs),           // Fine-tuning - maintains job state
		string(ApiNameRetrieveFineTuningJob),    // Fine-tuning job status
		string(ApiNameFineTuningJobEvents),      // Fine-tuning events
		string(ApiNameFineTuningJobCheckpoints), // Fine-tuning checkpoints
		string(ApiNameCancelFineTuningJob),      // Cancel fine-tuning job
		string(ApiNameResumeFineTuningJob):      // Resume fine-tuning job
		return true
	}
	return false
}
// GetTokenWithConsumerAffinity selects an API token for the given consumer.
// The same consumer always maps to the same token (FNV-1a hash modulo the
// token count), which gives stateful APIs stable credential affinity.
// Returns "" when no tokens are configured.
func (c *ProviderConfig) GetTokenWithConsumerAffinity(ctx wrapper.HttpContext, consumer string) string {
	tokens := c.apiTokens
	n := len(tokens)
	if n == 0 {
		return ""
	}
	if n == 1 {
		return tokens[0]
	}
	hasher := fnv.New32a()
	hasher.Write([]byte(consumer))
	// int may be 32 bits on some targets, so this conversion can yield a
	// negative value; normalize the remainder into [0, n).
	idx := int(hasher.Sum32()) % n
	if idx < 0 {
		idx += n
	}
	return tokens[idx]
}
// IsOriginal reports whether this provider passes requests through using the
// upstream's original protocol (no protocol conversion applied).
func (c *ProviderConfig) IsOriginal() bool {
	return c.protocol == protocolOriginal
}
@@ -834,6 +932,34 @@ func doGetMappedModel(model string, modelMapping map[string]string) string {
return ""
}
// isDeveloperRoleSupported checks if the provider supports the "developer" role.
// Unknown provider types miss the map and get the zero value (false), i.e.
// they are treated as unsupported and will have "developer" converted to "system".
func isDeveloperRoleSupported(providerType string) bool {
	return developerRoleSupportedProviders[providerType]
}
// convertDeveloperRoleToSystem rewrites any "developer" message roles to
// "system" in an OpenAI-style chat completion request body. It is applied for
// providers that don't support the "developer" role.
//
// On unmarshal failure the original body is returned together with the error
// so the caller can decide to continue with the untouched payload. When no
// developer roles are present the body is returned unchanged, skipping the
// re-marshal.
//
// NOTE(review): the unmarshal/marshal round-trip drops any request fields not
// declared on chatCompletionRequest — confirm the struct covers every field
// that must survive this conversion.
func convertDeveloperRoleToSystem(body []byte) ([]byte, error) {
	request := &chatCompletionRequest{}
	if err := json.Unmarshal(body, request); err != nil {
		// %w keeps the cause inspectable via errors.Is/As.
		return body, fmt.Errorf("unable to unmarshal request for developer role conversion: %w", err)
	}
	converted := false
	for i := range request.Messages {
		if request.Messages[i].Role == roleDeveloper {
			request.Messages[i].Role = roleSystem
			converted = true
		}
	}
	if !converted {
		return body, nil
	}
	return json.Marshal(request)
}
func ExtractStreamingEvents(ctx wrapper.HttpContext, chunk []byte) []StreamEvent {
body := chunk
if bufferedStreamingBody, has := ctx.GetContext(ctxKeyStreamingBody).([]byte); has {
@@ -972,6 +1098,18 @@ func (c *ProviderConfig) handleRequestBody(
}
}
// convert developer role to system role for providers that don't support it
if apiName == ApiNameChatCompletion && !isDeveloperRoleSupported(c.typ) {
body, err = convertDeveloperRoleToSystem(body)
if err != nil {
log.Warnf("[developerRole] failed to convert developer role to system: %v", err)
// Continue processing even if conversion fails
err = nil
} else {
log.Debugf("[developerRole] converted developer role to system for provider: %s", c.typ)
}
}
// use openai protocol (either original openai or converted from claude)
if handler, ok := provider.(TransformRequestBodyHandler); ok {
body, err = handler.TransformRequestBody(ctx, apiName, body)

View File

@@ -0,0 +1,275 @@
package provider
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestIsStatefulAPI checks the stateful/stateless classification for every
// known API name plus empty and unknown inputs.
func TestIsStatefulAPI(t *testing.T) {
	cases := []struct {
		name     string
		apiName  string
		expected bool
	}{
		// Stateful APIs - should return true
		{"responses_api", string(ApiNameResponses), true},
		{"files_api", string(ApiNameFiles), true},
		{"retrieve_file_api", string(ApiNameRetrieveFile), true},
		{"retrieve_file_content_api", string(ApiNameRetrieveFileContent), true},
		{"batches_api", string(ApiNameBatches), true},
		{"retrieve_batch_api", string(ApiNameRetrieveBatch), true},
		{"cancel_batch_api", string(ApiNameCancelBatch), true},
		{"fine_tuning_jobs_api", string(ApiNameFineTuningJobs), true},
		{"retrieve_fine_tuning_job_api", string(ApiNameRetrieveFineTuningJob), true},
		{"fine_tuning_job_events_api", string(ApiNameFineTuningJobEvents), true},
		{"fine_tuning_job_checkpoints_api", string(ApiNameFineTuningJobCheckpoints), true},
		{"cancel_fine_tuning_job_api", string(ApiNameCancelFineTuningJob), true},
		{"resume_fine_tuning_job_api", string(ApiNameResumeFineTuningJob), true},
		// Non-stateful APIs - should return false
		{"chat_completion_api", string(ApiNameChatCompletion), false},
		{"completion_api", string(ApiNameCompletion), false},
		{"embeddings_api", string(ApiNameEmbeddings), false},
		{"models_api", string(ApiNameModels), false},
		{"image_generation_api", string(ApiNameImageGeneration), false},
		{"audio_speech_api", string(ApiNameAudioSpeech), false},
		// Empty/unknown API - should return false
		{"empty_api_name", "", false},
		{"unknown_api_name", "unknown/api", false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, isStatefulAPI(tc.apiName))
		})
	}
}
// TestGetTokenWithConsumerAffinity covers the empty, single-token, and
// multi-token selection paths of consumer-affine token selection.
func TestGetTokenWithConsumerAffinity(t *testing.T) {
	cases := []struct {
		name      string
		apiTokens []string
		consumer  string
		wantEmpty bool
		wantToken string // non-empty => exact expected token (single-token cases)
	}{
		{"no_tokens_returns_empty", []string{}, "consumer1", true, ""},
		{"nil_tokens_returns_empty", nil, "consumer1", true, ""},
		{"single_token_always_returns_same_token", []string{"token1"}, "consumer1", false, "token1"},
		{"single_token_with_different_consumer", []string{"token1"}, "consumer2", false, "token1"},
		// Multi-token cases: any configured token is acceptable, as long as
		// selection is consistent for the consumer.
		{"multiple_tokens_consistent_for_same_consumer", []string{"token1", "token2", "token3"}, "consumer1", false, ""},
		{"multiple_tokens_different_consumers_may_get_different_tokens", []string{"token1", "token2"}, "consumerA", false, ""},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cfg := &ProviderConfig{apiTokens: tc.apiTokens}
			got := cfg.GetTokenWithConsumerAffinity(nil, tc.consumer)
			switch {
			case tc.wantEmpty:
				assert.Empty(t, got)
			case tc.wantToken != "":
				assert.Equal(t, tc.wantToken, got)
			default:
				assert.NotEmpty(t, got)
				assert.Contains(t, tc.apiTokens, got)
			}
		})
	}
}
// TestGetTokenWithConsumerAffinity_Consistency verifies that token selection
// is deterministic per consumer and spreads different consumers across tokens.
func TestGetTokenWithConsumerAffinity_Consistency(t *testing.T) {
	cfg := &ProviderConfig{
		apiTokens: []string{"token1", "token2", "token3", "token4", "token5"},
	}

	t.Run("same_consumer_gets_same_token_repeatedly", func(t *testing.T) {
		const consumer = "test-consumer"
		want := cfg.GetTokenWithConsumerAffinity(nil, consumer)
		// Repeated calls must keep returning the identical token.
		for i := 0; i < 9; i++ {
			got := cfg.GetTokenWithConsumerAffinity(nil, consumer)
			assert.Equal(t, want, got, "Consumer should consistently get the same token")
		}
	})

	t.Run("different_consumers_distribute_across_tokens", func(t *testing.T) {
		consumers := []string{"consumer1", "consumer2", "consumer3", "consumer4", "consumer5", "consumer6", "consumer7", "consumer8", "consumer9", "consumer10"}
		seen := make(map[string]int)
		for _, consumer := range consumers {
			seen[cfg.GetTokenWithConsumerAffinity(nil, consumer)]++
		}
		// Every selected token must be one of the configured tokens.
		for token := range seen {
			assert.Contains(t, cfg.apiTokens, token)
		}
		// 10 consumers over 5 tokens: expect at least some spread
		// (perfect balance is not required).
		assert.GreaterOrEqual(t, len(seen), 2, "Should use at least 2 different tokens")
	})

	t.Run("empty_consumer_returns_empty_string", func(t *testing.T) {
		// NOTE: despite the subtest name, an empty consumer still hashes to a
		// valid token; the assertions below expect a non-empty result.
		local := &ProviderConfig{
			apiTokens: []string{"token1", "token2"},
		}
		got := local.GetTokenWithConsumerAffinity(nil, "")
		assert.NotEmpty(t, got)
		assert.Contains(t, []string{"token1", "token2"}, got)
	})
}
// TestGetTokenWithConsumerAffinity_HashDistribution verifies that hash-based
// selection always lands on a configured token and is stable per consumer.
// (Fix: the previous table carried an always-true expectValid field whose
// assert.True was a tautology; it is replaced with a meaningful determinism
// check.)
func TestGetTokenWithConsumerAffinity_HashDistribution(t *testing.T) {
	config := &ProviderConfig{
		apiTokens: []string{"token1", "token2", "token3"},
	}
	consumers := []string{"user-alice", "user-bob", "user-charlie", "service-api-v1", "service-api-v2"}
	for _, consumer := range consumers {
		t.Run("consumer_"+consumer, func(t *testing.T) {
			result := config.GetTokenWithConsumerAffinity(nil, consumer)
			// The selected token must be one of the configured tokens.
			assert.Contains(t, config.apiTokens, result)
			// Selection must be deterministic for the same consumer.
			assert.Equal(t, result, config.GetTokenWithConsumerAffinity(nil, consumer))
		})
	}
}

View File

@@ -8,11 +8,15 @@ import (
"github.com/alibaba/higress/plugins/wasm-go/extensions/ai-proxy/util"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
"github.com/higress-group/wasm-go/pkg/wrapper"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
const (
zhipuAiDomain = "open.bigmodel.cn"
zhipuAiDefaultDomain = "open.bigmodel.cn"
zhipuAiInternationalDomain = "api.z.ai"
zhipuAiChatCompletionPath = "/api/paas/v4/chat/completions"
zhipuAiCodePlanPath = "/api/coding/paas/v4/chat/completions"
zhipuAiEmbeddingsPath = "/api/paas/v4/embeddings"
zhipuAiAnthropicMessagesPath = "/api/anthropic/v1/messages"
)
@@ -26,16 +30,20 @@ func (m *zhipuAiProviderInitializer) ValidateConfig(config *ProviderConfig) erro
return nil
}
func (m *zhipuAiProviderInitializer) DefaultCapabilities() map[string]string {
// DefaultCapabilities maps supported API names to their ZhipuAI endpoint
// paths. When codePlanMode is enabled, chat completions are routed to the
// dedicated Code Plan endpoint instead of the regular one.
// (Fix: removed a leftover duplicate map entry for ApiNameChatCompletion,
// which was a duplicate-key compile error.)
func (m *zhipuAiProviderInitializer) DefaultCapabilities(codePlanMode bool) map[string]string {
	chatPath := zhipuAiChatCompletionPath
	if codePlanMode {
		chatPath = zhipuAiCodePlanPath
	}
	return map[string]string{
		string(ApiNameChatCompletion): chatPath,
		string(ApiNameEmbeddings):     zhipuAiEmbeddingsPath,
		// string(ApiNameAnthropicMessages): zhipuAiAnthropicMessagesPath,
	}
}
func (m *zhipuAiProviderInitializer) CreateProvider(config ProviderConfig) (Provider, error) {
config.setDefaultCapabilities(m.DefaultCapabilities())
config.setDefaultCapabilities(m.DefaultCapabilities(config.zhipuCodePlanMode))
return &zhipuAiProvider{
config: config,
contextCache: createContextCache(&config),
@@ -65,13 +73,35 @@ func (m *zhipuAiProvider) OnRequestBody(ctx wrapper.HttpContext, apiName ApiName
// TransformRequestHeaders rewrites the request path, host, and authorization
// headers for ZhipuAI. The host honors the configured zhipuDomain (e.g. the
// international api.z.ai) and falls back to the default China-region domain.
// (Fix: removed a leftover OverwriteRequestHostHeader call that referenced
// the pre-rename zhipuAiDomain constant and would clobber the configured
// domain.)
func (m *zhipuAiProvider) TransformRequestHeaders(ctx wrapper.HttpContext, apiName ApiName, headers http.Header) {
	util.OverwriteRequestPathHeaderByCapability(headers, string(apiName), m.config.capabilities)
	// Use configured domain or default to China domain
	domain := m.config.zhipuDomain
	if domain == "" {
		domain = zhipuAiDefaultDomain
	}
	util.OverwriteRequestHostHeader(headers, domain)
	util.OverwriteRequestAuthorizationHeader(headers, "Bearer "+m.config.GetApiTokenInUse(ctx))
	// Body may be rewritten later; let the proxy recompute the length.
	headers.Del("Content-Length")
}
// TransformRequestBody adapts an OpenAI-style chat completion body for
// ZhipuAI: a reasoning_effort field (which ZhipuAI doesn't recognize) is
// translated into its native {"thinking": {"type": "enabled"}} switch.
// Non-chat API bodies go through the default transformation untouched.
func (m *zhipuAiProvider) TransformRequestBody(ctx wrapper.HttpContext, apiName ApiName, body []byte) ([]byte, error) {
	if apiName != ApiNameChatCompletion {
		return m.config.defaultTransformRequestBody(ctx, apiName, body)
	}
	if effort := gjson.GetBytes(body, "reasoning_effort").String(); effort != "" {
		// Enable ZhipuAI's thinking mode and drop the unrecognized field.
		// sjson errors are deliberately ignored, as in best-effort rewriting
		// the body remains usable either way.
		body, _ = sjson.SetBytes(body, "thinking", map[string]string{"type": "enabled"})
		body, _ = sjson.DeleteBytes(body, "reasoning_effort")
	}
	return m.config.defaultTransformRequestBody(ctx, apiName, body)
}
func (m *zhipuAiProvider) GetApiName(path string) ApiName {
if strings.Contains(path, zhipuAiChatCompletionPath) {
if strings.Contains(path, zhipuAiChatCompletionPath) || strings.Contains(path, zhipuAiCodePlanPath) {
return ApiNameChatCompletion
}
if strings.Contains(path, zhipuAiEmbeddingsPath) {

View File

@@ -343,7 +343,7 @@ func RunAzureOnHttpRequestHeadersTests(t *testing.T) {
// 验证Path是否被正确处理
pathValue, hasPath := test.GetHeaderValue(requestHeaders, ":path")
require.True(t, hasPath, "Path header should exist")
require.Contains(t, pathValue, "/openai/deployments/test-deployment/chat/completions", "Path should contain Azure deployment path")
require.Equal(t, "/openai/deployments/test-deployment/chat/completions?api-version=2024-02-15-preview", pathValue, "Path should equal Azure deployment path")
// 验证Content-Length是否被删除
_, hasContentLength := test.GetHeaderValue(requestHeaders, "Content-Length")
@@ -443,8 +443,7 @@ func RunAzureOnHttpRequestBodyTests(t *testing.T) {
requestHeaders := host.GetRequestHeaders()
pathValue, hasPath := test.GetHeaderValue(requestHeaders, ":path")
require.True(t, hasPath, "Path header should exist")
require.Contains(t, pathValue, "/openai/deployments/test-deployment/chat/completions", "Path should contain Azure deployment path")
require.Contains(t, pathValue, "api-version=2024-02-15-preview", "Path should contain API version")
require.Equal(t, pathValue, "/openai/deployments/test-deployment/chat/completions?api-version=2024-02-15-preview", "Path should contain Azure deployment path")
})
// 测试Azure OpenAI请求体处理不同模型
@@ -577,7 +576,7 @@ func RunAzureOnHttpRequestBodyTests(t *testing.T) {
requestHeaders := host.GetRequestHeaders()
pathValue, hasPath := test.GetHeaderValue(requestHeaders, ":path")
require.True(t, hasPath, "Path header should exist")
require.Contains(t, pathValue, "/openai/deployments/deployment-only/chat/completions", "Path should use default deployment")
require.Equal(t, pathValue, "/openai/deployments/deployment-only/chat/completions?api-version=2024-02-15-preview", "Path should use default deployment")
})
// 测试Azure OpenAI请求体处理仅域名配置
@@ -613,7 +612,42 @@ func RunAzureOnHttpRequestBodyTests(t *testing.T) {
requestHeaders := host.GetRequestHeaders()
pathValue, hasPath := test.GetHeaderValue(requestHeaders, ":path")
require.True(t, hasPath, "Path header should exist")
require.Contains(t, pathValue, "/openai/deployments/gpt-3.5-turbo/chat/completions", "Path should use model from request body")
require.Equal(t, pathValue, "/openai/deployments/gpt-3.5-turbo/chat/completions?api-version=2024-02-15-preview", "Path should use model from request body")
})
// 测试Azure OpenAI模型无关请求处理仅域名配置
t.Run("azure domain only model independent", func(t *testing.T) {
host, status := test.NewTestHost(azureDomainOnlyConfig)
defer host.Reset()
require.Equal(t, types.OnPluginStartStatusOK, status)
// 设置请求头
action := host.CallOnHttpRequestHeaders([][2]string{
{":authority", "example.com"},
{":path", "/v1/files?limit=10&purpose=assistants"},
{":method", "GET"},
})
require.Equal(t, types.HeaderStopIteration, action)
// 验证请求路径是否使用模型占位符
requestHeaders := host.GetRequestHeaders()
pathValue, hasPath := test.GetHeaderValue(requestHeaders, ":path")
require.True(t, hasPath, "Path header should exist")
require.Equal(t, pathValue, "/openai/files?limit=10&purpose=assistants&api-version=2024-02-15-preview", "Path should have api-version appended")
// 设置请求头
action = host.CallOnHttpRequestHeaders([][2]string{
{":authority", "example.com"},
{":path", "/v1/files?"},
{":method", "GET"},
})
require.Equal(t, types.HeaderStopIteration, action)
// 验证请求路径是否使用模型占位符
requestHeaders = host.GetRequestHeaders()
pathValue, hasPath = test.GetHeaderValue(requestHeaders, ":path")
require.True(t, hasPath, "Path header should exist")
require.Equal(t, pathValue, "/openai/files?api-version=2024-02-15-preview", "Path should have api-version appended")
})
})
}
@@ -827,10 +861,8 @@ func RunAzureBasePathHandlingTests(t *testing.T) {
require.NotContains(t, pathValue, "/azure-gpt4",
"After body stage: basePath should be removed from path")
// 在 openai 协议下,路径会被转换为 Azure 的路径格式
require.Contains(t, pathValue, "/openai/deployments/gpt-4/chat/completions",
require.Equal(t, pathValue, "/openai/deployments/gpt-4/chat/completions?api-version=2024-02-15-preview",
"Path should be transformed to Azure format")
require.Contains(t, pathValue, "api-version=2024-02-15-preview",
"Path should contain API version")
})
// 测试 basePath prepend 在 original 协议下能正常工作

View File

@@ -442,6 +442,186 @@ func RunBedrockOnHttpResponseHeadersTests(t *testing.T) {
})
}
func RunBedrockToolCallTests(t *testing.T) {
test.RunTest(t, func(t *testing.T) {
// Test single tool call conversion (regression test)
t.Run("bedrock single tool call conversion", func(t *testing.T) {
host, status := test.NewTestHost(bedrockApiTokenConfig)
defer host.Reset()
require.Equal(t, types.OnPluginStartStatusOK, status)
action := host.CallOnHttpRequestHeaders([][2]string{
{":authority", "example.com"},
{":path", "/v1/chat/completions"},
{":method", "POST"},
{"Content-Type", "application/json"},
})
require.Equal(t, types.HeaderStopIteration, action)
requestBody := `{
"model": "gpt-4",
"messages": [
{"role": "user", "content": "What is the weather in Beijing?"},
{"role": "assistant", "content": "Let me check the weather for you.", "tool_calls": [{"id": "call_001", "type": "function", "function": {"name": "get_weather", "arguments": "{\"city\":\"Beijing\"}"}}]},
{"role": "tool", "content": "Sunny, 25°C", "tool_call_id": "call_001"}
],
"tools": [{"type": "function", "function": {"name": "get_weather", "description": "Get weather info", "parameters": {"type": "object", "properties": {"city": {"type": "string"}}}}}]
}`
action = host.CallOnHttpRequestBody([]byte(requestBody))
require.Equal(t, types.ActionContinue, action)
processedBody := host.GetRequestBody()
require.NotNil(t, processedBody)
var bodyMap map[string]interface{}
err := json.Unmarshal(processedBody, &bodyMap)
require.NoError(t, err)
messages := bodyMap["messages"].([]interface{})
// messages[0] = user, messages[1] = assistant with toolUse, messages[2] = user with toolResult
require.Len(t, messages, 3, "Should have 3 messages: user, assistant, user(toolResult)")
// Verify assistant message has exactly 1 toolUse
assistantMsg := messages[1].(map[string]interface{})
require.Equal(t, "assistant", assistantMsg["role"])
assistantContent := assistantMsg["content"].([]interface{})
require.Len(t, assistantContent, 1, "Assistant should have 1 content block")
toolUseBlock := assistantContent[0].(map[string]interface{})
require.Contains(t, toolUseBlock, "toolUse", "Content block should contain toolUse")
// Verify tool result message
toolResultMsg := messages[2].(map[string]interface{})
require.Equal(t, "user", toolResultMsg["role"])
toolResultContent := toolResultMsg["content"].([]interface{})
require.Len(t, toolResultContent, 1, "Tool result message should have 1 content block")
require.Contains(t, toolResultContent[0].(map[string]interface{}), "toolResult", "Content block should contain toolResult")
})
// Test multiple parallel tool calls conversion.
// An OpenAI-format assistant message carrying two tool_calls must be
// converted into a single Bedrock assistant message with two toolUse
// content blocks, and the two following "tool"-role result messages must
// be merged into ONE user message containing two toolResult blocks
// (asserted below; merging keeps the user/assistant turn sequence intact).
t.Run("bedrock multiple parallel tool calls conversion", func(t *testing.T) {
	host, status := test.NewTestHost(bedrockApiTokenConfig)
	defer host.Reset()
	require.Equal(t, types.OnPluginStartStatusOK, status)
	// Run the request-header phase first so the plugin is primed to
	// transform the body that follows.
	action := host.CallOnHttpRequestHeaders([][2]string{
		{":authority", "example.com"},
		{":path", "/v1/chat/completions"},
		{":method", "POST"},
		{"Content-Type", "application/json"},
	})
	require.Equal(t, types.HeaderStopIteration, action)
	// OpenAI request: one assistant turn with TWO parallel tool calls,
	// followed by one tool-result message per call_id.
	requestBody := `{
"model": "gpt-4",
"messages": [
{"role": "user", "content": "What is the weather in Beijing and Shanghai?"},
{"role": "assistant", "content": "Let me check both cities.", "tool_calls": [{"id": "call_001", "type": "function", "function": {"name": "get_weather", "arguments": "{\"city\":\"Beijing\"}"}}, {"id": "call_002", "type": "function", "function": {"name": "get_weather", "arguments": "{\"city\":\"Shanghai\"}"}}]},
{"role": "tool", "content": "Sunny, 25°C", "tool_call_id": "call_001"},
{"role": "tool", "content": "Cloudy, 22°C", "tool_call_id": "call_002"}
],
"tools": [{"type": "function", "function": {"name": "get_weather", "description": "Get weather info", "parameters": {"type": "object", "properties": {"city": {"type": "string"}}}}}]
}`
	action = host.CallOnHttpRequestBody([]byte(requestBody))
	require.Equal(t, types.ActionContinue, action)
	processedBody := host.GetRequestBody()
	require.NotNil(t, processedBody)
	var bodyMap map[string]interface{}
	err := json.Unmarshal(processedBody, &bodyMap)
	require.NoError(t, err)
	messages := bodyMap["messages"].([]interface{})
	// messages[0] = user, messages[1] = assistant with 2 toolUse, messages[2] = user with 2 toolResult
	require.Len(t, messages, 3, "Should have 3 messages: user, assistant, user(toolResults merged)")
	// Verify assistant message has 2 toolUse blocks, one per parallel call,
	// each preserving the original name and call id (as toolUseId).
	assistantMsg := messages[1].(map[string]interface{})
	require.Equal(t, "assistant", assistantMsg["role"])
	assistantContent := assistantMsg["content"].([]interface{})
	require.Len(t, assistantContent, 2, "Assistant should have 2 content blocks for parallel tool calls")
	firstToolUse := assistantContent[0].(map[string]interface{})["toolUse"].(map[string]interface{})
	require.Equal(t, "get_weather", firstToolUse["name"])
	require.Equal(t, "call_001", firstToolUse["toolUseId"])
	secondToolUse := assistantContent[1].(map[string]interface{})["toolUse"].(map[string]interface{})
	require.Equal(t, "get_weather", secondToolUse["name"])
	require.Equal(t, "call_002", secondToolUse["toolUseId"])
	// Verify tool results are merged into a single user message, with the
	// toolUseId of each block matching its originating call.
	toolResultMsg := messages[2].(map[string]interface{})
	require.Equal(t, "user", toolResultMsg["role"])
	toolResultContent := toolResultMsg["content"].([]interface{})
	require.Len(t, toolResultContent, 2, "Tool results should be merged into 2 content blocks in one user message")
	firstResult := toolResultContent[0].(map[string]interface{})["toolResult"].(map[string]interface{})
	require.Equal(t, "call_001", firstResult["toolUseId"])
	secondResult := toolResultContent[1].(map[string]interface{})["toolResult"].(map[string]interface{})
	require.Equal(t, "call_002", secondResult["toolUseId"])
})
// Test tool call with text content mixed.
// A conversation interleaving a tool-call round with plain text turns must
// keep all five turns after conversion, with roles alternating
// user/assistant (tool results become user-role messages in Bedrock form).
t.Run("bedrock tool call with text content mixed", func(t *testing.T) {
	host, status := test.NewTestHost(bedrockApiTokenConfig)
	defer host.Reset()
	require.Equal(t, types.OnPluginStartStatusOK, status)
	// Run the request-header phase so the plugin processes the body next.
	action := host.CallOnHttpRequestHeaders([][2]string{
		{":authority", "example.com"},
		{":path", "/v1/chat/completions"},
		{":method", "POST"},
		{"Content-Type", "application/json"},
	})
	require.Equal(t, types.HeaderStopIteration, action)
	// OpenAI request: tool-call round (assistant -> tool) followed by a
	// plain assistant text answer and a final user message.
	requestBody := `{
"model": "gpt-4",
"messages": [
{"role": "user", "content": "What is the weather in Beijing?"},
{"role": "assistant", "content": "Let me check.", "tool_calls": [{"id": "call_001", "type": "function", "function": {"name": "get_weather", "arguments": "{\"city\":\"Beijing\"}"}}]},
{"role": "tool", "content": "Sunny, 25°C", "tool_call_id": "call_001"},
{"role": "assistant", "content": "The weather in Beijing is sunny with 25°C."},
{"role": "user", "content": "Thanks!"}
],
"tools": [{"type": "function", "function": {"name": "get_weather", "description": "Get weather info", "parameters": {"type": "object", "properties": {"city": {"type": "string"}}}}}]
}`
	action = host.CallOnHttpRequestBody([]byte(requestBody))
	require.Equal(t, types.ActionContinue, action)
	processedBody := host.GetRequestBody()
	require.NotNil(t, processedBody)
	var bodyMap map[string]interface{}
	err := json.Unmarshal(processedBody, &bodyMap)
	require.NoError(t, err)
	messages := bodyMap["messages"].([]interface{})
	// messages[0] = user, messages[1] = assistant(toolUse), messages[2] = user(toolResult),
	// messages[3] = assistant(text), messages[4] = user(text)
	require.Len(t, messages, 5, "Should have 5 messages in mixed tool call and text scenario")
	// Verify message roles alternate correctly.
	require.Equal(t, "user", messages[0].(map[string]interface{})["role"])
	require.Equal(t, "assistant", messages[1].(map[string]interface{})["role"])
	require.Equal(t, "user", messages[2].(map[string]interface{})["role"])
	require.Equal(t, "assistant", messages[3].(map[string]interface{})["role"])
	require.Equal(t, "user", messages[4].(map[string]interface{})["role"])
	// Verify the plain assistant answer (messages[3]) survived as a single
	// text content block and kept its original wording.
	assistantTextMsg := messages[3].(map[string]interface{})
	assistantTextContent := assistantTextMsg["content"].([]interface{})
	require.Len(t, assistantTextContent, 1)
	require.Contains(t, assistantTextContent[0].(map[string]interface{}), "text", "Text assistant message should have text content")
	require.Contains(t, assistantTextContent[0].(map[string]interface{})["text"], "sunny", "Text content should contain weather info")
})
})
}
func RunBedrockOnHttpResponseBodyTests(t *testing.T) {
test.RunTest(t, func(t *testing.T) {
// Test Bedrock response body processing

View File

@@ -0,0 +1,317 @@
package test
import (
"encoding/json"
"testing"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
"github.com/higress-group/wasm-go/pkg/test"
"github.com/stretchr/testify/require"
)
// Claude standard mode config
var claudeStandardConfig = func() json.RawMessage {
data, _ := json.Marshal(map[string]interface{}{
"provider": map[string]interface{}{
"type": "claude",
"apiTokens": []string{"sk-ant-api-key-123"},
},
})
return data
}()
// Claude Code mode config
var claudeCodeModeConfig = func() json.RawMessage {
data, _ := json.Marshal(map[string]interface{}{
"provider": map[string]interface{}{
"type": "claude",
"apiTokens": []string{"sk-ant-oat01-oauth-token-456"},
"claudeCodeMode": true,
},
})
return data
}()
// Claude Code mode config with custom apiVersion
var claudeCodeModeWithVersionConfig = func() json.RawMessage {
data, _ := json.Marshal(map[string]interface{}{
"provider": map[string]interface{}{
"type": "claude",
"apiTokens": []string{"sk-ant-oat01-oauth-token-789"},
"claudeCodeMode": true,
"claudeVersion": "2024-01-01",
},
})
return data
}()
// Claude config without token (should fail validation)
var claudeNoTokenConfig = func() json.RawMessage {
data, _ := json.Marshal(map[string]interface{}{
"provider": map[string]interface{}{
"type": "claude",
},
})
return data
}()
// RunClaudeParseConfigTests verifies plugin start-up behavior for Claude
// provider configurations: valid standard and Claude Code configs are
// accepted, while a token-less config fails validation.
func RunClaudeParseConfigTests(t *testing.T) {
	test.RunGoTest(t, func(t *testing.T) {
		t.Run("claude standard config", func(t *testing.T) {
			h, startStatus := test.NewTestHost(claudeStandardConfig)
			defer h.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, startStatus)
			matchCfg, cfgErr := h.GetMatchConfig()
			require.NoError(t, cfgErr)
			require.NotNil(t, matchCfg)
		})
		t.Run("claude code mode config", func(t *testing.T) {
			h, startStatus := test.NewTestHost(claudeCodeModeConfig)
			defer h.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, startStatus)
			matchCfg, cfgErr := h.GetMatchConfig()
			require.NoError(t, cfgErr)
			require.NotNil(t, matchCfg)
		})
		t.Run("claude config without token fails", func(t *testing.T) {
			h, startStatus := test.NewTestHost(claudeNoTokenConfig)
			defer h.Reset()
			require.Equal(t, types.OnPluginStartStatusFailed, startStatus)
		})
	})
}
// RunClaudeOnHttpRequestHeadersTests verifies the request-header rewriting
// for the Claude provider: standard mode authenticates with x-api-key,
// while Claude Code mode switches to Bearer authorization, adds CLI-style
// headers, appends a beta query parameter, and honors a custom version.
func RunClaudeOnHttpRequestHeadersTests(t *testing.T) {
	test.RunTest(t, func(t *testing.T) {
		t.Run("claude standard mode uses x-api-key", func(t *testing.T) {
			host, status := test.NewTestHost(claudeStandardConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "api.anthropic.com"},
				{":path", "/v1/chat/completions"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			// Standard mode authenticates via x-api-key and pins the
			// default anthropic-version.
			require.True(t, test.HasHeaderWithValue(requestHeaders, "x-api-key", "sk-ant-api-key-123"))
			require.True(t, test.HasHeaderWithValue(requestHeaders, "anthropic-version", "2023-06-01"))
			// Should NOT have Claude Code specific headers
			_, hasAuth := test.GetHeaderValue(requestHeaders, "authorization")
			require.False(t, hasAuth, "standard mode should not have authorization header")
			_, hasXApp := test.GetHeaderValue(requestHeaders, "x-app")
			require.False(t, hasXApp, "standard mode should not have x-app header")
		})
		t.Run("claude code mode uses bearer authorization", func(t *testing.T) {
			host, status := test.NewTestHost(claudeCodeModeConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "api.anthropic.com"},
				{":path", "/v1/chat/completions"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			// Claude Code mode should use Bearer authorization
			require.True(t, test.HasHeaderWithValue(requestHeaders, "authorization", "Bearer sk-ant-oat01-oauth-token-456"))
			// Should NOT have x-api-key in Claude Code mode
			_, hasXApiKey := test.GetHeaderValue(requestHeaders, "x-api-key")
			require.False(t, hasXApiKey, "claude code mode should not have x-api-key header")
			// Should have Claude Code specific headers (CLI identity,
			// user-agent, beta feature flags, default version).
			require.True(t, test.HasHeaderWithValue(requestHeaders, "x-app", "cli"))
			require.True(t, test.HasHeaderWithValue(requestHeaders, "user-agent", "claude-cli/2.1.2 (external, cli)"))
			require.True(t, test.HasHeaderWithValue(requestHeaders, "anthropic-beta", "oauth-2025-04-20,interleaved-thinking-2025-05-14,claude-code-20250219"))
			require.True(t, test.HasHeaderWithValue(requestHeaders, "anthropic-version", "2023-06-01"))
		})
		t.Run("claude code mode adds beta query param", func(t *testing.T) {
			host, status := test.NewTestHost(claudeCodeModeConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "api.anthropic.com"},
				{":path", "/v1/chat/completions"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			// The rewritten :path must carry the beta=true query parameter.
			path, found := test.GetHeaderValue(requestHeaders, ":path")
			require.True(t, found)
			require.Contains(t, path, "beta=true", "claude code mode should add beta=true query param")
		})
		t.Run("claude code mode with custom version", func(t *testing.T) {
			host, status := test.NewTestHost(claudeCodeModeWithVersionConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "api.anthropic.com"},
				{":path", "/v1/chat/completions"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			// claudeVersion from the config overrides the default version.
			require.True(t, test.HasHeaderWithValue(requestHeaders, "anthropic-version", "2024-01-01"))
		})
	})
}
// RunClaudeOnHttpRequestBodyTests verifies request-body handling for the
// Claude provider: standard mode leaves the body untouched, while Claude
// Code mode injects the default Claude Code system prompt (with ephemeral
// cache_control) unless the request already supplies a system prompt.
func RunClaudeOnHttpRequestBodyTests(t *testing.T) {
	test.RunTest(t, func(t *testing.T) {
		t.Run("claude standard mode does not inject defaults", func(t *testing.T) {
			host, status := test.NewTestHost(claudeStandardConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			// Header phase first; body processing follows.
			host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "api.anthropic.com"},
				{":path", "/v1/chat/completions"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
			})
			body := `{
"model": "claude-sonnet-4-5-20250929",
"max_tokens": 8192,
"stream": true,
"messages": [
{"role": "user", "content": "Hello"}
]
}`
			action := host.CallOnHttpRequestBody([]byte(body))
			require.Equal(t, types.ActionContinue, action)
			processedBody := host.GetRequestBody()
			var request map[string]interface{}
			err := json.Unmarshal(processedBody, &request)
			require.NoError(t, err)
			// Standard mode should NOT inject system prompt or tools
			_, hasSystem := request["system"]
			require.False(t, hasSystem, "standard mode should not inject system prompt")
			tools, hasTools := request["tools"]
			if hasTools {
				// A tools field may exist but must be empty in standard mode.
				toolsArr, ok := tools.([]interface{})
				require.True(t, ok)
				require.Empty(t, toolsArr, "standard mode should not inject tools")
			}
		})
		t.Run("claude code mode injects default system prompt", func(t *testing.T) {
			host, status := test.NewTestHost(claudeCodeModeConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "api.anthropic.com"},
				{":path", "/v1/chat/completions"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
			})
			// No system prompt in the request: the default one is injected.
			body := `{
"model": "claude-sonnet-4-5-20250929",
"max_tokens": 8192,
"stream": true,
"messages": [
{"role": "user", "content": "List files"}
]
}`
			action := host.CallOnHttpRequestBody([]byte(body))
			require.Equal(t, types.ActionContinue, action)
			processedBody := host.GetRequestBody()
			var request map[string]interface{}
			err := json.Unmarshal(processedBody, &request)
			require.NoError(t, err)
			// Claude Code mode should inject system prompt as an array of
			// content blocks, not as a plain string.
			system, hasSystem := request["system"]
			require.True(t, hasSystem, "claude code mode should inject system prompt")
			systemArr, ok := system.([]interface{})
			require.True(t, ok, "system should be an array in claude code mode")
			require.Len(t, systemArr, 1)
			systemBlock, ok := systemArr[0].(map[string]interface{})
			require.True(t, ok)
			require.Equal(t, "text", systemBlock["type"])
			require.Equal(t, "You are Claude Code, Anthropic's official CLI for Claude.", systemBlock["text"])
			// Should have cache_control of type "ephemeral" so the injected
			// prompt participates in prompt caching.
			cacheControl, hasCacheControl := systemBlock["cache_control"]
			require.True(t, hasCacheControl, "system prompt should have cache_control")
			cacheControlMap, ok := cacheControl.(map[string]interface{})
			require.True(t, ok)
			require.Equal(t, "ephemeral", cacheControlMap["type"])
		})
		t.Run("claude code mode preserves existing system prompt", func(t *testing.T) {
			host, status := test.NewTestHost(claudeCodeModeConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "api.anthropic.com"},
				{":path", "/v1/chat/completions"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
			})
			// Request already contains a system-role message; it must win
			// over the default Claude Code prompt.
			body := `{
"model": "claude-sonnet-4-5-20250929",
"max_tokens": 8192,
"messages": [
{"role": "system", "content": "You are a custom assistant."},
{"role": "user", "content": "Hello"}
]
}`
			action := host.CallOnHttpRequestBody([]byte(body))
			require.Equal(t, types.ActionContinue, action)
			processedBody := host.GetRequestBody()
			var request map[string]interface{}
			err := json.Unmarshal(processedBody, &request)
			require.NoError(t, err)
			// Should preserve custom system prompt (not default)
			system, hasSystem := request["system"]
			require.True(t, hasSystem)
			systemArr, ok := system.([]interface{})
			require.True(t, ok)
			require.Len(t, systemArr, 1)
			systemBlock, ok := systemArr[0].(map[string]interface{})
			require.True(t, ok)
			require.Equal(t, "You are a custom assistant.", systemBlock["text"])
		})
	})
}
// Note: Response headers tests are skipped as they require complex mocking
// The response header transformation is covered by integration tests

View File

@@ -0,0 +1,292 @@
package test
import (
"encoding/json"
"strings"
"testing"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
"github.com/higress-group/wasm-go/pkg/test"
"github.com/stretchr/testify/require"
)
// 测试配置:多 API Token 配置(用于测试 consumer affinity
var multiTokenOpenAIConfig = func() json.RawMessage {
data, _ := json.Marshal(map[string]interface{}{
"provider": map[string]interface{}{
"type": "openai",
"apiTokens": []string{"sk-token-1", "sk-token-2", "sk-token-3"},
"modelMapping": map[string]string{
"*": "gpt-4",
},
},
})
return data
}()
// 测试配置:单 API Token 配置
var singleTokenOpenAIConfig = func() json.RawMessage {
data, _ := json.Marshal(map[string]interface{}{
"provider": map[string]interface{}{
"type": "openai",
"apiTokens": []string{"sk-single-token"},
"modelMapping": map[string]string{
"*": "gpt-4",
},
},
})
return data
}()
// RunConsumerAffinityParseConfigTests checks that a multi-token provider
// configuration is accepted at plugin start-up and yields a match config.
func RunConsumerAffinityParseConfigTests(t *testing.T) {
	test.RunGoTest(t, func(t *testing.T) {
		t.Run("multi token config", func(t *testing.T) {
			h, startStatus := test.NewTestHost(multiTokenOpenAIConfig)
			defer h.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, startStatus)
			matchCfg, cfgErr := h.GetMatchConfig()
			require.NoError(t, cfgErr)
			require.NotNil(t, matchCfg)
		})
	})
}
// RunConsumerAffinityOnHttpRequestHeadersTests verifies consumer-affinity
// token selection: for stateful OpenAI APIs (responses, files, batches,
// fine_tuning) a request carrying x-mse-consumer must still resolve to one
// of the configured tokens, the same consumer must consistently receive the
// same token, and different consumers should spread across tokens.
func RunConsumerAffinityOnHttpRequestHeadersTests(t *testing.T) {
	test.RunTest(t, func(t *testing.T) {
		// Stateful API (responses): consumer affinity token selection.
		t.Run("stateful api responses with consumer header", func(t *testing.T) {
			host, status := test.NewTestHost(multiTokenOpenAIConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			// Send the request with an x-mse-consumer header.
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "example.com"},
				{":path", "/v1/responses"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
				{"x-mse-consumer", "consumer-alice"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			require.NotNil(t, requestHeaders)
			// Verify the Authorization header uses one of the configured tokens.
			authValue, hasAuth := test.GetHeaderValue(requestHeaders, "Authorization")
			require.True(t, hasAuth, "Authorization header should exist")
			require.True(t, strings.Contains(authValue, "sk-token-"), "Authorization should contain one of the tokens")
		})
		// Stateful API (files): consumer affinity applies as well.
		t.Run("stateful api files with consumer header", func(t *testing.T) {
			host, status := test.NewTestHost(multiTokenOpenAIConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "example.com"},
				{":path", "/v1/files"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
				{"x-mse-consumer", "consumer-files"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			require.NotNil(t, requestHeaders)
			authValue, hasAuth := test.GetHeaderValue(requestHeaders, "Authorization")
			require.True(t, hasAuth, "Authorization header should exist")
			require.True(t, strings.Contains(authValue, "sk-token-"), "Authorization should contain one of the tokens")
		})
		// Stateful API (batches): consumer affinity applies as well.
		t.Run("stateful api batches with consumer header", func(t *testing.T) {
			host, status := test.NewTestHost(multiTokenOpenAIConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "example.com"},
				{":path", "/v1/batches"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
				{"x-mse-consumer", "consumer-batches"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			require.NotNil(t, requestHeaders)
			authValue, hasAuth := test.GetHeaderValue(requestHeaders, "Authorization")
			require.True(t, hasAuth, "Authorization header should exist")
			require.True(t, strings.Contains(authValue, "sk-token-"), "Authorization should contain one of the tokens")
		})
		// Stateful API (fine_tuning): consumer affinity applies as well.
		t.Run("stateful api fine_tuning with consumer header", func(t *testing.T) {
			host, status := test.NewTestHost(multiTokenOpenAIConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "example.com"},
				{":path", "/v1/fine_tuning/jobs"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
				{"x-mse-consumer", "consumer-finetuning"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			require.NotNil(t, requestHeaders)
			authValue, hasAuth := test.GetHeaderValue(requestHeaders, "Authorization")
			require.True(t, hasAuth, "Authorization header should exist")
			require.True(t, strings.Contains(authValue, "sk-token-"), "Authorization should contain one of the tokens")
		})
		// Non-stateful API (chat completions) still works with a consumer header.
		t.Run("non stateful api chat completions works normally", func(t *testing.T) {
			host, status := test.NewTestHost(multiTokenOpenAIConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "example.com"},
				{":path", "/v1/chat/completions"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
				{"x-mse-consumer", "consumer-chat"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			require.NotNil(t, requestHeaders)
			authValue, hasAuth := test.GetHeaderValue(requestHeaders, "Authorization")
			require.True(t, hasAuth, "Authorization header should exist")
			require.True(t, strings.Contains(authValue, "sk-token-"), "Authorization should contain one of the tokens")
		})
		// A stateful API request WITHOUT the consumer header still gets a token.
		t.Run("stateful api without consumer header works normally", func(t *testing.T) {
			host, status := test.NewTestHost(multiTokenOpenAIConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "example.com"},
				{":path", "/v1/responses"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			require.NotNil(t, requestHeaders)
			authValue, hasAuth := test.GetHeaderValue(requestHeaders, "Authorization")
			require.True(t, hasAuth, "Authorization header should exist")
			require.True(t, strings.Contains(authValue, "sk-token-"), "Authorization should contain one of the tokens")
		})
		// With a single configured token, that token is always used.
		t.Run("single token always used", func(t *testing.T) {
			host, status := test.NewTestHost(singleTokenOpenAIConfig)
			defer host.Reset()
			require.Equal(t, types.OnPluginStartStatusOK, status)
			action := host.CallOnHttpRequestHeaders([][2]string{
				{":authority", "example.com"},
				{":path", "/v1/responses"},
				{":method", "POST"},
				{"Content-Type", "application/json"},
				{"x-mse-consumer", "consumer-test"},
			})
			require.Equal(t, types.HeaderStopIteration, action)
			requestHeaders := host.GetRequestHeaders()
			authValue, _ := test.GetHeaderValue(requestHeaders, "Authorization")
			require.Contains(t, authValue, "sk-single-token", "Single token should always be used")
		})
		// The same consumer must receive the same token across repeated
		// requests (consumer affinity consistency).
		t.Run("same consumer gets consistent token across requests", func(t *testing.T) {
			consumer := "consumer-consistency-test"
			var firstToken string
			// Run 5 requests and verify this consumer always gets the same token.
			for i := 0; i < 5; i++ {
				host, status := test.NewTestHost(multiTokenOpenAIConfig)
				require.Equal(t, types.OnPluginStartStatusOK, status)
				action := host.CallOnHttpRequestHeaders([][2]string{
					{":authority", "example.com"},
					{":path", "/v1/responses"},
					{":method", "POST"},
					{"Content-Type", "application/json"},
					{"x-mse-consumer", consumer},
				})
				require.Equal(t, types.HeaderStopIteration, action)
				requestHeaders := host.GetRequestHeaders()
				authValue, hasAuth := test.GetHeaderValue(requestHeaders, "Authorization")
				require.True(t, hasAuth, "Authorization header should exist")
				require.True(t, strings.Contains(authValue, "sk-token-"), "Should use one of the configured tokens")
				if i == 0 {
					firstToken = authValue
				} else {
					require.Equal(t, firstToken, authValue, "Same consumer should get same token consistently (consumer affinity)")
				}
				// Reset inside the loop (not deferred) so each iteration
				// starts from a fresh host.
				host.Reset()
			}
		})
		// Different consumers may be hashed to different tokens.
		t.Run("different consumers get tokens based on hash", func(t *testing.T) {
			tokens := make(map[string]string)
			consumers := []string{"consumer-alpha", "consumer-beta", "consumer-gamma", "consumer-delta", "consumer-epsilon"}
			for _, consumer := range consumers {
				host, status := test.NewTestHost(multiTokenOpenAIConfig)
				require.Equal(t, types.OnPluginStartStatusOK, status)
				action := host.CallOnHttpRequestHeaders([][2]string{
					{":authority", "example.com"},
					{":path", "/v1/responses"},
					{":method", "POST"},
					{"Content-Type", "application/json"},
					{"x-mse-consumer", consumer},
				})
				require.Equal(t, types.HeaderStopIteration, action)
				requestHeaders := host.GetRequestHeaders()
				authValue, _ := test.GetHeaderValue(requestHeaders, "Authorization")
				tokens[consumer] = authValue
				host.Reset()
			}
			// Verify at least 2 distinct tokens were used (hash distribution).
			uniqueTokens := make(map[string]bool)
			for _, token := range tokens {
				uniqueTokens[token] = true
			}
			require.GreaterOrEqual(t, len(uniqueTokens), 2, "Different consumers should use at least 2 different tokens")
		})
	})
}

View File

@@ -24,6 +24,8 @@ description: AI可观测配置参考
| 名称 | 数据类型 | 填写要求 | 默认值 | 描述 |
|----------------|-------|------|-----|------------------------|
| `use_default_attributes` | bool | 非必填 | false | 是否使用默认完整属性配置,包含 messages、answer、question 等所有字段。适用于调试、审计场景 |
| `use_default_response_attributes` | bool | 非必填 | false | 是否使用轻量级默认属性配置(推荐),包含 model 和 token 统计,不缓冲流式响应体。适用于高并发生产环境 |
| `attributes` | []Attribute | 非必填 | - | 用户希望记录在log/span中的信息 |
| `disable_openai_usage` | bool | 非必填 | false | 非 openai 兼容协议时,model、token 的支持非标,配置为 true 时可以避免报错 |
| `value_length_limit` | int | 非必填 | 4000 | 记录的单个value的长度限制 |
@@ -67,6 +69,7 @@ Attribute 配置说明:
| 内置属性键 | 说明 | 适用场景 |
|---------|------|---------|
| `question` | 用户提问内容 | 支持 OpenAI/Claude 消息格式 |
| `system` | 系统提示词 | 支持 Claude `/v1/messages` 的顶层 system 字段 |
| `answer` | AI 回答内容 | 支持 OpenAI/Claude 消息格式,流式和非流式 |
| `tool_calls` | 工具调用信息 | OpenAI/Claude 工具调用 |
| `reasoning` | 推理过程 | OpenAI o1 等推理模型 |
@@ -332,6 +335,195 @@ attributes:
2. **性能分析**:分析推理 token 占比,评估推理模型的实际开销
3. **使用统计**:细粒度统计各类 token 的使用情况
## 流式响应观测能力
流式(Streaming)响应是 AI 对话的常见场景,插件提供了完善的流式观测支持,能够正确拼接和提取流式响应中的关键信息。
### 流式响应的挑战
流式响应将完整内容拆分为多个 SSE chunk 逐步返回,例如:
```
data: {"choices":[{"delta":{"content":"Hello"}}]}
data: {"choices":[{"delta":{"content":" 👋"}}]}
data: {"choices":[{"delta":{"content":"!"}}]}
data: [DONE]
```
要获取完整的回答内容,需要将各个 chunk 中的 `delta.content` 拼接起来。
### 自动拼接机制
插件针对不同类型的内容提供了自动拼接能力:
| 内容类型 | 拼接方式 | 说明 |
|---------|---------|------|
| `answer` | 文本追加(append) | 将各 chunk 的 `delta.content` 按顺序拼接成完整回答 |
| `reasoning` | 文本追加(append) | 将各 chunk 的 `delta.reasoning_content` 按顺序拼接 |
| `tool_calls` | 按 index 组装 | 识别每个工具调用的 `index`,分别拼接各自的 `arguments` |
#### answer 和 reasoning 拼接示例
流式响应:
```
data: {"choices":[{"delta":{"content":"你好"}}]}
data: {"choices":[{"delta":{"content":",我是"}}]}
data: {"choices":[{"delta":{"content":"AI助手"}}]}
```
最终提取的 `answer` 为:`"你好,我是AI助手"`
#### tool_calls 拼接示例
流式响应(多个并行工具调用):
```
data: {"choices":[{"delta":{"tool_calls":[{"index":0,"id":"call_001","function":{"name":"get_weather"}}]}}]}
data: {"choices":[{"delta":{"tool_calls":[{"index":1,"id":"call_002","function":{"name":"get_time"}}]}}]}
data: {"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\"city\":"}}]}}]}
data: {"choices":[{"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"Beijing\"}"}}]}}]}
data: {"choices":[{"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{\"city\":\"Shanghai\"}"}}]}}]}
```
最终提取的 `tool_calls` 为:
```json
[
{"index":0,"id":"call_001","function":{"name":"get_weather","arguments":"{\"city\":\"Beijing\"}"}},
{"index":1,"id":"call_002","function":{"name":"get_time","arguments":"{\"city\":\"Shanghai\"}"}}
]
```
### 使用默认配置快速启用
插件提供两种默认配置模式:
#### 轻量模式(推荐用于生产环境)
通过 `use_default_response_attributes: true` 启用轻量模式:
```yaml
use_default_response_attributes: true
```
此配置是**推荐的生产环境配置**,特别适合高并发、高延迟的场景:
| 字段 | 说明 |
|------|------|
| `model` | 模型名称(从请求体提取) |
| `reasoning_tokens` | 推理 token 数 |
| `cached_tokens` | 缓存命中 token 数 |
| `input_token_details` | 输入 token 详情 |
| `output_token_details` | 输出 token 详情 |
**为什么推荐轻量模式?**
LLM 请求有两个显著特点:**延迟高**(通常数秒到数十秒)和**请求体大**(多轮对话可能达到数百 KB 甚至 MB 级别)。
在高并发场景下,如果请求体和响应体都被缓存在内存中,积压的请求会占用大量内存:
- 假设 QPS=100,平均延迟=10秒,请求体=500KB
- 同时在处理的请求数 ≈ 100 × 10 = 1000 个
- 如果缓存完整请求体+响应体:1000 × 1.5MB ≈ **1.5GB 内存**
轻量模式通过以下方式降低内存占用:
- **缓冲请求体**:仅用于提取 `model` 字段(很小),不提取 `question``system``messages` 等大字段
- **不缓冲流式响应体**:不提取 `answer``reasoning``tool_calls` 等需要完整响应的字段
- **只统计 token**:从响应的 usage 字段提取 token 信息
**内存对比:**
| 场景 | 完整模式 | 轻量模式 |
|------|----------|----------|
| 单次请求 (1MB 请求 + 500KB 响应) | ~1.5MB | ~1MB(请求体) |
| 高并发 (100 QPS, 10s 延迟) | ~1.5GB | ~1GB |
| 超高并发 (1000 QPS, 10s 延迟) | ~15GB | ~10GB |
**注意**:轻量模式下 `chat_round` 字段会正常计算,`model` 会从请求体正常提取。
#### 完整模式
通过 `use_default_attributes: true` 可以一键启用完整的流式观测能力:
```yaml
use_default_attributes: true
```
此配置会自动记录以下字段,**但会缓冲完整的请求体和流式响应体**
| 字段 | 说明 | 内存影响 |
|------|------|----------|
| `messages` | 完整对话历史 | ⚠️ 可能很大 |
| `question` | 最后一条用户消息 | 需要缓冲请求体 |
| `system` | 系统提示词 | 需要缓冲请求体 |
| `answer` | AI 回答(自动拼接流式 chunk) | ⚠️ 需要缓冲响应体 |
| `reasoning` | 推理过程(自动拼接流式 chunk) | ⚠️ 需要缓冲响应体 |
| `tool_calls` | 工具调用(自动按 index 组装) | 需要缓冲响应体 |
| `reasoning_tokens` | 推理 token 数 | 无 |
| `cached_tokens` | 缓存命中 token 数 | 无 |
| `input_token_details` | 输入 token 详情 | 无 |
| `output_token_details` | 输出 token 详情 | 无 |
**注意**:完整模式适用于调试、审计等需要完整对话记录的场景,但在高并发生产环境可能消耗大量内存。
### 流式日志示例
启用默认配置后,一个流式请求的日志输出示例:
```json
{
"answer": "2 plus 2 equals 4.",
"question": "What is 2+2?",
"response_type": "stream",
"tool_calls": null,
"reasoning": null,
"model": "glm-4-flash",
"input_token": 10,
"output_token": 8,
"llm_first_token_duration": 425,
"llm_service_duration": 985,
"chat_id": "chat_abc123"
}
```
包含工具调用的流式日志示例:
```json
{
"answer": null,
"question": "What's the weather in Beijing?",
"response_type": "stream",
"tool_calls": [
{
"id": "call_abc123",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"location\": \"Beijing\"}"
}
}
],
"model": "glm-4-flash",
"input_token": 50,
"output_token": 15,
"llm_first_token_duration": 300,
"llm_service_duration": 500
}
```
### 流式特有指标
流式响应会额外记录以下指标:
- `llm_first_token_duration`:从请求发出到收到首个 token 的时间(首字延迟)
- `llm_stream_duration_count`:流式请求次数
可用于监控流式响应的用户体验:
```promql
# 平均首字延迟
irate(route_upstream_model_consumer_metric_llm_first_token_duration[5m])
/
irate(route_upstream_model_consumer_metric_llm_stream_duration_count[5m])
```
## 调试
### 验证 ai_log 内容

View File

@@ -105,6 +105,7 @@ const (
BuiltinAnswerKey = "answer"
BuiltinToolCallsKey = "tool_calls"
BuiltinReasoningKey = "reasoning"
BuiltinSystemKey = "system"
BuiltinReasoningTokens = "reasoning_tokens"
BuiltinCachedTokens = "cached_tokens"
BuiltinInputTokenDetails = "input_token_details"
@@ -115,6 +116,9 @@ const (
QuestionPathOpenAI = "messages.@reverse.0.content"
QuestionPathClaude = "messages.@reverse.0.content" // Claude uses same format
// System prompt paths (from request body)
SystemPathClaude = "system" // Claude /v1/messages has system as a top-level field
// Answer paths (from response body - non-streaming)
AnswerPathOpenAINonStreaming = "choices.0.message.content"
AnswerPathClaudeNonStreaming = "content.0.text"
@@ -123,10 +127,19 @@ const (
AnswerPathOpenAIStreaming = "choices.0.delta.content"
AnswerPathClaudeStreaming = "delta.text"
// Tool calls paths
// Tool calls paths (OpenAI format)
ToolCallsPathNonStreaming = "choices.0.message.tool_calls"
ToolCallsPathStreaming = "choices.0.delta.tool_calls"
// Claude/Anthropic tool calls paths (streaming)
ClaudeEventType = "type"
ClaudeContentBlockType = "content_block.type"
ClaudeContentBlockID = "content_block.id"
ClaudeContentBlockName = "content_block.name"
ClaudeContentBlockInput = "content_block.input"
ClaudeDeltaPartialJSON = "delta.partial_json"
ClaudeIndex = "index"
// Reasoning paths
ReasoningPathNonStreaming = "choices.0.message.reasoning_content"
ReasoningPathStreaming = "choices.0.delta.reasoning_content"
@@ -136,6 +149,7 @@ const (
)
// getDefaultAttributes returns the default attributes configuration for empty config
// This includes all attributes but may consume significant memory for large conversations
func getDefaultAttributes() []Attribute {
return []Attribute{
// Extract complete conversation history from request body
@@ -150,13 +164,19 @@ func getDefaultAttributes() []Attribute {
Key: BuiltinQuestionKey,
ApplyToLog: true,
},
{
Key: BuiltinSystemKey,
ApplyToLog: true,
},
{
Key: BuiltinAnswerKey,
ApplyToLog: true,
Rule: RuleAppend, // Streaming responses need to append content from all chunks
},
{
Key: BuiltinReasoningKey,
ApplyToLog: true,
Rule: RuleAppend, // Streaming responses need to append content from all chunks
},
{
Key: BuiltinToolCallsKey,
@@ -183,6 +203,34 @@ func getDefaultAttributes() []Attribute {
}
}
// getDefaultResponseAttributes returns the lightweight default attribute set
// intended for high-concurrency / high-latency production deployments.
// It only covers token statistics, which are read from the request context,
// so neither the request body nor the streaming response body needs to be
// buffered. Large fields (question, system, answer, reasoning, tool_calls)
// are intentionally omitted.
func getDefaultResponseAttributes() []Attribute {
	// Token-statistics keys; each is logged but requires no body buffering.
	tokenKeys := []string{
		BuiltinReasoningTokens,
		BuiltinCachedTokens,
		BuiltinInputTokenDetails,
		BuiltinOutputTokenDetails,
	}
	attrs := make([]Attribute, 0, len(tokenKeys))
	for _, key := range tokenKeys {
		attrs = append(attrs, Attribute{
			Key:        key,
			ApplyToLog: true,
		})
	}
	return attrs
}
// Default session ID headers in priority order
var defaultSessionHeaders = []string{
"x-openclaw-session-key",
@@ -225,14 +273,18 @@ type ToolCallFunction struct {
// StreamingToolCallsBuffer holds the state for assembling streaming tool calls
type StreamingToolCallsBuffer struct {
ToolCalls map[int]*ToolCall // keyed by index
ToolCalls map[int]*ToolCall // keyed by index (OpenAI format)
InToolBlock map[int]bool // tracks which indices are in tool_use blocks (Claude format)
ArgumentsBuffer map[int]string // buffers partial JSON arguments (Claude format)
}
// extractStreamingToolCalls extracts and assembles tool calls from streaming response chunks
// extractStreamingToolCalls extracts and assembles tool calls from streaming response chunks (OpenAI format)
func extractStreamingToolCalls(data []byte, buffer *StreamingToolCallsBuffer) *StreamingToolCallsBuffer {
if buffer == nil {
buffer = &StreamingToolCallsBuffer{
ToolCalls: make(map[int]*ToolCall),
ToolCalls: make(map[int]*ToolCall),
InToolBlock: make(map[int]bool),
ArgumentsBuffer: make(map[int]string),
}
}
@@ -273,6 +325,86 @@ func extractStreamingToolCalls(data []byte, buffer *StreamingToolCallsBuffer) *S
return buffer
}
// extractClaudeStreamingToolCalls extracts and assembles tool calls from Claude/Anthropic
// streaming response chunks.
//
// Claude streams a tool call as a sequence of SSE events sharing a block index:
//   - content_block_start (content_block.type == "tool_use"): carries the tool id/name
//     and an input placeholder (typically an empty object)
//   - content_block_delta: carries the JSON arguments as delta.partial_json fragments
//   - content_block_stop: marks the block complete
//
// The returned buffer is keyed by the Claude block index and may be shared with the
// OpenAI-format extractor, so all maps are initialized when the buffer is created here.
func extractClaudeStreamingToolCalls(data []byte, buffer *StreamingToolCallsBuffer) *StreamingToolCallsBuffer {
	if buffer == nil {
		buffer = &StreamingToolCallsBuffer{
			ToolCalls:       make(map[int]*ToolCall),
			InToolBlock:     make(map[int]bool),
			ArgumentsBuffer: make(map[int]string),
		}
	}
	chunks := bytes.Split(bytes.TrimSpace(wrapper.UnifySSEChunk(data)), []byte("\n\n"))
	for _, chunk := range chunks {
		// Skip chunks that are not recognizable Claude events.
		eventType := gjson.GetBytes(chunk, ClaudeEventType)
		if !eventType.Exists() {
			continue
		}
		switch eventType.String() {
		case "content_block_start":
			// Only tool_use blocks are relevant; text/other blocks are ignored.
			contentBlockType := gjson.GetBytes(chunk, ClaudeContentBlockType)
			if contentBlockType.Exists() && contentBlockType.String() == "tool_use" {
				index := int(gjson.GetBytes(chunk, ClaudeIndex).Int())
				// Create tool call entry
				tc := &ToolCall{Index: index}
				// Extract id and name
				if id := gjson.GetBytes(chunk, ClaudeContentBlockID).String(); id != "" {
					tc.ID = id
				}
				if name := gjson.GetBytes(chunk, ClaudeContentBlockName).String(); name != "" {
					tc.Function.Name = name
				}
				tc.Type = "tool_use"
				buffer.ToolCalls[index] = tc
				buffer.InToolBlock[index] = true
				buffer.ArgumentsBuffer[index] = ""
				// Seed the arguments buffer only when content_block_start carries a
				// NON-EMPTY input. Claude normally sends an empty-object placeholder
				// ("input": {}) here and streams the real arguments via partial_json
				// deltas; seeding "{}" in that case would corrupt the concatenated
				// result (e.g. `{}{"location":"Beijing"}` is not valid JSON).
				if input := gjson.GetBytes(chunk, ClaudeContentBlockInput); input.Exists() {
					if inputMap, ok := input.Value().(map[string]interface{}); ok && len(inputMap) > 0 {
						if jsonBytes, err := json.Marshal(inputMap); err == nil {
							buffer.ArgumentsBuffer[index] = string(jsonBytes)
						}
					}
				}
			}
		case "content_block_delta":
			// Accumulate partial JSON fragments, but only for indices we know are
			// tool_use blocks (text deltas at other indices are ignored).
			index := int(gjson.GetBytes(chunk, ClaudeIndex).Int())
			if buffer.InToolBlock[index] {
				partialJSON := gjson.GetBytes(chunk, ClaudeDeltaPartialJSON)
				if partialJSON.Exists() {
					buffer.ArgumentsBuffer[index] += partialJSON.String()
				}
			}
		case "content_block_stop":
			// Finalize the tool call: attach the fully accumulated argument string.
			index := int(gjson.GetBytes(chunk, ClaudeIndex).Int())
			if buffer.InToolBlock[index] {
				buffer.InToolBlock[index] = false
				if tc, exists := buffer.ToolCalls[index]; exists {
					tc.Function.Arguments = buffer.ArgumentsBuffer[index]
				}
			}
		}
	}
	return buffer
}
// getToolCallsFromBuffer converts the buffer to a sorted slice of tool calls
func getToolCallsFromBuffer(buffer *StreamingToolCallsBuffer) []ToolCall {
if buffer == nil || len(buffer.ToolCalls) == 0 {
@@ -317,6 +449,8 @@ type AIStatisticsConfig struct {
attributes []Attribute
// If there exist attributes extracted from streaming body, chunks should be buffered
shouldBufferStreamingBody bool
// If there exist attributes extracted from request body, request body should be buffered
shouldBufferRequestBody bool
// If disableOpenaiUsage is true, model/input_token/output_token logs will be skipped
disableOpenaiUsage bool
valueLengthLimit int
@@ -411,6 +545,8 @@ func isContentTypeEnabled(contentType string, enabledContentTypes []string) bool
func parseConfig(configJson gjson.Result, config *AIStatisticsConfig) error {
// Check if use_default_attributes is enabled
useDefaultAttributes := configJson.Get("use_default_attributes").Bool()
// Check if use_default_response_attributes is enabled (lightweight mode)
useDefaultResponseAttributes := configJson.Get("use_default_response_attributes").Bool()
// Parse tracing span attributes setting.
attributeConfigs := configJson.Get("attributes").Array()
@@ -430,6 +566,13 @@ func parseConfig(configJson gjson.Result, config *AIStatisticsConfig) error {
config.valueLengthLimit = 10485760 // 10MB
}
log.Infof("Using default attributes configuration")
} else if useDefaultResponseAttributes {
config.attributes = getDefaultResponseAttributes()
// Use a reasonable default for lightweight mode
if !configJson.Get("value_length_limit").Exists() {
config.valueLengthLimit = 4000
}
log.Infof("Using default response attributes configuration (lightweight mode)")
} else {
config.attributes = make([]Attribute, len(attributeConfigs))
for i, attributeConfig := range attributeConfigs {
@@ -439,15 +582,38 @@ func parseConfig(configJson gjson.Result, config *AIStatisticsConfig) error {
log.Errorf("parse config failed, %v", err)
return err
}
if attribute.ValueSource == ResponseStreamingBody {
config.shouldBufferStreamingBody = true
}
if attribute.Rule != "" && attribute.Rule != RuleFirst && attribute.Rule != RuleReplace && attribute.Rule != RuleAppend {
return errors.New("value of rule must be one of [nil, first, replace, append]")
}
config.attributes[i] = attribute
}
}
// Check if any attribute needs request body or streaming body buffering
for _, attribute := range config.attributes {
// Check for request body buffering
if attribute.ValueSource == RequestBody {
config.shouldBufferRequestBody = true
}
// Check for streaming body buffering (explicitly configured)
if attribute.ValueSource == ResponseStreamingBody {
config.shouldBufferStreamingBody = true
}
// For built-in attributes without explicit ValueSource, check default sources
if attribute.ValueSource == "" && isBuiltinAttribute(attribute.Key) {
defaultSources := getBuiltinAttributeDefaultSources(attribute.Key)
for _, src := range defaultSources {
if src == RequestBody {
config.shouldBufferRequestBody = true
}
// Only answer/reasoning/tool_calls need actual body buffering
// Token-related attributes are extracted from context, not from body
if src == ResponseStreamingBody && needsBodyBuffering(attribute.Key) {
config.shouldBufferStreamingBody = true
}
}
}
}
// Metric settings
config.counterMetrics = make(map[string]proxywasm.MetricCounter)
@@ -458,8 +624,8 @@ func parseConfig(configJson gjson.Result, config *AIStatisticsConfig) error {
pathSuffixes := configJson.Get("enable_path_suffixes").Array()
config.enablePathSuffixes = make([]string, 0, len(pathSuffixes))
// If use_default_attributes is enabled and enable_path_suffixes is not configured, use default path suffixes
if useDefaultAttributes && !configJson.Get("enable_path_suffixes").Exists() {
// If use_default_attributes or use_default_response_attributes is enabled and enable_path_suffixes is not configured, use default path suffixes
if (useDefaultAttributes || useDefaultResponseAttributes) && !configJson.Get("enable_path_suffixes").Exists() {
config.enablePathSuffixes = []string{"/completions", "/messages"}
log.Infof("Using default path suffixes: /completions, /messages")
} else {
@@ -527,6 +693,8 @@ func onHttpRequestHeaders(ctx wrapper.HttpContext, config AIStatisticsConfig) ty
ctx.SetContext(ConsumerKey, consumer)
}
// Always buffer request body to extract model field
// This is essential for metrics and logging
ctx.SetRequestBodyBufferLimit(defaultMaxBodyBytes)
// Extract session ID from headers
@@ -551,13 +719,21 @@ func onHttpRequestBody(ctx wrapper.HttpContext, config AIStatisticsConfig, body
return types.ActionContinue
}
// Set user defined log & span attributes.
setAttributeBySource(ctx, config, RequestBody, body)
// Set span attributes for ARMS.
// Only process request body if we need to extract attributes from it
if config.shouldBufferRequestBody && len(body) > 0 {
// Set user defined log & span attributes.
setAttributeBySource(ctx, config, RequestBody, body)
}
// Extract model from request body if available, otherwise try path
requestModel := "UNKNOWN"
if model := gjson.GetBytes(body, "model"); model.Exists() {
requestModel = model.String()
} else {
if len(body) > 0 {
if model := gjson.GetBytes(body, "model"); model.Exists() {
requestModel = model.String()
}
}
// If model not found in body, try to extract from path (Gemini style)
if requestModel == "UNKNOWN" {
requestPath := ctx.GetStringContext(RequestPath, "")
if strings.Contains(requestPath, "generateContent") || strings.Contains(requestPath, "streamGenerateContent") { // Google Gemini GenerateContent
reg := regexp.MustCompile(`^.*/(?P<api_version>[^/]+)/models/(?P<model>[^:]+):\w+Content$`)
@@ -569,21 +745,23 @@ func onHttpRequestBody(ctx wrapper.HttpContext, config AIStatisticsConfig, body
}
ctx.SetContext(tokenusage.CtxKeyRequestModel, requestModel)
setSpanAttribute(ArmsRequestModel, requestModel)
// Set the number of conversation rounds
// Set the number of conversation rounds (only if body is available)
userPromptCount := 0
if messages := gjson.GetBytes(body, "messages"); messages.Exists() && messages.IsArray() {
// OpenAI and Claude/Anthropic format - both use "messages" array with "role" field
for _, msg := range messages.Array() {
if msg.Get("role").String() == "user" {
userPromptCount += 1
if len(body) > 0 {
if messages := gjson.GetBytes(body, "messages"); messages.Exists() && messages.IsArray() {
// OpenAI and Claude/Anthropic format - both use "messages" array with "role" field
for _, msg := range messages.Array() {
if msg.Get("role").String() == "user" {
userPromptCount += 1
}
}
}
} else if contents := gjson.GetBytes(body, "contents"); contents.Exists() && contents.IsArray() {
// Google Gemini GenerateContent
for _, content := range contents.Array() {
if !content.Get("role").Exists() || content.Get("role").String() == "user" {
userPromptCount += 1
} else if contents := gjson.GetBytes(body, "contents"); contents.Exists() && contents.IsArray() {
// Google Gemini GenerateContent
for _, content := range contents.Array() {
if !content.Get("role").Exists() || content.Get("role").String() == "user" {
userPromptCount += 1
}
}
}
}
@@ -680,14 +858,14 @@ func onHttpStreamingBody(ctx wrapper.HttpContext, config AIStatisticsConfig, dat
responseEndTime := time.Now().UnixMilli()
ctx.SetUserAttribute(LLMServiceDuration, responseEndTime-requestStartTime)
// Set user defined log & span attributes.
// Set user defined log & span attributes from streaming body.
// Always call setAttributeBySource even if shouldBufferStreamingBody is false,
// because token-related attributes are extracted from context (not buffered body).
var streamingBodyBuffer []byte
if config.shouldBufferStreamingBody {
streamingBodyBuffer, ok := ctx.GetContext(CtxStreamingBodyBuffer).([]byte)
if !ok {
return data
}
setAttributeBySource(ctx, config, ResponseStreamingBody, streamingBodyBuffer)
streamingBodyBuffer, _ = ctx.GetContext(CtxStreamingBodyBuffer).([]byte)
}
setAttributeBySource(ctx, config, ResponseStreamingBody, streamingBodyBuffer)
// Write log
debugLogAiLog(ctx)
@@ -849,21 +1027,32 @@ func setAttributeBySource(ctx wrapper.HttpContext, config AIStatisticsConfig, so
// isBuiltinAttribute checks if the given key is a built-in attribute
func isBuiltinAttribute(key string) bool {
return key == BuiltinQuestionKey || key == BuiltinAnswerKey || key == BuiltinToolCallsKey || key == BuiltinReasoningKey ||
return key == BuiltinQuestionKey || key == BuiltinAnswerKey || key == BuiltinToolCallsKey || key == BuiltinReasoningKey || key == BuiltinSystemKey ||
key == BuiltinReasoningTokens || key == BuiltinCachedTokens ||
key == BuiltinInputTokenDetails || key == BuiltinOutputTokenDetails
}
// needsBodyBuffering reports whether the given built-in attribute can only be
// produced by buffering the response body. Token-statistics attributes are
// excluded: they are read from the request context (populated by
// tokenusage.GetTokenUsage) and never require body buffering.
func needsBodyBuffering(key string) bool {
	switch key {
	case BuiltinAnswerKey, BuiltinToolCallsKey, BuiltinReasoningKey:
		return true
	default:
		return false
	}
}
// getBuiltinAttributeDefaultSources returns the default value_source(s) for a built-in attribute
// Returns nil if the key is not a built-in attribute
// Note: Token-related attributes are extracted from context (set by tokenusage.GetTokenUsage),
// so they don't require body buffering even though they're processed during response phase.
func getBuiltinAttributeDefaultSources(key string) []string {
switch key {
case BuiltinQuestionKey:
case BuiltinQuestionKey, BuiltinSystemKey:
return []string{RequestBody}
case BuiltinAnswerKey, BuiltinToolCallsKey, BuiltinReasoningKey:
return []string{ResponseStreamingBody, ResponseBody}
case BuiltinReasoningTokens, BuiltinCachedTokens, BuiltinInputTokenDetails, BuiltinOutputTokenDetails:
// Token details are only available after response is received
// Token details are extracted from context (set by tokenusage.GetTokenUsage),
// not from body parsing. We use ResponseStreamingBody/ResponseBody to indicate
// they should be processed during response phase, but they don't require body buffering.
return []string{ResponseStreamingBody, ResponseBody}
default:
return nil
@@ -896,6 +1085,13 @@ func getBuiltinAttributeFallback(ctx wrapper.HttpContext, config AIStatisticsCon
return value
}
}
case BuiltinSystemKey:
if source == RequestBody {
// Try Claude /v1/messages format (system is a top-level field)
if value := gjson.GetBytes(body, SystemPathClaude).Value(); value != nil && value != "" {
return value
}
}
case BuiltinAnswerKey:
if source == ResponseStreamingBody {
// Try OpenAI format first
@@ -923,7 +1119,10 @@ func getBuiltinAttributeFallback(ctx wrapper.HttpContext, config AIStatisticsCon
if existingBuffer, ok := ctx.GetContext(CtxStreamingToolCallsBuffer).(*StreamingToolCallsBuffer); ok {
buffer = existingBuffer
}
// Try OpenAI format first
buffer = extractStreamingToolCalls(body, buffer)
// Also try Claude format (both formats can be checked)
buffer = extractClaudeStreamingToolCalls(body, buffer)
ctx.SetContext(CtxStreamingToolCallsBuffer, buffer)
// Also set tool_calls to user attributes so they appear in ai_log
@@ -1047,6 +1246,9 @@ func debugLogAiLog(ctx wrapper.HttpContext) {
if question := ctx.GetUserAttribute("question"); question != nil {
userAttrs["question"] = question
}
if system := ctx.GetUserAttribute("system"); system != nil {
userAttrs["system"] = system
}
if answer := ctx.GetUserAttribute("answer"); answer != nil {
userAttrs["answer"] = answer
}

View File

@@ -1,10 +1,14 @@
module jsonrpc-converter
go 1.24.3
go 1.24.1
replace github.com/alibaba/higress/plugins/wasm-go/pkg/mcp => ../../pkg/mcp
require (
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20250822030947-8345453fddd0
github.com/higress-group/wasm-go v1.0.4
github.com/alibaba/higress/plugins/wasm-go/pkg/mcp v0.0.0
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9
github.com/stretchr/testify v1.9.0
github.com/tidwall/gjson v1.18.0
)
@@ -15,6 +19,7 @@ require (
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/higress-group/gjson_template v0.0.0-20250413075336-4c4161ed428b // indirect
github.com/huandu/xstrings v1.5.0 // indirect
@@ -22,8 +27,10 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/tetratelabs/wazero v1.7.2 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/resp v0.1.1 // indirect

View File

@@ -20,10 +20,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/higress-group/gjson_template v0.0.0-20250413075336-4c4161ed428b h1:rRI9+ThQbe+nw4jUiYEyOFaREkXCMMW9k1X2gy2d6pE=
github.com/higress-group/gjson_template v0.0.0-20250413075336-4c4161ed428b/go.mod h1:rU3M+Tq5VrQOo0dxpKHGb03Ty0sdWIZfAH+YCOACx/Y=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20250822030947-8345453fddd0 h1:YGdj8KBzVjabU3STUfwMZghB+VlX6YLfJtLbrsWaOD0=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20250822030947-8345453fddd0/go.mod h1:tRI2LfMudSkKHhyv1uex3BWzcice2s/l8Ah8axporfA=
github.com/higress-group/wasm-go v1.0.4 h1:/GqbzCw4oWqJc8UbKEfF94E3/+4CPZGbzxpKo2L3Ldk=
github.com/higress-group/wasm-go v1.0.4/go.mod h1:B8C6+OlpnyYyZUBEdUXA7tYZYD+uwZTNjfkE5FywA+A=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2 h1:NY33OrWCJJ+DFiLc+lsBY4Ywor2Ik61ssk6qkGF8Ypo=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2/go.mod h1:tRI2LfMudSkKHhyv1uex3BWzcice2s/l8Ah8axporfA=
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9 h1:sUuUXZwr50l3W1St7MESlFmxmUAu+QUNNfJXx4P6bas=
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9/go.mod h1:uKVYICbRaxTlKqdm8E0dpjbysxM8uCPb9LV26hF3Km8=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
@@ -49,6 +49,8 @@ github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tetratelabs/wazero v1.7.2 h1:1+z5nXJNwMLPAWaTePFi49SSTL0IMx/i3Fg8Yc25GDc=
github.com/tetratelabs/wazero v1.7.2/go.mod h1:ytl6Zuh20R/eROuyDaGPkp82O9C/DJfXAwJfQ3X6/7Y=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=

View File

@@ -9,8 +9,8 @@ import (
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
"github.com/higress-group/wasm-go/pkg/log"
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
"github.com/higress-group/wasm-go/pkg/wrapper"
"github.com/tidwall/gjson"
)

View File

@@ -1,9 +1,15 @@
package main
import (
	"encoding/json"
	"strings"
	"testing"

	"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
	"github.com/higress-group/wasm-go/pkg/test"
	"github.com/stretchr/testify/require"
)
// TestTruncateString tests the truncateString function
func TestTruncateString(t *testing.T) {
tests := []struct {
name string
@@ -14,6 +20,8 @@ func TestTruncateString(t *testing.T) {
{"Short String", "Higress Is an AI-Native API Gateway", 1000, "Higress Is an AI-Native API Gateway"},
{"Exact Length", "Higress Is an AI-Native API Gateway", 35, "Higress Is an AI-Native API Gateway"},
{"Truncated String", "Higress Is an AI-Native API Gateway", 20, "Higress Is...(truncated)...PI Gateway"},
{"Empty String", "", 10, ""},
{"Single Char", "A", 10, "A"},
}
for _, tt := range tests {
@@ -26,3 +34,248 @@ func TestTruncateString(t *testing.T) {
})
}
}
// TestIsPreRequestStage tests the isPreRequestStage function
func TestIsPreRequestStage(t *testing.T) {
config := McpConverterConfig{Stage: ProcessRequest}
require.True(t, isPreRequestStage(config))
config = McpConverterConfig{Stage: ProcessResponse}
require.False(t, isPreRequestStage(config))
}
// TestIsPreResponseStage verifies that isPreResponseStage reports true only
// when the configured stage is ProcessResponse.
func TestIsPreResponseStage(t *testing.T) {
	for stage, want := range map[ProcessStage]bool{
		ProcessResponse: true,
		ProcessRequest:  false,
	} {
		require.Equal(t, want, isPreResponseStage(McpConverterConfig{Stage: stage}))
	}
}
// TestIsMethodAllowed verifies that only configured JSON-RPC methods pass the
// allow-list check.
func TestIsMethodAllowed(t *testing.T) {
	cfg := McpConverterConfig{AllowedMethods: []string{MethodToolList, MethodToolCall}}
	for _, method := range []string{MethodToolList, MethodToolCall} {
		require.True(t, isMethodAllowed(cfg, method))
	}
	require.False(t, isMethodAllowed(cfg, "invalid/method"))
}
// TestConstants tests the constant values
func TestConstants(t *testing.T) {
require.Equal(t, "x-envoy-jsonrpc-id", JsonRpcId)
require.Equal(t, "x-envoy-jsonrpc-method", JsonRpcMethod)
require.Equal(t, "x-envoy-jsonrpc-params", JsonRpcParams)
require.Equal(t, "x-envoy-jsonrpc-result", JsonRpcResult)
require.Equal(t, "x-envoy-jsonrpc-error", JsonRpcError)
require.Equal(t, "x-envoy-mcp-tool-name", McpToolName)
require.Equal(t, "x-envoy-mcp-tool-arguments", McpToolArguments)
require.Equal(t, "x-envoy-mcp-tool-response", McpToolResponse)
require.Equal(t, "x-envoy-mcp-tool-error", McpToolError)
require.Equal(t, 4000, DefaultMaxHeaderLength)
require.Equal(t, "tools/list", MethodToolList)
require.Equal(t, "tools/call", MethodToolCall)
require.Equal(t, ProcessStage("request"), ProcessRequest)
require.Equal(t, ProcessStage("response"), ProcessResponse)
}
// TestMcpConverterConfigDefaults verifies that a zero-value config has no
// header limit, no stage, and no allowed methods.
func TestMcpConverterConfigDefaults(t *testing.T) {
	var cfg McpConverterConfig
	require.Equal(t, 0, cfg.MaxHeaderLength)
	require.Equal(t, ProcessStage(""), cfg.Stage)
	require.Nil(t, cfg.AllowedMethods)
}
// TestProcessStage pins the string values of the ProcessStage constants.
func TestProcessStage(t *testing.T) {
	stages := map[ProcessStage]string{
		ProcessRequest:  "request",
		ProcessResponse: "response",
	}
	for stage, want := range stages {
		require.Equal(t, ProcessStage(want), stage)
	}
}
// TestRemoveJsonRpcHeadersFunction tests removeJsonRpcHeaders function logic
func TestRemoveJsonRpcHeadersFunction(t *testing.T) {
headersToRemove := []string{
JsonRpcId,
JsonRpcMethod,
JsonRpcParams,
JsonRpcResult,
McpToolName,
McpToolArguments,
McpToolResponse,
McpToolError,
}
require.Len(t, headersToRemove, 8)
}
// TestTruncateStringLong tests truncation of very long strings
func TestTruncateStringLong(t *testing.T) {
longString := ""
for i := 0; i < 5000; i++ {
longString += "a"
}
config := McpConverterConfig{MaxHeaderLength: 1000}
result := truncateString(longString, config)
require.Contains(t, result, "...(truncated)...")
require.LessOrEqual(t, len(result), 1020)
}
// TestTruncateStringWithSmallMaxLength tests truncation with small max length
func TestTruncateStringWithSmallMaxLength(t *testing.T) {
config := McpConverterConfig{MaxHeaderLength: 10}
result := truncateString("This is a very long string", config)
require.Contains(t, result, "...(truncated)...")
}
// TestPluginInit tests plugin initialization
func TestPluginInit(t *testing.T) {
configBytes, _ := json.Marshal(McpConverterConfig{
Stage: ProcessRequest,
MaxHeaderLength: DefaultMaxHeaderLength,
AllowedMethods: []string{MethodToolList, MethodToolCall},
})
host, status := test.NewTestHost(configBytes)
defer host.Reset()
require.Equal(t, types.OnPluginStartStatusOK, status)
}
// TestProcessJsonRpcRequest tests processJsonRpcRequest function
func TestProcessJsonRpcRequest(t *testing.T) {
configBytes, _ := json.Marshal(McpConverterConfig{
Stage: ProcessRequest,
MaxHeaderLength: DefaultMaxHeaderLength,
AllowedMethods: []string{MethodToolList, MethodToolCall},
})
host, status := test.NewTestHost(configBytes)
defer host.Reset()
require.Equal(t, types.OnPluginStartStatusOK, status)
host.InitHttp()
host.CallOnHttpRequestHeaders([][2]string{
{":authority", "mcp-server.example.com"},
{":method", "POST"},
{":path", "/mcp"},
{"content-type", "application/json"},
})
toolsListRequest := `{
"jsonrpc": "2.0",
"id": 1,
"method": "tools/list",
"params": {}
}`
action := host.CallOnHttpRequestBody([]byte(toolsListRequest))
require.Equal(t, types.ActionContinue, action)
host.CompleteHttp()
}
// TestProcessToolCallRequest tests processToolCallRequest function
func TestProcessToolCallRequest(t *testing.T) {
configBytes, _ := json.Marshal(McpConverterConfig{
Stage: ProcessRequest,
MaxHeaderLength: DefaultMaxHeaderLength,
AllowedMethods: []string{MethodToolCall},
})
host, status := test.NewTestHost(configBytes)
defer host.Reset()
require.Equal(t, types.OnPluginStartStatusOK, status)
host.InitHttp()
host.CallOnHttpRequestHeaders([][2]string{
{":authority", "mcp-server.example.com"},
{":method", "POST"},
{":path", "/mcp"},
{"content-type", "application/json"},
})
toolCallRequest := `{
"jsonrpc": "2.0",
"id": 1,
"method": "tools/call",
"params": {
"name": "test_tool",
"arguments": {"arg1": "value1"}
}
}`
action := host.CallOnHttpRequestBody([]byte(toolCallRequest))
require.Equal(t, types.ActionContinue, action)
host.CompleteHttp()
}
// TestProcessJsonRpcResponse tests processJsonRpcResponse function
func TestProcessJsonRpcResponse(t *testing.T) {
configBytes, _ := json.Marshal(McpConverterConfig{
Stage: ProcessResponse,
MaxHeaderLength: DefaultMaxHeaderLength,
AllowedMethods: []string{MethodToolList, MethodToolCall},
})
host, status := test.NewTestHost(configBytes)
defer host.Reset()
require.Equal(t, types.OnPluginStartStatusOK, status)
host.InitHttp()
host.CallOnHttpRequestHeaders([][2]string{
{":authority", "mcp-server.example.com"},
{":method", "POST"},
{":path", "/mcp"},
{"content-type", "application/json"},
})
responseBody := `{
"jsonrpc": "2.0",
"id": 1,
"result": {
"tools": [{"name": "test_tool"}]
}
}`
host.CallOnHttpResponseHeaders([][2]string{
{":status", "200"},
{"content-type", "application/json"},
})
host.CallOnHttpResponseBody([]byte(responseBody))
host.CompleteHttp()
}
// TestProcessToolListResponse tests processToolListResponse function
func TestProcessToolListResponse(t *testing.T) {
configBytes, _ := json.Marshal(McpConverterConfig{
Stage: ProcessResponse,
MaxHeaderLength: DefaultMaxHeaderLength,
AllowedMethods: []string{MethodToolList},
})
host, status := test.NewTestHost(configBytes)
defer host.Reset()
require.Equal(t, types.OnPluginStartStatusOK, status)
host.InitHttp()
host.CallOnHttpRequestHeaders([][2]string{
{":authority", "mcp-server.example.com"},
{":method", "POST"},
{":path", "/mcp"},
{"content-type", "application/json"},
})
responseBody := `{
"jsonrpc": "2.0",
"id": 1,
"result": {
"tools": [{"name": "test_tool"}]
}
}`
host.CallOnHttpResponseHeaders([][2]string{
{":status", "200"},
{"content-type", "application/json"},
})
host.CallOnHttpResponseBody([]byte(responseBody))
host.CompleteHttp()
}

View File

@@ -67,7 +67,7 @@ func genJWTs(keySets map[string]keySet) (jwts jwts) {
Expiry: jwt.NewNumericDate(time.Date(2034, 1, 1, 0, 0, 0, 0, time.UTC)),
NotBefore: jwt.NewNumericDate(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)),
},
"expried": {
"expired": {
Issuer: "higress-test",
Subject: "higress-test",
Audience: []string{"foo", "bar"},

View File

@@ -8,12 +8,12 @@
{
"alg": "RS256",
"token": "eyJhbGciOiJSUzI1NiIsImtpZCI6InJzYSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsiZm9vIiwiYmFyIl0sImV4cCI6MTcwNDA2NzIwMCwiaXNzIjoiaGlncmVzcy10ZXN0IiwibmJmIjoxNzA0MDY3MjAwLCJzdWIiOiJoaWdyZXNzLXRlc3QifQ.jqzlhBPk9mmvtTT5aCYf-_5uXXSEU5bQ32fx78XeboCnjR9K1CsI4KYUIkXEX3bk66XJQUeSes7lz3gA4Yzkd-v9oADHTgpKnIxzv_5mD0_afIwEFjcalqVbSvCmro4PessQZDnmU7AIzoo3RPSqbmq8xbPVYUH9I-OO8aUu2ATd1HozgxJH1XnRU8k9KMkVW8XhvJXLKZJmnqe3Tu6pCU_tawFlBfBC4fAhMf0yX2CGE0ABAHubcdiI6JXObQmQQ9Or2a-g2a8g_Bw697PoPOsAn0YpTrHst9GcyTpkbNTAq9X8fc5EM7hiDM1FGeMYcaQTdMnOh4HBhP0p4YEhvA",
"type": "expried"
"type": "expired"
},
{
"alg": "ES256",
"token": "eyJhbGciOiJFUzI1NiIsImtpZCI6InAyNTYiLCJ0eXAiOiJKV1QifQ.eyJhdWQiOlsiZm9vIiwiYmFyIl0sImV4cCI6MTcwNDA2NzIwMCwiaXNzIjoiaGlncmVzcy10ZXN0IiwibmJmIjoxNzA0MDY3MjAwLCJzdWIiOiJoaWdyZXNzLXRlc3QifQ.9AnXd2rZ6FirHZQAoabyL4xZNz0jr-3LmcV4-pFV3JrdtUT4386Mw5Qan125fUB-rZf_ZBlv0Bft2tWY149fyg",
"type": "expried"
"type": "expired"
},
{
"alg": "ES256",

View File

@@ -2,9 +2,12 @@ module mcp-router
go 1.24.1
replace github.com/alibaba/higress/plugins/wasm-go/pkg/mcp => ../../pkg/mcp
require (
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20250611100342-5654e89a7a80
github.com/higress-group/wasm-go v1.0.2-0.20250911113549-cbf1cfcce774
github.com/alibaba/higress/plugins/wasm-go/pkg/mcp v0.0.0
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9
github.com/tidwall/gjson v1.18.0
github.com/tidwall/sjson v1.2.5
)

View File

@@ -20,12 +20,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/higress-group/gjson_template v0.0.0-20250413075336-4c4161ed428b h1:rRI9+ThQbe+nw4jUiYEyOFaREkXCMMW9k1X2gy2d6pE=
github.com/higress-group/gjson_template v0.0.0-20250413075336-4c4161ed428b/go.mod h1:rU3M+Tq5VrQOo0dxpKHGb03Ty0sdWIZfAH+YCOACx/Y=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20250611100342-5654e89a7a80 h1:xqmtTZI0JQ2O+Lg9/CE6c+Tw9KD6FnvWw8EpLVuuvfg=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20250611100342-5654e89a7a80/go.mod h1:tRI2LfMudSkKHhyv1uex3BWzcice2s/l8Ah8axporfA=
github.com/higress-group/wasm-go v1.0.2-0.20250807064511-eb1cd98e1f57 h1:WhNdnKSDtAQrh4Yil8HAtbl7VW+WC85m7WS8kirnHAA=
github.com/higress-group/wasm-go v1.0.2-0.20250807064511-eb1cd98e1f57/go.mod h1:9k7L730huS/q4V5iH9WLDgf5ZUHEtfhM/uXcegKDG/M=
github.com/higress-group/wasm-go v1.0.2-0.20250911113549-cbf1cfcce774 h1:2wlbNpFJCQNbPBFYgswz7Zvxo9O3L0PH0AJxwiCc5lk=
github.com/higress-group/wasm-go v1.0.2-0.20250911113549-cbf1cfcce774/go.mod h1:9k7L730huS/q4V5iH9WLDgf5ZUHEtfhM/uXcegKDG/M=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2 h1:NY33OrWCJJ+DFiLc+lsBY4Ywor2Ik61ssk6qkGF8Ypo=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2/go.mod h1:tRI2LfMudSkKHhyv1uex3BWzcice2s/l8Ah8axporfA=
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9 h1:sUuUXZwr50l3W1St7MESlFmxmUAu+QUNNfJXx4P6bas=
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9/go.mod h1:uKVYICbRaxTlKqdm8E0dpjbysxM8uCPb9LV26hF3Km8=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=

View File

@@ -22,8 +22,8 @@ import (
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
"github.com/higress-group/wasm-go/pkg/log"
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/higress-group/wasm-go/pkg/mcp/consts"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/consts"
"github.com/higress-group/wasm-go/pkg/wrapper"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"

View File

@@ -1,13 +1,16 @@
module all-in-one
module mcp-server
go 1.24.1
replace quark-search => ../quark-search
replace amap-tools => ../amap-tools
replace (
amap-tools => ../../mcp-servers/amap-tools
github.com/alibaba/higress/plugins/wasm-go/pkg/mcp => ../../pkg/mcp
quark-search => ../../mcp-servers/quark-search
)
require (
amap-tools v0.0.0-00010101000000-000000000000
github.com/alibaba/higress/plugins/wasm-go/pkg/mcp v0.0.0
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9
github.com/stretchr/testify v1.9.0

View File

@@ -22,10 +22,6 @@ github.com/higress-group/gjson_template v0.0.0-20250413075336-4c4161ed428b h1:rR
github.com/higress-group/gjson_template v0.0.0-20250413075336-4c4161ed428b/go.mod h1:rU3M+Tq5VrQOo0dxpKHGb03Ty0sdWIZfAH+YCOACx/Y=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2 h1:NY33OrWCJJ+DFiLc+lsBY4Ywor2Ik61ssk6qkGF8Ypo=
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2/go.mod h1:tRI2LfMudSkKHhyv1uex3BWzcice2s/l8Ah8axporfA=
github.com/higress-group/wasm-go v1.0.9-0.20251223122142-eae11e33a500 h1:4BKKZ3BreIaIGub88nlvzihTK1uJmZYYoQ7r7Xkgb5Q=
github.com/higress-group/wasm-go v1.0.9-0.20251223122142-eae11e33a500/go.mod h1:uKVYICbRaxTlKqdm8E0dpjbysxM8uCPb9LV26hF3Km8=
github.com/higress-group/wasm-go v1.0.10-0.20260115083526-76699a1df2c1 h1:+usoX0B1cwECTA2qf73IaLGyCIMVopIMev5cBWGgEZk=
github.com/higress-group/wasm-go v1.0.10-0.20260115083526-76699a1df2c1/go.mod h1:uKVYICbRaxTlKqdm8E0dpjbysxM8uCPb9LV26hF3Km8=
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9 h1:sUuUXZwr50l3W1St7MESlFmxmUAu+QUNNfJXx4P6bas=
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9/go.mod h1:uKVYICbRaxTlKqdm8E0dpjbysxM8uCPb9LV26hF3Km8=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=

View File

@@ -18,7 +18,7 @@ import (
amap "amap-tools/tools"
quark "quark-search/tools"
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
)
func main() {}

View File

@@ -1,14 +0,0 @@
# Use a minimal base image as we only need to store the wasm file.
FROM scratch
# Add build argument for the filter name. This will be passed by the Makefile.
ARG FILTER_NAME
# Copy the compiled WASM binary into the image's root directory.
# The wasm file will be named after the filter.
COPY ${FILTER_NAME}/main.wasm /plugin.wasm
# Metadata
LABEL org.opencontainers.image.title="${FILTER_NAME}"
LABEL org.opencontainers.image.description="Higress MCP filter - ${FILTER_NAME}"
LABEL org.opencontainers.image.source="https://github.com/alibaba/higress"

View File

@@ -1,54 +0,0 @@
# MCP Filter Makefile
# Variables
FILTER_NAME ?= mcp-router
REGISTRY ?= higress-registry.cn-hangzhou.cr.aliyuncs.com/plugins/
BUILD_TIME := $(shell date "+%Y%m%d-%H%M%S")
COMMIT_ID := $(shell git rev-parse --short HEAD 2>/dev/null)
IMAGE_TAG = $(if $(strip $(FILTER_VERSION)),${FILTER_VERSION},${BUILD_TIME}-${COMMIT_ID})
IMG ?= ${REGISTRY}${FILTER_NAME}:${IMAGE_TAG}
# Default target
.DEFAULT: build
build:
@echo "Building WASM binary for filter: ${FILTER_NAME}..."
@if [ ! -d "${FILTER_NAME}" ]; then \
echo "Error: Filter directory '${FILTER_NAME}' not found."; \
exit 1; \
fi
cd ${FILTER_NAME} && GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o main.wasm main.go
@echo ""
@echo "Output WASM file: ${FILTER_NAME}/main.wasm"
# Build Docker image (depends on build target to ensure WASM binary exists)
build-image: build
@echo "Building Docker image for ${FILTER_NAME}..."
docker build -t ${IMG} \
--build-arg FILTER_NAME=${FILTER_NAME} \
-f Dockerfile .
@echo ""
@echo "Image: ${IMG}"
# Build and push Docker image
build-push: build-image
docker push ${IMG}
# Clean build artifacts
clean:
@echo "Cleaning build artifacts for filter: ${FILTER_NAME}..."
rm -f ${FILTER_NAME}/main.wasm
# Help
help:
@echo "Available targets:"
@echo " build - Build WASM binary for a specific filter"
@echo " build-image - Build Docker image"
@echo " build-push - Build and push Docker image"
@echo " clean - Remove build artifacts for a specific filter"
@echo ""
@echo "Variables:"
@echo " FILTER_NAME - Name of the MCP filter to build (default: ${FILTER_NAME})"
@echo " REGISTRY - Docker registry (default: ${REGISTRY})"
@echo " FILTER_VERSION - Version tag for the image (default: timestamp-commit)"
@echo " IMG - Full image name (default: ${IMG})"

View File

@@ -80,8 +80,8 @@ import (
"net/http"
"my-mcp-server/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
// Define your tool structure with input parameters
@@ -145,8 +145,8 @@ For better organization, you can create a separate file to load all your tools:
package tools
import (
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
)
func LoadTools(server *mcp.MCPServer) server.Server {
@@ -170,7 +170,7 @@ import (
amap "amap-tools/tools"
quark "quark-search/tools"
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
)
func main() {}
@@ -375,7 +375,7 @@ package main
import (
"my-mcp-server/tools"
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
)
func main() {}

View File

@@ -2,9 +2,12 @@ module amap-tools
go 1.24.1
replace github.com/alibaba/higress/plugins/wasm-go/pkg/mcp => ../../pkg/mcp
require (
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20250611100342-5654e89a7a80
github.com/higress-group/wasm-go v1.0.0
github.com/alibaba/higress/plugins/wasm-go/pkg/mcp v0.0.0
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9
)
require (
@@ -23,6 +26,7 @@ require (
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/tetratelabs/wazero v1.7.2 // indirect
github.com/tidwall/gjson v1.18.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect

View File

@@ -17,7 +17,7 @@ package main
import (
"amap-tools/tools"
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
)
func main() {}

View File

@@ -15,8 +15,8 @@
package tools
import (
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
)
func LoadTools(server *mcp.MCPServer) server.Server {

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = AroundSearchRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = BicyclingRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = DrivingRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = TransitIntegratedRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = WalkingRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = DistanceRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = GeoRequest{}

View File

@@ -24,8 +24,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm"
)

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = ReGeocodeRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = SearchDetailRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = TextSearchRequest{}

View File

@@ -23,8 +23,8 @@ import (
"amap-tools/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
)
var _ server.Tool = WeatherRequest{}

View File

@@ -2,8 +2,11 @@ module quark-search
go 1.24.1
replace github.com/alibaba/higress/plugins/wasm-go/pkg/mcp => ../../pkg/mcp
require (
github.com/higress-group/wasm-go v1.0.0
github.com/alibaba/higress/plugins/wasm-go/pkg/mcp v0.0.0
github.com/higress-group/wasm-go v1.0.10-0.20260115123534-84ef43c39dc9
github.com/tidwall/gjson v1.18.0
)
@@ -16,7 +19,7 @@ require (
github.com/buger/jsonparser v1.1.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/higress-group/gjson_template v0.0.0-20250413075336-4c4161ed428b // indirect
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20250611100342-5654e89a7a80 // indirect
github.com/higress-group/proxy-wasm-go-sdk v0.0.0-20251103120604-77e9cce339d2 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/invopop/jsonschema v0.13.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -24,6 +27,7 @@ require (
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/tetratelabs/wazero v1.7.2 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/resp v0.1.1 // indirect

View File

@@ -17,7 +17,7 @@ package main
import (
"quark-search/tools"
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
)
func main() {}

View File

@@ -15,8 +15,8 @@
package tools
import (
"github.com/higress-group/wasm-go/pkg/mcp"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
)
func LoadTools(server *mcp.MCPServer) server.Server {

View File

@@ -24,8 +24,8 @@ import (
"quark-search/config"
"github.com/higress-group/wasm-go/pkg/mcp/server"
"github.com/higress-group/wasm-go/pkg/mcp/utils"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/server"
"github.com/alibaba/higress/plugins/wasm-go/pkg/mcp/utils"
"github.com/tidwall/gjson"
)

View File

@@ -1,85 +0,0 @@
// Copyright (c) 2022 Alibaba Group Holding Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
type Log interface {
Trace(msg string)
Tracef(format string, args ...interface{})
Debug(msg string)
Debugf(format string, args ...interface{})
Info(msg string)
Infof(format string, args ...interface{})
Warn(msg string)
Warnf(format string, args ...interface{})
Error(msg string)
Errorf(format string, args ...interface{})
Critical(msg string)
Criticalf(format string, args ...interface{})
ResetID(pluginID string)
}
var pluginLog Log
func SetPluginLog(log Log) {
pluginLog = log
}
func Trace(msg string) {
pluginLog.Trace(msg)
}
func Tracef(format string, args ...interface{}) {
pluginLog.Tracef(format, args...)
}
func Debug(msg string) {
pluginLog.Debug(msg)
}
func Debugf(format string, args ...interface{}) {
pluginLog.Debugf(format, args...)
}
func Info(msg string) {
pluginLog.Info(msg)
}
func Infof(format string, args ...interface{}) {
pluginLog.Infof(format, args...)
}
func Warn(msg string) {
pluginLog.Warn(msg)
}
func Warnf(format string, args ...interface{}) {
pluginLog.Warnf(format, args...)
}
func Error(msg string) {
pluginLog.Error(msg)
}
func Errorf(format string, args ...interface{}) {
pluginLog.Errorf(format, args...)
}
func Critical(msg string) {
pluginLog.Critical(msg)
}
func Criticalf(format string, args ...interface{}) {
pluginLog.Criticalf(format, args...)
}

View File

@@ -1,300 +0,0 @@
// Copyright (c) 2022 Alibaba Group Holding Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package matcher
import (
"errors"
"fmt"
"strings"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm"
"github.com/higress-group/proxy-wasm-go-sdk/proxywasm/types"
"github.com/tidwall/gjson"
)
type Category int
const (
Route Category = iota
Host
Service
RoutePrefix
)
type MatchType int
const (
Prefix MatchType = iota
Exact
Suffix
)
const (
RULES_KEY = "_rules_"
MATCH_ROUTE_KEY = "_match_route_"
MATCH_DOMAIN_KEY = "_match_domain_"
MATCH_SERVICE_KEY = "_match_service_"
MATCH_ROUTE_PREFIX_KEY = "_match_route_prefix_"
)
type HostMatcher struct {
matchType MatchType
host string
}
type RuleConfig[PluginConfig any] struct {
category Category
routes map[string]struct{}
services map[string]struct{}
routePrefixs map[string]struct{}
hosts []HostMatcher
config PluginConfig
}
type RuleMatcher[PluginConfig any] struct {
ruleConfig []RuleConfig[PluginConfig]
globalConfig PluginConfig
hasGlobalConfig bool
}
func (m RuleMatcher[PluginConfig]) GetMatchConfig() (*PluginConfig, error) {
host, err := proxywasm.GetHttpRequestHeader(":authority")
if err != nil {
return nil, err
}
routeName, err := proxywasm.GetProperty([]string{"route_name"})
if err != nil && err != types.ErrorStatusNotFound {
return nil, err
}
serviceName, err := proxywasm.GetProperty([]string{"cluster_name"})
if err != nil && err != types.ErrorStatusNotFound {
return nil, err
}
for _, rule := range m.ruleConfig {
// category == Host
if rule.category == Host {
if m.hostMatch(rule, host) {
return &rule.config, nil
}
}
// category == Route
if rule.category == Route {
if _, ok := rule.routes[string(routeName)]; ok {
return &rule.config, nil
}
}
// category == RoutePrefix
if rule.category == RoutePrefix {
for routePrefix := range rule.routePrefixs {
if strings.HasPrefix(string(routeName), routePrefix) {
return &rule.config, nil
}
}
}
// category == Cluster
if m.serviceMatch(rule, string(serviceName)) {
return &rule.config, nil
}
}
if m.hasGlobalConfig {
return &m.globalConfig, nil
}
return nil, nil
}
func (m *RuleMatcher[PluginConfig]) ParseRuleConfig(config gjson.Result,
parsePluginConfig func(gjson.Result, *PluginConfig) error,
parseOverrideConfig func(gjson.Result, PluginConfig, *PluginConfig) error) error {
var rules []gjson.Result
obj := config.Map()
keyCount := len(obj)
if keyCount == 0 {
// enable globally for empty config
m.hasGlobalConfig = true
return parsePluginConfig(config, &m.globalConfig)
}
if rulesJson, ok := obj[RULES_KEY]; ok {
rules = rulesJson.Array()
keyCount--
}
var pluginConfig PluginConfig
var globalConfigError error
if keyCount > 0 {
err := parsePluginConfig(config, &pluginConfig)
if err != nil {
globalConfigError = err
} else {
m.globalConfig = pluginConfig
m.hasGlobalConfig = true
}
}
if len(rules) == 0 {
if m.hasGlobalConfig {
return nil
}
return fmt.Errorf("parse config failed, no valid rules; global config parse error:%v", globalConfigError)
}
for _, ruleJson := range rules {
var (
rule RuleConfig[PluginConfig]
err error
)
if parseOverrideConfig != nil {
err = parseOverrideConfig(ruleJson, m.globalConfig, &rule.config)
} else {
err = parsePluginConfig(ruleJson, &rule.config)
}
if err != nil {
return err
}
rule.routes = m.parseRouteMatchConfig(ruleJson)
rule.hosts = m.parseHostMatchConfig(ruleJson)
rule.services = m.parseServiceMatchConfig(ruleJson)
rule.routePrefixs = m.parseRoutePrefixMatchConfig(ruleJson)
noRoute := len(rule.routes) == 0
noHosts := len(rule.hosts) == 0
noService := len(rule.services) == 0
noRoutePrefix := len(rule.routePrefixs) == 0
if boolToInt(noRoute)+boolToInt(noService)+boolToInt(noHosts)+boolToInt(noRoutePrefix) != 3 {
return errors.New("there is only one of '_match_route_', '_match_domain_', '_match_service_' and '_match_route_prefix_' can present in configuration.")
}
if !noRoute {
rule.category = Route
} else if !noHosts {
rule.category = Host
} else if !noService {
rule.category = Service
} else {
rule.category = RoutePrefix
}
m.ruleConfig = append(m.ruleConfig, rule)
}
return nil
}
func (m RuleMatcher[PluginConfig]) parseRouteMatchConfig(config gjson.Result) map[string]struct{} {
keys := config.Get(MATCH_ROUTE_KEY).Array()
routes := make(map[string]struct{})
for _, item := range keys {
routeName := item.String()
if routeName != "" {
routes[routeName] = struct{}{}
}
}
return routes
}
func (m RuleMatcher[PluginConfig]) parseRoutePrefixMatchConfig(config gjson.Result) map[string]struct{} {
keys := config.Get(MATCH_ROUTE_PREFIX_KEY).Array()
routePrefixs := make(map[string]struct{})
for _, item := range keys {
routePrefix := item.String()
if routePrefix != "" {
routePrefixs[routePrefix] = struct{}{}
}
}
return routePrefixs
}
func (m RuleMatcher[PluginConfig]) parseServiceMatchConfig(config gjson.Result) map[string]struct{} {
keys := config.Get(MATCH_SERVICE_KEY).Array()
clusters := make(map[string]struct{})
for _, item := range keys {
clusterName := item.String()
if clusterName != "" {
clusters[clusterName] = struct{}{}
}
}
return clusters
}
func (m RuleMatcher[PluginConfig]) parseHostMatchConfig(config gjson.Result) []HostMatcher {
keys := config.Get(MATCH_DOMAIN_KEY).Array()
var hostMatchers []HostMatcher
for _, item := range keys {
host := item.String()
var hostMatcher HostMatcher
if strings.HasPrefix(host, "*") {
hostMatcher.matchType = Suffix
hostMatcher.host = host[1:]
} else if strings.HasSuffix(host, "*") {
hostMatcher.matchType = Prefix
hostMatcher.host = host[:len(host)-1]
} else {
hostMatcher.matchType = Exact
hostMatcher.host = host
}
hostMatchers = append(hostMatchers, hostMatcher)
}
return hostMatchers
}
func stripPortFromHost(reqHost string) string {
// Port removing code is inspired by
// https://github.com/envoyproxy/envoy/blob/v1.17.0/source/common/http/header_utility.cc#L219
portStart := strings.LastIndexByte(reqHost, ':')
if portStart != -1 {
// According to RFC3986 v6 address is always enclosed in "[]".
// section 3.2.2.
v6EndIndex := strings.LastIndexByte(reqHost, ']')
if v6EndIndex == -1 || v6EndIndex < portStart {
if portStart+1 <= len(reqHost) {
return reqHost[:portStart]
}
}
}
return reqHost
}
func (m RuleMatcher[PluginConfig]) hostMatch(rule RuleConfig[PluginConfig], reqHost string) bool {
reqHost = stripPortFromHost(reqHost)
for _, hostMatch := range rule.hosts {
switch hostMatch.matchType {
case Suffix:
if strings.HasSuffix(reqHost, hostMatch.host) {
return true
}
case Prefix:
if strings.HasPrefix(reqHost, hostMatch.host) {
return true
}
case Exact:
if reqHost == hostMatch.host {
return true
}
default:
return false
}
}
return false
}
func (m RuleMatcher[PluginConfig]) serviceMatch(rule RuleConfig[PluginConfig], serviceName string) bool {
parts := strings.Split(serviceName, "|")
if len(parts) != 4 {
return false
}
port := parts[1]
fqdn := parts[3]
for configServiceName := range rule.services {
colonIndex := strings.LastIndexByte(configServiceName, ':')
if colonIndex != -1 && fqdn == string(configServiceName[:colonIndex]) && port == string(configServiceName[colonIndex+1:]) {
return true
} else if fqdn == string(configServiceName) {
return true
}
}
return false
}

View File

@@ -1,438 +0,0 @@
// Copyright (c) 2022 Alibaba Group Holding Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package matcher
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tidwall/gjson"
)
type customConfig struct {
name string
age int64
}
func parseConfig(json gjson.Result, config *customConfig) error {
config.name = json.Get("name").String()
config.age = json.Get("age").Int()
return nil
}
func TestHostMatch(t *testing.T) {
cases := []struct {
name string
config RuleConfig[customConfig]
host string
result bool
}{
{
name: "prefix",
config: RuleConfig[customConfig]{
hosts: []HostMatcher{
{
matchType: Prefix,
host: "www.",
},
},
},
host: "www.test.com",
result: true,
},
{
name: "prefix failed",
config: RuleConfig[customConfig]{
hosts: []HostMatcher{
{
matchType: Prefix,
host: "www.",
},
},
},
host: "test.com",
result: false,
},
{
name: "suffix",
config: RuleConfig[customConfig]{
hosts: []HostMatcher{
{
matchType: Suffix,
host: ".example.com",
},
},
},
host: "www.example.com",
result: true,
},
{
name: "suffix failed",
config: RuleConfig[customConfig]{
hosts: []HostMatcher{
{
matchType: Suffix,
host: ".example.com",
},
},
},
host: "example.com",
result: false,
},
{
name: "exact",
config: RuleConfig[customConfig]{
hosts: []HostMatcher{
{
matchType: Exact,
host: "www.example.com",
},
},
},
host: "www.example.com",
result: true,
},
{
name: "exact failed",
config: RuleConfig[customConfig]{
hosts: []HostMatcher{
{
matchType: Exact,
host: "www.example.com",
},
},
},
host: "example.com",
result: false,
},
{
name: "exact port",
config: RuleConfig[customConfig]{
hosts: []HostMatcher{
{
matchType: Exact,
host: "www.example.com",
},
},
},
host: "www.example.com:8080",
result: true,
},
{
name: "any",
config: RuleConfig[customConfig]{
hosts: []HostMatcher{
{
matchType: Suffix,
host: "",
},
},
},
host: "www.example.com",
result: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var m RuleMatcher[customConfig]
assert.Equal(t, c.result, m.hostMatch(c.config, c.host))
})
}
}
func TestServiceMatch(t *testing.T) {
cases := []struct {
name string
config RuleConfig[customConfig]
service string
result bool
}{
{
name: "fqdn",
config: RuleConfig[customConfig]{
services: map[string]struct{}{
"qwen.dns": {},
},
},
service: "outbound|443||qwen.dns",
result: true,
},
{
name: "fqdn with port",
config: RuleConfig[customConfig]{
services: map[string]struct{}{
"qwen.dns:443": {},
},
},
service: "outbound|443||qwen.dns",
result: true,
},
{
name: "not match",
config: RuleConfig[customConfig]{
services: map[string]struct{}{
"moonshot.dns:443": {},
},
},
service: "outbound|443||qwen.dns",
result: false,
},
{
name: "error config format",
config: RuleConfig[customConfig]{
services: map[string]struct{}{
"qwen.dns:": {},
},
},
service: "outbound|443||qwen.dns",
result: false,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var m RuleMatcher[customConfig]
assert.Equal(t, c.result, m.serviceMatch(c.config, c.service))
})
}
}
func TestParseRuleConfig(t *testing.T) {
cases := []struct {
name string
config string
errMsg string
expected RuleMatcher[customConfig]
}{
{
name: "global config",
config: `{"name":"john", "age":18}`,
expected: RuleMatcher[customConfig]{
globalConfig: customConfig{
name: "john",
age: 18,
},
hasGlobalConfig: true,
},
},
{
name: "rules config",
config: `{"_rules_":[{"_match_domain_":["*.example.com","www.*","*","www.abc.com"],"name":"john", "age":18},{"_match_route_":["test1","test2"],"name":"ann", "age":16},{"_match_service_":["test1.dns","test2.static:8080"],"name":"ann", "age":16},{"_match_route_prefix_":["api1","api2"],"name":"ann", "age":16}]}`,
expected: RuleMatcher[customConfig]{
ruleConfig: []RuleConfig[customConfig]{
{
category: Host,
hosts: []HostMatcher{
{
matchType: Suffix,
host: ".example.com",
},
{
matchType: Prefix,
host: "www.",
},
{
matchType: Suffix,
host: "",
},
{
matchType: Exact,
host: "www.abc.com",
},
},
routes: map[string]struct{}{},
services: map[string]struct{}{},
routePrefixs: map[string]struct{}{},
config: customConfig{
name: "john",
age: 18,
},
},
{
category: Route,
routes: map[string]struct{}{
"test1": {},
"test2": {},
},
services: map[string]struct{}{},
routePrefixs: map[string]struct{}{},
config: customConfig{
name: "ann",
age: 16,
},
},
{
category: Service,
routes: map[string]struct{}{},
services: map[string]struct{}{
"test1.dns": {},
"test2.static:8080": {},
},
routePrefixs: map[string]struct{}{},
config: customConfig{
name: "ann",
age: 16,
},
},
{
category: RoutePrefix,
routes: map[string]struct{}{},
services: map[string]struct{}{},
routePrefixs: map[string]struct{}{
"api1": {},
"api2": {},
},
config: customConfig{
name: "ann",
age: 16,
},
},
},
},
},
{
name: "no rule",
config: `{"_rules_":[]}`,
errMsg: "parse config failed, no valid rules; global config parse error:<nil>",
},
{
name: "invalid rule",
config: `{"_rules_":[{"_match_domain_":["*"],"_match_route_":["test"]}]}`,
errMsg: "there is only one of '_match_route_', '_match_domain_', '_match_service_' and '_match_route_prefix_' can present in configuration.",
},
{
name: "invalid rule",
config: `{"_rules_":[{"_match_domain_":["*"],"_match_service_":["test.dns"]}]}`,
errMsg: "there is only one of '_match_route_', '_match_domain_', '_match_service_' and '_match_route_prefix_' can present in configuration.",
},
{
name: "invalid rule",
config: `{"_rules_":[{"age":16}]}`,
errMsg: "there is only one of '_match_route_', '_match_domain_', '_match_service_' and '_match_route_prefix_' can present in configuration.",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var actual RuleMatcher[customConfig]
err := actual.ParseRuleConfig(gjson.Parse(c.config), parseConfig, nil)
if err != nil {
if c.errMsg == "" {
t.Errorf("parse failed: %v", err)
}
if err.Error() != c.errMsg {
t.Errorf("expect err: %s, actual err: %s", c.errMsg,
err.Error())
}
return
}
assert.Equal(t, c.expected, actual)
})
}
}
type completeConfig struct {
// global config
consumers []string
// rule config
allow []string
}
func parseGlobalConfig(json gjson.Result, global *completeConfig) error {
if json.Get("consumers").Exists() && json.Get("allow").Exists() {
return errors.New("consumers and allow should not be configured at the same level")
}
for _, item := range json.Get("consumers").Array() {
global.consumers = append(global.consumers, item.String())
}
return nil
}
func parseOverrideRuleConfig(json gjson.Result, global completeConfig, config *completeConfig) error {
if json.Get("consumers").Exists() && json.Get("allow").Exists() {
return errors.New("consumers and allow should not be configured at the same level")
}
// override config via global
*config = global
for _, item := range json.Get("allow").Array() {
config.allow = append(config.allow, item.String())
}
return nil
}
func TestParseOverrideConfig(t *testing.T) {
cases := []struct {
name string
config string
errMsg string
expected RuleMatcher[completeConfig]
}{
{
name: "override rule config",
config: `{"consumers":["c1","c2","c3"],"_rules_":[{"_match_route_":["r1","r2"],"allow":["c1","c3"]}]}`,
expected: RuleMatcher[completeConfig]{
ruleConfig: []RuleConfig[completeConfig]{
{
category: Route,
routes: map[string]struct{}{
"r1": {},
"r2": {},
},
services: map[string]struct{}{},
routePrefixs: map[string]struct{}{},
config: completeConfig{
consumers: []string{"c1", "c2", "c3"},
allow: []string{"c1", "c3"},
},
},
},
globalConfig: completeConfig{
consumers: []string{"c1", "c2", "c3"},
},
hasGlobalConfig: true,
},
},
{
name: "invalid config",
config: `{"consumers":["c1","c2","c3"],"allow":["c1"]}`,
errMsg: "parse config failed, no valid rules; global config parse error:consumers and allow should not be configured at the same level",
},
{
name: "invalid config",
config: `{"_rules_":[{"_match_route_":["r1","r2"],"consumers":["c1","c2"],"allow":["c1"]}]}`,
errMsg: "consumers and allow should not be configured at the same level",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var actual RuleMatcher[completeConfig]
err := actual.ParseRuleConfig(gjson.Parse(c.config), parseGlobalConfig, parseOverrideRuleConfig)
if err != nil {
if c.errMsg == "" {
t.Errorf("parse failed: %v", err)
}
if err.Error() != c.errMsg {
t.Errorf("expect err: %s, actual err: %s", c.errMsg, err.Error())
}
return
}
assert.Equal(t, c.expected, actual)
})
}
}

Some files were not shown because too many files have changed in this diff Show More