feat(api): add qwen/ prefix routing for Alibaba DashScope provider

Users in Discord #clawcode-get-help (web3g) asked for Qwen 3.6 Plus via
native Alibaba DashScope API instead of OpenRouter, which has stricter
rate limits. This commit adds first-class routing for qwen/ and bare
qwen- prefixed model names.

Changes:
- DEFAULT_DASHSCOPE_BASE_URL constant: /compatible-mode/v1 endpoint
- OpenAiCompatConfig::dashscope() factory mirroring openai()/xai()
- DASHSCOPE_ENV_VARS + credential_env_vars() wiring
- metadata_for_model: qwen/ and qwen- prefixes route to DashScope with
  auth_env=DASHSCOPE_API_KEY, reuses ProviderKind::OpenAi because
  DashScope speaks the OpenAI REST shape
- is_reasoning_model: detect qwen-qwq, qwq-*, and *-thinking variants
  so tuning params (temperature, top_p, etc.) get stripped before
  payload assembly (same pattern as o1/o3/grok-3-mini)

Tests added:
- providers::tests::qwen_prefix_routes_to_dashscope_not_anthropic
- openai_compat::tests::qwen_reasoning_variants_are_detected

89 api lib tests passing, 0 failing. cargo fmt --check: clean.

Closes the user-reported gap: 'use Qwen 3.6 Plus via Alibaba API
directly, not OpenRouter' without needing OPENAI_BASE_URL override
or unsetting ANTHROPIC_API_KEY.
This commit is contained in:
YeonGyu-Kim
2026-04-08 14:06:26 +09:00
parent 006f7d7ee6
commit 3ac97e635e
2 changed files with 87 additions and 4 deletions

View File

@@ -181,6 +181,19 @@ pub fn metadata_for_model(model: &str) -> Option<ProviderMetadata> {
default_base_url: openai_compat::DEFAULT_OPENAI_BASE_URL,
});
}
// Alibaba DashScope compatible-mode endpoint. Routes qwen/* and bare
// qwen-* model names (qwen-max, qwen-plus, qwen-turbo, qwen-qwq, etc.)
// to the OpenAI-compat client pointed at DashScope's /compatible-mode/v1.
// Uses the OpenAi provider kind because DashScope speaks the OpenAI REST
// shape — only the base URL and auth env var differ.
if canonical.starts_with("qwen/") || canonical.starts_with("qwen-") {
return Some(ProviderMetadata {
provider: ProviderKind::OpenAi,
auth_env: "DASHSCOPE_API_KEY",
base_url_env: "DASHSCOPE_BASE_URL",
default_base_url: openai_compat::DEFAULT_DASHSCOPE_BASE_URL,
});
}
None
}
@@ -386,6 +399,36 @@ mod tests {
assert_eq!(kind2, ProviderKind::OpenAi);
}
#[test]
fn qwen_prefix_routes_to_dashscope_not_anthropic() {
    // Regression for a Discord #clawcode-get-help request (web3g): Qwen
    // models must hit the native Alibaba DashScope API instead of
    // OpenRouter. Both the "qwen/" namespace and bare "qwen-" model names
    // have to resolve to OpenAi-kind metadata aimed at DashScope's
    // compatible-mode endpoint, regardless of whether ANTHROPIC_API_KEY
    // happens to be set in the environment.
    let namespaced = super::metadata_for_model("qwen/qwen-max")
        .expect("qwen/ prefix must resolve to DashScope metadata");
    assert_eq!(namespaced.provider, ProviderKind::OpenAi);
    assert_eq!(namespaced.auth_env, "DASHSCOPE_API_KEY");
    assert_eq!(namespaced.base_url_env, "DASHSCOPE_BASE_URL");
    assert!(namespaced.default_base_url.contains("dashscope.aliyuncs.com"));

    // The bare "qwen-" spelling takes the same route.
    let bare = super::metadata_for_model("qwen-plus")
        .expect("qwen- prefix must resolve to DashScope metadata");
    assert_eq!(bare.provider, ProviderKind::OpenAi);
    assert_eq!(bare.auth_env, "DASHSCOPE_API_KEY");

    // detect_provider_kind must agree even when ANTHROPIC_API_KEY is set.
    assert_eq!(
        detect_provider_kind("qwen/qwen3-coder"),
        ProviderKind::OpenAi,
        "qwen/ prefix must win over auth-sniffer order"
    );
}
#[test]
fn keeps_existing_max_token_heuristic() {
assert_eq!(max_tokens_for_model("opus"), 32_000);

View File

@@ -18,6 +18,7 @@ use super::{preflight_message_request, Provider, ProviderFuture};
pub const DEFAULT_XAI_BASE_URL: &str = "https://api.x.ai/v1";
pub const DEFAULT_OPENAI_BASE_URL: &str = "https://api.openai.com/v1";
pub const DEFAULT_DASHSCOPE_BASE_URL: &str = "https://dashscope.aliyuncs.com/compatible-mode/v1";
const REQUEST_ID_HEADER: &str = "request-id";
const ALT_REQUEST_ID_HEADER: &str = "x-request-id";
const DEFAULT_INITIAL_BACKOFF: Duration = Duration::from_secs(1);
@@ -34,6 +35,7 @@ pub struct OpenAiCompatConfig {
const XAI_ENV_VARS: &[&str] = &["XAI_API_KEY"];
const OPENAI_ENV_VARS: &[&str] = &["OPENAI_API_KEY"];
const DASHSCOPE_ENV_VARS: &[&str] = &["DASHSCOPE_API_KEY"];
impl OpenAiCompatConfig {
#[must_use]
@@ -55,11 +57,27 @@ impl OpenAiCompatConfig {
default_base_url: DEFAULT_OPENAI_BASE_URL,
}
}
/// Factory for Alibaba's DashScope compatible-mode endpoint (Qwen family).
/// DashScope exposes the OpenAI REST shape under /compatible-mode/v1, so
/// only the provider name, credential env var, and base URL differ from
/// [`Self::openai`]. Added for a Discord #clawcode-get-help request:
/// calling the native Alibaba API sidesteps OpenRouter's tighter rate
/// limits.
#[must_use]
pub const fn dashscope() -> Self {
    Self {
        provider_name: "DashScope",
        base_url_env: "DASHSCOPE_BASE_URL",
        api_key_env: "DASHSCOPE_API_KEY",
        default_base_url: DEFAULT_DASHSCOPE_BASE_URL,
    }
}
/// Environment variables consulted when looking up this provider's API
/// credentials. Providers without a dedicated credential list get an
/// empty slice.
#[must_use]
pub fn credential_env_vars(self) -> &'static [&'static str] {
    match self.provider_name {
        "DashScope" => DASHSCOPE_ENV_VARS,
        "OpenAI" => OPENAI_ENV_VARS,
        "xAI" => XAI_ENV_VARS,
        _ => &[],
    }
}
@@ -689,12 +707,18 @@ struct ErrorBody {
/// reasoning/chain-of-thought models with fixed sampling.
fn is_reasoning_model(model: &str) -> bool {
    // NOTE(review): the source as shown contained diff residue — both the
    // pre-change `lowered.starts_with(...)` chain and the post-change
    // `canonical.*` chain were present back to back, which does not
    // compile. This is the intended post-change version.
    let lowered = model.to_ascii_lowercase();
    // Strip any provider/ namespace before the check (e.g.
    // "qwen/qwen-qwq" -> "qwen-qwq"). `rsplit` always yields at least one
    // item, so the fallback is only a defensive default.
    let canonical = lowered.rsplit('/').next().unwrap_or(lowered.as_str());
    // OpenAI reasoning models (o1/o3/o4 families).
    canonical.starts_with("o1")
        || canonical.starts_with("o3")
        || canonical.starts_with("o4")
        // xAI reasoning: grok-3-mini always uses reasoning mode.
        || canonical == "grok-3-mini"
        // Alibaba DashScope reasoning variants (QwQ + Qwen3-Thinking family).
        || canonical.starts_with("qwen-qwq")
        || canonical.starts_with("qwq")
        || canonical.contains("thinking")
}
fn build_chat_completion_request(request: &MessageRequest, config: OpenAiCompatConfig) -> Value {
@@ -1259,6 +1283,22 @@ mod tests {
assert!(!is_reasoning_model("claude-sonnet-4-6"));
}
#[test]
fn qwen_reasoning_variants_are_detected() {
    // Reasoning variants: QwQ models (namespaced and bare), the Qwen3
    // thinking family, and the bare "qwq" prefix must all be flagged so
    // tuning params get stripped before payload assembly.
    let reasoning = [
        "qwen-qwq-32b",
        "qwen/qwen-qwq-32b",
        "qwen3-30b-a3b-thinking",
        "qwen/qwen3-30b-a3b-thinking",
        "qwq-plus",
    ];
    for model in reasoning {
        assert!(is_reasoning_model(model));
    }

    // Regular Qwen chat models must NOT be classified as reasoning.
    for model in ["qwen-max", "qwen/qwen-plus", "qwen-turbo"] {
        assert!(!is_reasoning_model(model));
    }
}
#[test]
fn tuning_params_omitted_from_payload_when_none() {
let request = MessageRequest {