cursor 中使用 sub2api 中的gpt 号池
- 内容介绍
- 文章标签
- 相关推荐
一、 cursor 中配置 sub2api 的url和key
注意这个地方只能写/v1,请求会走/v1/chat/completions
image1576×584 34.9 KB
二、配置模型gpt模型
这一步有两种实现方案
方案一(在cursor设置中添加自定义gpt模型)
这种方式比较简单,直接在cursor设置中添加自定义模型即可,然后在sub2api中添加映射,缺点是cursor自定义模型不能设置推理强度
image1472×312 11.6 KB
image1780×508 32.9 KB
方案二(使用cursor自带的gpt模型)推荐,可设置推理强度
这个方案麻烦点,不过可以不用手动在cursor中添加自定义模型,也不用在sub2api中添加模型映射。
不能直接使用的原因
之所以不能直接使用cursor自带的模型,是因为使用自带的模型后请求的body结构不一样,会出现如下错误:
{
"error": "ERROR_PROVIDER_ERROR",
"details": {
"title": "Provider Error",
"detail": "Provider returned error: {\"error\":{\"message\":\"Unsupported parameter: user\",\"type\":\"invalid_request_error\"}}",
"isRetryable": false,
"additionalInfo": {},
"buttons": [],
"planChoices": []
},
"isExpected": true
}
说白了就是不支持user参数
处理方案
既然不支持user 参数,那将这个参数去掉就好了,在cursor中不要直接配置sub2api的端口,中间再加一个代理,将这个user参数去除掉就一切正常了。
我自己是用go语言写了个简单的程序去处理这个,这样资源开销比较小
package main
import (
"bytes"
"encoding/json"
"errors"
"io"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strconv"
"strings"
"time"
)
const defaultMaxBodyBytes int64 = 64 << 20 // 64 MiB
// main loads configuration from the environment, builds the reverse
// proxy and its HTTP handler, then serves until the process exits.
func main() {
	var (
		listenAddr     = envString("LISTEN_ADDR", ":18081")
		upstreamRaw    = envString("UPSTREAM_URL", "http://127.0.0.1:8080")
		maxBodyBytes   = envInt64("MAX_BODY_BYTES", defaultMaxBodyBytes)
		logRequestBody = envBool("LOG_REQUEST_BODY", true)
	)

	upstream, err := url.Parse(upstreamRaw)
	if err != nil || upstream.Scheme == "" || upstream.Host == "" {
		log.Fatalf("invalid UPSTREAM_URL %q: %v", upstreamRaw, err)
	}

	server := &http.Server{
		Addr:              listenAddr,
		Handler:           newProxyHandler(newReverseProxy(upstream), maxBodyBytes, logRequestBody),
		ReadHeaderTimeout: 15 * time.Second,
	}

	log.Printf("sub2api proxy listening on %s, upstream=%s, log_request_body=%t", listenAddr, upstream.String(), logRequestBody)
	if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
		log.Fatalf("proxy server failed: %v", err)
	}
}
// newProxyHandler wraps proxy with per-request logging and, for methods
// that may carry a body, buffering/rewriting of the request body before
// it is forwarded upstream.
func newProxyHandler(proxy *httputil.ReverseProxy, maxBodyBytes int64, logRequestBody bool) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		begin := time.Now()
		reqID := firstNonEmpty(r.Header.Get("X-Request-Id"), r.Header.Get("X-Request-ID"))

		// Buffer (and possibly rewrite) the body up front; reject the
		// request when it is oversized or unreadable.
		if shouldInspectBody(r) {
			if err := inspectAndRestoreBody(r, maxBodyBytes, logRequestBody); err != nil {
				log.Printf("request rejected method=%s path=%s request_id=%q error=%v", r.Method, r.URL.Path, reqID, err)
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
		}

		log.Printf("proxy request method=%s path=%s request_id=%q remote=%s", r.Method, r.URL.Path, reqID, clientIP(r))
		proxy.ServeHTTP(w, r)
		log.Printf("proxy completed method=%s path=%s request_id=%q latency_ms=%d", r.Method, r.URL.Path, reqID, time.Since(begin).Milliseconds())
	})
}
// shouldInspectBody reports whether the request carries a body that we
// may need to buffer and rewrite before proxying.
func shouldInspectBody(r *http.Request) bool {
	if r.Body == nil {
		return false
	}
	return methodCanHaveBody(r.Method)
}
// methodCanHaveBody reports whether method conventionally carries a
// request body worth inspecting (POST, PUT, PATCH, DELETE).
func methodCanHaveBody(method string) bool {
	return method == http.MethodPost ||
		method == http.MethodPut ||
		method == http.MethodPatch ||
		method == http.MethodDelete
}
// inspectAndRestoreBody fully buffers the request body (bounded by
// maxBodyBytes), strips the unsupported "user" parameter from chat
// completion requests, optionally logs the payload, and restores the
// (possibly rewritten) body on r so the reverse proxy can forward and
// retry it.
//
// It returns an error only when the body cannot be read — e.g. it
// exceeds maxBodyBytes — in which case the caller rejects the request.
func inspectAndRestoreBody(r *http.Request, maxBodyBytes int64, logRequestBody bool) error {
	if r.Body == nil {
		return nil
	}
	defer r.Body.Close()

	// MaxBytesReader enforces the size cap; the nil ResponseWriter is
	// fine because it is used purely as a bounded reader here.
	limited := http.MaxBytesReader(nil, r.Body, maxBodyBytes)
	body, err := io.ReadAll(limited)
	if err != nil {
		return err
	}

	// Cursor's built-in models send a top-level "user" field that the
	// upstream rejects ("Unsupported parameter: user"), so drop it.
	if isChatCompletionsRequest(r) {
		filteredBody, removed, err := removeTopLevelJSONKeys(body, "user")
		if err != nil {
			// Malformed or non-object JSON: forward the body unchanged.
			log.Printf("chat parameter filter skipped path=%s error=%v", r.URL.Path, err)
		} else if removed {
			log.Printf("chat parameter filter removed_keys=user path=%s original_bytes=%d filtered_bytes=%d", r.URL.Path, len(body), len(filteredBody))
			body = filteredBody
		}
	}

	if len(body) > 0 {
		logRequestBodyMetadata(r, body)
		if logRequestBody {
			log.Printf("request body method=%s path=%s body=%s", r.Method, r.URL.Path, string(body))
		}
	}

	// Restore the body so the proxy can (re)read it, and keep
	// ContentLength/Content-Length consistent with the rewritten payload.
	r.Body = io.NopCloser(bytes.NewReader(body))
	r.ContentLength = int64(len(body))
	r.GetBody = func() (io.ReadCloser, error) {
		return io.NopCloser(bytes.NewReader(body)), nil
	}
	r.Header.Set("Content-Length", strconv.Itoa(len(body)))
	return nil
}
func isChatCompletionsRequest(r *http.Request) bool {
return r.Method == http.MethodPost && r.URL.Path == "/v1/chat/completions"
}
// removeTopLevelJSONKeys deletes the given top-level keys from a JSON
// object and re-encodes it. It returns the resulting payload, a flag
// indicating whether anything was removed, and an error when body is
// not a JSON object. When no key matched, body is returned untouched.
func removeTopLevelJSONKeys(body []byte, keys ...string) ([]byte, bool, error) {
	var fields map[string]json.RawMessage
	if err := json.Unmarshal(body, &fields); err != nil {
		return nil, false, err
	}
	before := len(fields)
	for _, key := range keys {
		delete(fields, key)
	}
	if len(fields) == before {
		// Nothing matched: hand back the original bytes unchanged.
		return body, false, nil
	}
	out, err := json.Marshal(fields)
	if err != nil {
		return nil, false, err
	}
	return out, true, nil
}
func logRequestBodyMetadata(r *http.Request, body []byte) {
var payload struct {
Model string `json:"model"`
Stream *bool `json:"stream"`
}
if err := json.Unmarshal(body, &payload); err != nil {
log.Printf("request body metadata method=%s path=%s body_bytes=%d json_valid=false", r.Method, r.URL.Path, len(body))
return
}
stream := "unset"
if payload.Stream != nil {
stream = strconv.FormatBool(*payload.Stream)
}
log.Printf("request body metadata method=%s path=%s model=%q stream=%s body_bytes=%d json_valid=true", r.Method, r.URL.Path, payload.Model, stream, len(body))
}
// newReverseProxy builds a single-host reverse proxy for upstream with
// immediate response flushing (needed for streamed chat completions),
// a tuned connection pool, and X-Forwarded-* headers set for upstream.
func newReverseProxy(upstream *url.URL) *httputil.ReverseProxy {
	proxy := httputil.NewSingleHostReverseProxy(upstream)
	originalDirector := proxy.Director
	proxy.Director = func(r *http.Request) {
		// Capture the client-facing host before the default director
		// rewrites the request URL, then present upstream's own host.
		originalHost := r.Host
		originalDirector(r)
		r.Host = upstream.Host
		if originalHost != "" {
			r.Header.Set("X-Forwarded-Host", originalHost)
		}
		r.Header.Set("X-Forwarded-Proto", forwardedProto(r))
	}
	// Negative FlushInterval flushes after every write so SSE chunks
	// reach the client without buffering delay.
	proxy.FlushInterval = -1
	proxy.Transport = &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		ForceAttemptHTTP2:     true,
		MaxIdleConns:          100,
		MaxIdleConnsPerHost:   100, // raised from the default of 2 for a single upstream
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	// Log upstream failures and answer with a terse 502 instead of the
	// default error body.
	proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
		log.Printf("upstream error method=%s path=%s error=%v", r.Method, r.URL.Path, err)
		http.Error(w, "upstream unavailable", http.StatusBadGateway)
	}
	return proxy
}
func forwardedProto(r *http.Request) string {
if proto := r.Header.Get("X-Forwarded-Proto"); proto != "" {
return proto
}
if r.TLS != nil {
return "https"
}
return "http"
}
func clientIP(r *http.Request) string {
if value := r.Header.Get("X-Forwarded-For"); value != "" {
parts := strings.Split(value, ",")
return strings.TrimSpace(parts[0])
}
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return r.RemoteAddr
}
return host
}
func envString(key, fallback string) string {
if value := strings.TrimSpace(os.Getenv(key)); value != "" {
return value
}
return fallback
}
func envInt64(key string, fallback int64) int64 {
value := strings.TrimSpace(os.Getenv(key))
if value == "" {
return fallback
}
parsed, err := strconv.ParseInt(value, 10, 64)
if err != nil || parsed <= 0 {
log.Printf("invalid %s=%q, using default %d", key, value, fallback)
return fallback
}
return parsed
}
func envBool(key string, fallback bool) bool {
value := strings.ToLower(strings.TrimSpace(os.Getenv(key)))
if value == "" {
return fallback
}
switch value {
case "1", "true", "yes", "y", "on":
return true
case "0", "false", "no", "n", "off":
return false
default:
log.Printf("invalid %s=%q, using default %t", key, value, fallback)
return fallback
}
}
// firstNonEmpty returns the first non-empty string among values, or ""
// when every value (or no value) is supplied empty.
func firstNonEmpty(values ...string) string {
	for i := range values {
		if values[i] != "" {
			return values[i]
		}
	}
	return ""
}
最终效果:
image1620×974 27.1 KB
image1862×940 113 KB
网友解答:--【壹】--:
cursor是pro或pro以上才能自己接模型吗,free能接吗?
--【贰】--:
Cursor++ | 极为顺滑的 BYOK Server 集成
--【叁】--:
不行,free 只能使用 auto ,选择其它模型会出现下面的错误
image1296×324 22.2 KB
一、 cursor 中配置 sub2api 的url和key
注意这个地方只能写/v1,请求会走/v1/chat/completions
image1576×584 34.9 KB
二、配置模型gpt模型
这一步有两种实现方案
方案一(在cursor设置中添加自定义gpt模型)
这种方式比较简单,直接在cursor设置中添加自定义模型即可,然后在sub2api中添加映射,缺点是cursor自定义模型不能设置推理强度
image1472×312 11.6 KB
image1780×508 32.9 KB
方案二(使用cursor自带的gpt模型)推荐,可设置推理强度
这个方案麻烦点,不过可以不用手动在cursor中添加自定义模型,也不用在sub2api中添加模型映射。
不能直接使用的原因
之所以不能直接使用cursor自带的模型,是因为使用自带的模型后请求的body结构不一样,会出现如下错误:
{
"error": "ERROR_PROVIDER_ERROR",
"details": {
"title": "Provider Error",
"detail": "Provider returned error: {\"error\":{\"message\":\"Unsupported parameter: user\",\"type\":\"invalid_request_error\"}}",
"isRetryable": false,
"additionalInfo": {},
"buttons": [],
"planChoices": []
},
"isExpected": true
}
说白了就是不支持user参数
处理方案
既然不支持user 参数,那将这个参数去掉就好了,在cursor中不要直接配置sub2api的端口,中间再加一个代理,将这个user参数去除掉就一切正常了。
我自己是用go语言写了个简单的程序去处理这个,这样资源开销比较小
package main
import (
"bytes"
"encoding/json"
"errors"
"io"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strconv"
"strings"
"time"
)
const defaultMaxBodyBytes int64 = 64 << 20 // 64 MiB
// main loads configuration from the environment, builds the reverse
// proxy and its HTTP handler, then serves until the process exits.
func main() {
	var (
		listenAddr     = envString("LISTEN_ADDR", ":18081")
		upstreamRaw    = envString("UPSTREAM_URL", "http://127.0.0.1:8080")
		maxBodyBytes   = envInt64("MAX_BODY_BYTES", defaultMaxBodyBytes)
		logRequestBody = envBool("LOG_REQUEST_BODY", true)
	)

	upstream, err := url.Parse(upstreamRaw)
	if err != nil || upstream.Scheme == "" || upstream.Host == "" {
		log.Fatalf("invalid UPSTREAM_URL %q: %v", upstreamRaw, err)
	}

	server := &http.Server{
		Addr:              listenAddr,
		Handler:           newProxyHandler(newReverseProxy(upstream), maxBodyBytes, logRequestBody),
		ReadHeaderTimeout: 15 * time.Second,
	}

	log.Printf("sub2api proxy listening on %s, upstream=%s, log_request_body=%t", listenAddr, upstream.String(), logRequestBody)
	if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
		log.Fatalf("proxy server failed: %v", err)
	}
}
// newProxyHandler wraps proxy with per-request logging and, for methods
// that may carry a body, buffering/rewriting of the request body before
// it is forwarded upstream.
func newProxyHandler(proxy *httputil.ReverseProxy, maxBodyBytes int64, logRequestBody bool) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		begin := time.Now()
		reqID := firstNonEmpty(r.Header.Get("X-Request-Id"), r.Header.Get("X-Request-ID"))

		// Buffer (and possibly rewrite) the body up front; reject the
		// request when it is oversized or unreadable.
		if shouldInspectBody(r) {
			if err := inspectAndRestoreBody(r, maxBodyBytes, logRequestBody); err != nil {
				log.Printf("request rejected method=%s path=%s request_id=%q error=%v", r.Method, r.URL.Path, reqID, err)
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
		}

		log.Printf("proxy request method=%s path=%s request_id=%q remote=%s", r.Method, r.URL.Path, reqID, clientIP(r))
		proxy.ServeHTTP(w, r)
		log.Printf("proxy completed method=%s path=%s request_id=%q latency_ms=%d", r.Method, r.URL.Path, reqID, time.Since(begin).Milliseconds())
	})
}
// shouldInspectBody reports whether the request carries a body that we
// may need to buffer and rewrite before proxying.
func shouldInspectBody(r *http.Request) bool {
	if r.Body == nil {
		return false
	}
	return methodCanHaveBody(r.Method)
}
// methodCanHaveBody reports whether method conventionally carries a
// request body worth inspecting (POST, PUT, PATCH, DELETE).
func methodCanHaveBody(method string) bool {
	return method == http.MethodPost ||
		method == http.MethodPut ||
		method == http.MethodPatch ||
		method == http.MethodDelete
}
// inspectAndRestoreBody fully buffers the request body (bounded by
// maxBodyBytes), strips the unsupported "user" parameter from chat
// completion requests, optionally logs the payload, and restores the
// (possibly rewritten) body on r so the reverse proxy can forward and
// retry it.
//
// It returns an error only when the body cannot be read — e.g. it
// exceeds maxBodyBytes — in which case the caller rejects the request.
func inspectAndRestoreBody(r *http.Request, maxBodyBytes int64, logRequestBody bool) error {
	if r.Body == nil {
		return nil
	}
	defer r.Body.Close()

	// MaxBytesReader enforces the size cap; the nil ResponseWriter is
	// fine because it is used purely as a bounded reader here.
	limited := http.MaxBytesReader(nil, r.Body, maxBodyBytes)
	body, err := io.ReadAll(limited)
	if err != nil {
		return err
	}

	// Cursor's built-in models send a top-level "user" field that the
	// upstream rejects ("Unsupported parameter: user"), so drop it.
	if isChatCompletionsRequest(r) {
		filteredBody, removed, err := removeTopLevelJSONKeys(body, "user")
		if err != nil {
			// Malformed or non-object JSON: forward the body unchanged.
			log.Printf("chat parameter filter skipped path=%s error=%v", r.URL.Path, err)
		} else if removed {
			log.Printf("chat parameter filter removed_keys=user path=%s original_bytes=%d filtered_bytes=%d", r.URL.Path, len(body), len(filteredBody))
			body = filteredBody
		}
	}

	if len(body) > 0 {
		logRequestBodyMetadata(r, body)
		if logRequestBody {
			log.Printf("request body method=%s path=%s body=%s", r.Method, r.URL.Path, string(body))
		}
	}

	// Restore the body so the proxy can (re)read it, and keep
	// ContentLength/Content-Length consistent with the rewritten payload.
	r.Body = io.NopCloser(bytes.NewReader(body))
	r.ContentLength = int64(len(body))
	r.GetBody = func() (io.ReadCloser, error) {
		return io.NopCloser(bytes.NewReader(body)), nil
	}
	r.Header.Set("Content-Length", strconv.Itoa(len(body)))
	return nil
}
func isChatCompletionsRequest(r *http.Request) bool {
return r.Method == http.MethodPost && r.URL.Path == "/v1/chat/completions"
}
// removeTopLevelJSONKeys deletes the given top-level keys from a JSON
// object and re-encodes it. It returns the resulting payload, a flag
// indicating whether anything was removed, and an error when body is
// not a JSON object. When no key matched, body is returned untouched.
func removeTopLevelJSONKeys(body []byte, keys ...string) ([]byte, bool, error) {
	var fields map[string]json.RawMessage
	if err := json.Unmarshal(body, &fields); err != nil {
		return nil, false, err
	}
	before := len(fields)
	for _, key := range keys {
		delete(fields, key)
	}
	if len(fields) == before {
		// Nothing matched: hand back the original bytes unchanged.
		return body, false, nil
	}
	out, err := json.Marshal(fields)
	if err != nil {
		return nil, false, err
	}
	return out, true, nil
}
func logRequestBodyMetadata(r *http.Request, body []byte) {
var payload struct {
Model string `json:"model"`
Stream *bool `json:"stream"`
}
if err := json.Unmarshal(body, &payload); err != nil {
log.Printf("request body metadata method=%s path=%s body_bytes=%d json_valid=false", r.Method, r.URL.Path, len(body))
return
}
stream := "unset"
if payload.Stream != nil {
stream = strconv.FormatBool(*payload.Stream)
}
log.Printf("request body metadata method=%s path=%s model=%q stream=%s body_bytes=%d json_valid=true", r.Method, r.URL.Path, payload.Model, stream, len(body))
}
// newReverseProxy builds a single-host reverse proxy for upstream with
// immediate response flushing (needed for streamed chat completions),
// a tuned connection pool, and X-Forwarded-* headers set for upstream.
func newReverseProxy(upstream *url.URL) *httputil.ReverseProxy {
	proxy := httputil.NewSingleHostReverseProxy(upstream)
	originalDirector := proxy.Director
	proxy.Director = func(r *http.Request) {
		// Capture the client-facing host before the default director
		// rewrites the request URL, then present upstream's own host.
		originalHost := r.Host
		originalDirector(r)
		r.Host = upstream.Host
		if originalHost != "" {
			r.Header.Set("X-Forwarded-Host", originalHost)
		}
		r.Header.Set("X-Forwarded-Proto", forwardedProto(r))
	}
	// Negative FlushInterval flushes after every write so SSE chunks
	// reach the client without buffering delay.
	proxy.FlushInterval = -1
	proxy.Transport = &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		ForceAttemptHTTP2:     true,
		MaxIdleConns:          100,
		MaxIdleConnsPerHost:   100, // raised from the default of 2 for a single upstream
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	// Log upstream failures and answer with a terse 502 instead of the
	// default error body.
	proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
		log.Printf("upstream error method=%s path=%s error=%v", r.Method, r.URL.Path, err)
		http.Error(w, "upstream unavailable", http.StatusBadGateway)
	}
	return proxy
}
func forwardedProto(r *http.Request) string {
if proto := r.Header.Get("X-Forwarded-Proto"); proto != "" {
return proto
}
if r.TLS != nil {
return "https"
}
return "http"
}
func clientIP(r *http.Request) string {
if value := r.Header.Get("X-Forwarded-For"); value != "" {
parts := strings.Split(value, ",")
return strings.TrimSpace(parts[0])
}
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return r.RemoteAddr
}
return host
}
func envString(key, fallback string) string {
if value := strings.TrimSpace(os.Getenv(key)); value != "" {
return value
}
return fallback
}
func envInt64(key string, fallback int64) int64 {
value := strings.TrimSpace(os.Getenv(key))
if value == "" {
return fallback
}
parsed, err := strconv.ParseInt(value, 10, 64)
if err != nil || parsed <= 0 {
log.Printf("invalid %s=%q, using default %d", key, value, fallback)
return fallback
}
return parsed
}
func envBool(key string, fallback bool) bool {
value := strings.ToLower(strings.TrimSpace(os.Getenv(key)))
if value == "" {
return fallback
}
switch value {
case "1", "true", "yes", "y", "on":
return true
case "0", "false", "no", "n", "off":
return false
default:
log.Printf("invalid %s=%q, using default %t", key, value, fallback)
return fallback
}
}
// firstNonEmpty returns the first non-empty string among values, or ""
// when every value (or no value) is supplied empty.
func firstNonEmpty(values ...string) string {
	for i := range values {
		if values[i] != "" {
			return values[i]
		}
	}
	return ""
}
最终效果:
image1620×974 27.1 KB
image1862×940 113 KB
网友解答:--【壹】--:
cursor是pro或pro以上才能自己接模型吗,free能接吗?
--【贰】--:
Cursor++ | 极为顺滑的 BYOK Server 集成
--【叁】--:
不行,free 只能使用 auto ,选择其它模型会出现下面的错误
image1296×324 22.2 KB

