Skip to content

Commit 2bdd177

Browse files
author
root
committed
feat: upgrade image routing and reference workflows
1 parent 1bc09a1 commit 2bdd177

22 files changed

Lines changed: 1798 additions & 56 deletions

.gitignore

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,10 @@ wheels/
99
# Virtual environments
1010
.venv
1111
.idea
12+
.env
13+
.env.*
14+
token/
1215
data
1316
config.json
1417

15-
docker-compose-local.yml
18+
docker-compose-local.yml

README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ ChatGPT 图片生成代理与账号池管理面板,提供账号维护、额度
99
- 批量导入和管理 `access_token`
1010
- 自动刷新账号邮箱、类型、图片额度、恢复时间
1111
- 轮询可用账号进行图片生成
12+
- `gpt-image-2` 账号优先级路由、自动降级、后台自动补池
1213
- 失效 Token 自动剔除
1314
- 提供 Web 后台管理账号和生成图片
1415

@@ -54,6 +55,7 @@ git clone git@github.com:basketikun/chatgpt2api.git
5455
cd chatgpt2api
5556
export CHATGPT2API_AUTH_KEY='replace-with-your-auth-key'
5657
export CHATGPT2API_PUBLIC_BASE_URL='http://37.114.42.229:9090'
58+
export CHATGPT2API_IMG2_PROBE_INTERVAL_SECONDS='900'
5759
docker compose up --build -d
5860
```
5961

config.example.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,5 +3,8 @@
33
"public-base-url": "http://127.0.0.1:9090",
44
"image-ttl-hours": 360,
55
"image-cleanup-interval-seconds": 300,
6+
"img2-probe-interval-seconds": 900,
7+
"img2-probe-runs": 3,
8+
"img2-probe-concurrency": 4,
69
"tls-verify": true
710
}

docker-compose.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@ services:
99
CHATGPT2API_PUBLIC_BASE_URL: ${CHATGPT2API_PUBLIC_BASE_URL:-}
1010
CHATGPT2API_IMAGE_TTL_HOURS: ${CHATGPT2API_IMAGE_TTL_HOURS:-360}
1111
CHATGPT2API_IMAGE_CLEANUP_INTERVAL_SECONDS: ${CHATGPT2API_IMAGE_CLEANUP_INTERVAL_SECONDS:-300}
12+
CHATGPT2API_IMG2_PROBE_INTERVAL_SECONDS: ${CHATGPT2API_IMG2_PROBE_INTERVAL_SECONDS:-900}
13+
CHATGPT2API_IMG2_PROBE_RUNS: ${CHATGPT2API_IMG2_PROBE_RUNS:-3}
14+
CHATGPT2API_IMG2_PROBE_CONCURRENCY: ${CHATGPT2API_IMG2_PROBE_CONCURRENCY:-4}
1215
ports:
1316
- "9090:80"
1417
volumes:

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ requires-python = ">=3.13"
77
dependencies = [
88
"curl-cffi>=0.15.0",
99
"fastapi>=0.136.0",
10+
"python-multipart>=0.0.20",
1011
"pybase64>=1.4.3",
1112
"uvicorn>=0.44.0",
1213
]

services/account_service.py

Lines changed: 190 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ def __init__(self, store_file: Path):
3030
self._lock = Lock()
3131
self._index = 0
3232
self._accounts = self._load_accounts()
33+
self._backfill_missing_preferred_models()
3334

3435
@staticmethod
3536
def _clean_token(value: Any) -> str:
@@ -59,6 +60,16 @@ def _is_image_account_available(account: dict) -> bool:
5960
return False
6061
return int(account.get("quota") or 0) > 0
6162

63+
@staticmethod
def _should_default_to_img1(account: dict) -> bool:
    """Return True when *account* should be defaulted to gpt-image-1.

    An account qualifies only when it is a dict, has no explicit
    preferred_models set, is not in a disabled/abnormal status, and
    still has image quota remaining.
    """
    if not isinstance(account, dict):
        return False
    has_preference = bool(account.get("preferred_models"))
    status = str(account.get("status") or "").strip()
    # "禁用" (disabled) / "异常" (abnormal) accounts are never defaulted.
    blocked = status in {"禁用", "异常"}
    if has_preference or blocked:
        return False
    quota = int(account.get("quota") or 0)
    return quota > 0
72+
6273
def _decode_access_token_payload(self, access_token: str) -> dict[str, Any]:
6374
parts = self._clean_token(access_token).split(".")
6475
if len(parts) < 2:
@@ -138,6 +149,27 @@ def _normalize_account(self, item: dict) -> dict | None:
138149
]
139150
else:
140151
normalized["preferred_models"] = []
152+
if self._should_default_to_img1(normalized):
153+
normalized["preferred_models"] = ["gpt-image-1"]
154+
routing_priority = normalized.get("routing_priority")
155+
if routing_priority is None or str(routing_priority).strip() == "":
156+
routing_priority = 100 if "gpt-image-2" in normalized["preferred_models"] else 0
157+
normalized["routing_priority"] = int(routing_priority)
158+
recent_img2_results = normalized.get("recent_img2_results")
159+
if isinstance(recent_img2_results, list):
160+
normalized["recent_img2_results"] = [
161+
self._clean_token(item)
162+
for item in recent_img2_results
163+
if self._clean_token(item) in {"gptimage1", "gptimage2"}
164+
]
165+
else:
166+
normalized["recent_img2_results"] = []
167+
normalized["consecutive_img1_results"] = max(0, int(normalized.get("consecutive_img1_results") or 0))
168+
normalized["auto_managed"] = bool(normalized.get("auto_managed") or False)
169+
normalized["last_probe_at"] = self._clean_token(normalized.get("last_probe_at")) or None
170+
normalized["last_probe_result"] = self._clean_token(normalized.get("last_probe_result")) or None
171+
normalized["promotion_count"] = int(normalized.get("promotion_count") or 0)
172+
normalized["demotion_count"] = int(normalized.get("demotion_count") or 0)
141173
normalized["restore_at"] = self._clean_token(normalized.get("restore_at")) or None
142174
normalized["success"] = int(normalized.get("success") or 0)
143175
normalized["fail"] = int(normalized.get("fail") or 0)
@@ -167,6 +199,20 @@ def _load_accounts(self) -> list[dict]:
167199
return []
168200
return [normalized for item in data if (normalized := self._normalize_account(item)) is not None]
169201

202+
def _backfill_missing_preferred_models(self) -> None:
    """One-time migration: give legacy accounts an explicit model preference.

    Accounts loaded without preferred_models that are still usable get
    "gpt-image-1" as their default, with routing_priority normalized to
    an int (0 when unset). The store is rewritten only when at least one
    account was touched. Runs from __init__ — presumably before any
    concurrent access, since no lock is taken here.
    """
    migrated: list[dict] = []
    dirty = False
    for original in self._accounts:
        entry = dict(original)
        if self._should_default_to_img1(entry):
            entry["preferred_models"] = ["gpt-image-1"]
            # Keep whatever priority was present; coerce to int, 0 if unset.
            entry["routing_priority"] = int(entry.get("routing_priority") or 0)
            dirty = True
        migrated.append(entry)
    if dirty:
        self._accounts = migrated
        self._save_accounts()
215+
170216
def _save_accounts(self) -> None:
171217
self.store_file.parent.mkdir(parents=True, exist_ok=True)
172218
self.store_file.write_text(
@@ -218,6 +264,14 @@ def _public_items(self, accounts: list[dict]) -> list[dict]:
218264
"limits_progress": account.get("limits_progress") or [],
219265
"default_model_slug": account.get("default_model_slug"),
220266
"preferred_models": account.get("preferred_models") or [],
267+
"routing_priority": int(account.get("routing_priority") or 0),
268+
"recent_img2_results": account.get("recent_img2_results") or [],
269+
"consecutive_img1_results": int(account.get("consecutive_img1_results") or 0),
270+
"auto_managed": bool(account.get("auto_managed") or False),
271+
"last_probe_at": account.get("last_probe_at"),
272+
"last_probe_result": account.get("last_probe_result"),
273+
"promotion_count": int(account.get("promotion_count") or 0),
274+
"demotion_count": int(account.get("demotion_count") or 0),
221275
"restoreAt": account.get("restore_at"),
222276
"success": int(account.get("success") or 0),
223277
"fail": int(account.get("fail") or 0),
@@ -246,13 +300,19 @@ def next_token(self, excluded_tokens: set[str] | None = None, model: str | None
246300
raise RuntimeError(f"No available tokens found in {self.store_file}")
247301
if model_name:
248302
preferred = [
249-
token
303+
(item, token)
250304
for item, token in candidates
251305
if model_name in (item.get("preferred_models") or [])
252306
]
253-
tokens = preferred or [token for _, token in candidates]
307+
selected_candidates = preferred or candidates
254308
else:
255-
tokens = [token for _, token in candidates]
309+
selected_candidates = candidates
310+
max_priority = max(int(item.get("routing_priority") or 0) for item, _ in selected_candidates)
311+
tokens = [
312+
token
313+
for item, token in selected_candidates
314+
if int(item.get("routing_priority") or 0) == max_priority
315+
]
256316
access_token = tokens[self._index % len(tokens)]
257317
self._index += 1
258318
return access_token
@@ -271,6 +331,20 @@ def list_accounts(self) -> list[dict]:
271331
with self._lock:
272332
return self._public_items(self._accounts)
273333

334+
def list_available_accounts(self, model: str | None = None) -> list[dict]:
    """Return copies of all currently usable accounts, optionally by model.

    An account is included when it passes the availability check and,
    if *model* is given (after token cleaning), that model appears in
    the account's preferred_models list.
    """
    wanted = self._clean_token(model)
    with self._lock:
        result: list[dict] = []
        for account in self._accounts:
            if not self._is_image_account_available(account):
                continue
            if wanted and wanted not in (account.get("preferred_models") or []):
                continue
            # Copy so callers cannot mutate the live store entries.
            result.append(dict(account))
        return result
344+
345+
def count_available_accounts(self, model: str | None = None) -> int:
    """Return the number of accounts currently available for *model*."""
    available = self.list_available_accounts(model=model)
    return len(available)
347+
274348
def list_limited_tokens(self) -> list[str]:
275349
with self._lock:
276350
return [
@@ -374,6 +448,119 @@ def mark_image_result(self, access_token: str, success: bool) -> dict | None:
374448
return dict(account)
375449
return None
376450

451+
def demote_to_img1(self, access_token: str, reason: str = "auto") -> dict | None:
    """Force an account back onto gpt-image-1 routing.

    Clears the routing priority, marks the account as auto-managed,
    records the demotion reason and bumps the demotion counter, then
    persists the store. Returns a copy of the updated account, or None
    when the token is blank/unknown or the mutated record fails
    normalization.
    """
    token = self._clean_token(access_token)
    if not token:
        return None
    with self._lock:
        pos = self._find_account_index(token)
        if pos < 0:
            return None
        updated = dict(self._accounts[pos])
        updated["preferred_models"] = ["gpt-image-1"]
        updated["routing_priority"] = 0
        updated["auto_managed"] = True
        # Fall back to a generic marker when the caller gives no reason.
        updated["last_probe_result"] = self._clean_token(reason) or "auto_demoted"
        updated["demotion_count"] = int(updated.get("demotion_count") or 0) + 1
        normalized = self._normalize_account(updated)
        if normalized is None:
            return None
        self._accounts[pos] = normalized
        self._save_accounts()
        return dict(normalized)
471+
472+
def promote_to_img2(self, access_token: str, priority: int = 100, reason: str = "probe_promoted") -> dict | None:
    """Switch an account onto gpt-image-2 routing after a successful probe.

    Sets the routing priority, stamps the probe time and result, bumps
    the promotion counter, and resets the img1 demotion trackers before
    persisting. Returns a copy of the updated account, or None when the
    token is blank/unknown or normalization rejects the record.
    """
    token = self._clean_token(access_token)
    if not token:
        return None
    with self._lock:
        pos = self._find_account_index(token)
        if pos < 0:
            return None
        updated = dict(self._accounts[pos])
        updated["preferred_models"] = ["gpt-image-2"]
        updated["routing_priority"] = int(priority)
        updated["auto_managed"] = True
        updated["last_probe_at"] = datetime.now().isoformat(timespec="seconds")
        updated["last_probe_result"] = self._clean_token(reason) or "probe_promoted"
        updated["promotion_count"] = int(updated.get("promotion_count") or 0) + 1
        # A fresh promotion wipes the demotion evidence so the account
        # starts with a clean img2 track record.
        updated["consecutive_img1_results"] = 0
        updated["recent_img2_results"] = []
        normalized = self._normalize_account(updated)
        if normalized is None:
            return None
        self._accounts[pos] = normalized
        self._save_accounts()
        return dict(normalized)
495+
496+
def update_probe_result(self, access_token: str, result: str) -> dict | None:
    """Record the latest probe outcome for an account without rerouting it.

    Stamps last_probe_at with the current timestamp and stores the
    cleaned result string (None when blank). Returns a copy of the
    updated account, or None for a blank/unknown token or a record
    that fails normalization.
    """
    token = self._clean_token(access_token)
    if not token:
        return None
    with self._lock:
        pos = self._find_account_index(token)
        if pos < 0:
            return None
        updated = dict(self._accounts[pos])
        updated["last_probe_at"] = datetime.now().isoformat(timespec="seconds")
        updated["last_probe_result"] = self._clean_token(result) or None
        normalized = self._normalize_account(updated)
        if normalized is None:
            return None
        self._accounts[pos] = normalized
        self._save_accounts()
        return dict(normalized)
513+
514+
def record_img2_outcome(
    self,
    access_token: str,
    classification: str,
    *,
    recent_window: int = 6,
    recent_img1_threshold: int = 4,
    consecutive_img1_threshold: int = 3,
) -> dict | None:
    """Record a gpt-image-2 generation classification and auto-demote if needed.

    Appends *classification* ("gptimage1" or "gptimage2") to the account's
    rolling result history (trimmed to the last *recent_window* entries) and
    updates the consecutive-img1 counter. If the account currently prefers
    gpt-image-2 and either the consecutive counter reaches
    *consecutive_img1_threshold* or the window contains at least
    *recent_img1_threshold* "gptimage1" results, the account is demoted back
    to gpt-image-1 routing.

    Returns a copy of the updated account with an extra transient
    "_auto_demoted" bool for the caller, or None when the token is
    blank/unknown, the classification is invalid, or normalization fails.
    The persisted record has all underscore-prefixed keys stripped.
    """
    access_token = self._clean_token(access_token)
    classification = self._clean_token(classification)
    if not access_token or classification not in {"gptimage1", "gptimage2"}:
        return None
    with self._lock:
        index = self._find_account_index(access_token)
        if index < 0:
            return None
        next_item = dict(self._accounts[index])
        # Rolling window of recent classifications, newest last.
        history = list(next_item.get("recent_img2_results") or [])
        history.append(classification)
        if recent_window > 0:
            history = history[-recent_window:]
        next_item["recent_img2_results"] = history
        # Track consecutive img1 outcomes; any img2 result resets the streak.
        if classification == "gptimage1":
            next_item["consecutive_img1_results"] = int(next_item.get("consecutive_img1_results") or 0) + 1
        else:
            next_item["consecutive_img1_results"] = 0

        # Demote only accounts currently routed to gpt-image-2, on either
        # a long img1 streak or too many img1 results in the window.
        should_demote = (
            "gpt-image-2" in (next_item.get("preferred_models") or [])
            and (
                int(next_item.get("consecutive_img1_results") or 0) >= consecutive_img1_threshold
                or history.count("gptimage1") >= recent_img1_threshold
            )
        )
        if should_demote:
            next_item["preferred_models"] = ["gpt-image-1"]
            next_item["routing_priority"] = 0
            next_item["auto_managed"] = True
            next_item["last_probe_result"] = "auto_demoted"
            next_item["demotion_count"] = int(next_item.get("demotion_count") or 0) + 1

        account = self._normalize_account(next_item)
        if account is None:
            return None
        # Transient flag for the caller only; stripped before persistence below.
        account["_auto_demoted"] = should_demote
        self._accounts[index] = {k: v for k, v in account.items() if not str(k).startswith("_")}
        self._save_accounts()
        return dict(account)
563+
377564
def fetch_remote_info(self, access_token: str) -> dict[str, Any]:
378565
access_token = self._clean_token(access_token)
379566
if not access_token:

0 commit comments

Comments
 (0)