Update app.py
app.py
CHANGED
@@ -15,7 +15,7 @@ from apscheduler.schedulers.background import BackgroundScheduler
 from flask import Flask, request, jsonify, Response, stream_with_context
 
 os.environ['TZ'] = 'Asia/Shanghai'
-time.tzset()
+# time.tzset()
 
 logging.basicConfig(level=logging.INFO,
                     format='%(asctime)s - %(levelname)s - %(message)s')
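
Side note: time.tzset() exists only on Unix builds of CPython, so commenting it out keeps the module importable on Windows. If the intent is to keep honoring the TZ variable where it is supported (an assumption, not stated in the commit), a guarded call is a minimal alternative sketch:

import os
import time

os.environ['TZ'] = 'Asia/Shanghai'
# time.tzset() is Unix-only; guarding it lets the module import on Windows as well.
if hasattr(time, 'tzset'):
    time.tzset()
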
@@ -858,6 +858,9 @@ def handsome_images_generations():
     siliconflow_data["safety_tolerance"] = data.get("safety_tolerance", 2)
     siliconflow_data["interval"] = data.get("interval", 2)
     siliconflow_data["output_format"] = data.get("output_format", "png")
+    seed = data.get("seed")
+    if isinstance(seed, int) and 0 < seed < 9999999999:
+        siliconflow_data["seed"] = seed
 
     if siliconflow_data["width"] < 256 or siliconflow_data["width"] > 1440 or siliconflow_data["width"] % 32 != 0:
         siliconflow_data["width"] = 1024
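
The added seed handling forwards seed to the upstream payload only when it is an integer strictly between 0 and 9999999999; anything else is silently ignored. A standalone sketch of the same guard (the helper name is hypothetical, not from app.py):

def apply_seed(payload, data):
    # Forward only plain ints in the open interval (0, 9999999999); drop the rest.
    seed = data.get("seed")
    if isinstance(seed, int) and 0 < seed < 9999999999:
        payload["seed"] = seed

# apply_seed(p, {"seed": 42})    -> p["seed"] == 42
# apply_seed(p, {"seed": 0})     -> ignored (out of range)
# apply_seed(p, {"seed": "42"})  -> ignored (not an int)
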
@@ -898,7 +901,7 @@ def handsome_images_generations():
         siliconflow_data["guidance_scale"] = 0
     if siliconflow_data["guidance_scale"] > 100:
         siliconflow_data["guidance_scale"] = 100
-
+
     if "image_size" in siliconflow_data and siliconflow_data["image_size"] not in ["1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024","960x1280", "720x1440", "720x1280"]:
         siliconflow_data["image_size"] = "1024x1024"
 
@@ -1047,7 +1050,9 @@ def handsome_chat_completions():
             siliconflow_data["safety_tolerance"] = data.get("safety_tolerance", 2)
             siliconflow_data["interval"] = data.get("interval", 2)
             siliconflow_data["output_format"] = data.get("output_format", "png")
-
+            seed = data.get("seed")
+            if isinstance(seed, int) and 0 < seed < 9999999999:
+                siliconflow_data["seed"] = seed
             if siliconflow_data["width"] < 256 or siliconflow_data["width"] > 1440 or siliconflow_data["width"] % 32 != 0:
                 siliconflow_data["width"] = 1024
             if siliconflow_data["height"] < 256 or siliconflow_data["height"] > 1440 or siliconflow_data["height"] % 32 != 0:
@@ -1067,7 +1072,7 @@ def handsome_chat_completions():
             siliconflow_data["num_inference_steps"] = 20
             siliconflow_data["guidance_scale"] = 7.5
             siliconflow_data["prompt_enhancement"] = False
-
+
             if data.get("size"):
                 siliconflow_data["image_size"] = data.get("size")
             if data.get("n"):
@@ -1097,7 +1102,7 @@ def handsome_chat_completions():
                 siliconflow_data["guidance_scale"] = 0
             if siliconflow_data["guidance_scale"] > 100:
                 siliconflow_data["guidance_scale"] = 100
-
+
             if siliconflow_data["image_size"] not in ["1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024", "960x1280", "720x1440", "720x1280"]:
                 siliconflow_data["image_size"] = "1024x1024"
 
@@ -1188,7 +1193,6 @@ def handsome_chat_completions():
                         ]
                     }
                     yield f"data: {json.dumps(end_chunk_data)}\n\n".encode('utf-8')
-
                     with data_lock:
                         request_timestamps.append(time.time())
                         token_counts.append(0)
@@ -1204,15 +1208,34 @@ def handsome_chat_completions():
                                 "index": 0,
                                 "delta": {
                                     "role": "assistant",
-                                    "content": "
+                                    "content": f"Error: {str(e)}"
                                 },
-                                "finish_reason":
+                                "finish_reason": None
                             }
                         ]
                     }
                     yield f"data: {json.dumps(error_chunk_data)}\n\n".encode('utf-8')
-
+                    end_chunk_data = {
+                        "id": f"chatcmpl-{uuid.uuid4()}",
+                        "object": "chat.completion.chunk",
+                        "created": int(time.time()),
+                        "model": model_name,
+                        "choices": [
+                            {
+                                "index": 0,
+                                "delta": {},
+                                "finish_reason": "stop"
+                            }
+                        ]
+                    }
+                    yield f"data: {json.dumps(end_chunk_data)}\n\n".encode('utf-8')
+                    logging.info(
+                        f"使用的key: {api_key}, "
+                        f"使用的模型: {model_name}"
+                    )
+                    yield "data: [DONE]\n\n".encode('utf-8')
                 return Response(stream_with_context(generate()), content_type='text/event-stream')
+
             else:
                 response.raise_for_status()
                 end_time = time.time()
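
With this change the streaming error path emits an error delta, a terminal chunk with finish_reason "stop", and the [DONE] sentinel before the stream closes. Roughly, a client would now see frames of this shape (all values are placeholders; fields not visible in the diff are elided with ...):

data: {"id": "chatcmpl-...", "object": "chat.completion.chunk", ..., "choices": [{"index": 0, "delta": {"role": "assistant", "content": "Error: ..."}, "finish_reason": null}]}

data: {"id": "chatcmpl-...", "object": "chat.completion.chunk", "created": ..., "model": "...", "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}]}

data: [DONE]
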
@@ -1274,12 +1297,14 @@ def handsome_chat_completions():
                 f"总共用时: {total_time:.4f}秒, "
                 f"使用的模型: {model_name}"
             )
-
             with data_lock:
                 request_timestamps.append(time.time())
                 token_counts.append(0)
-
             return jsonify(response_data)
+
+        except requests.exceptions.RequestException as e:
+            logging.error(f"请求转发异常: {e}")
+            return jsonify({"error": str(e)}), 500
     else:
         try:
             start_time = time.time()
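
The added except branch means a failed upstream request in this non-streaming path now returns a JSON error with HTTP 500 instead of raising; roughly what a client would receive (body text depends on the exception):

HTTP/1.1 500 INTERNAL SERVER ERROR
Content-Type: application/json

{"error": "<RequestException message>"}
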
@@ -1394,11 +1419,9 @@ def handsome_chat_completions():
                 f"总共用时: {total_time:.4f}秒, "
                 f"使用的模型: {model_name}"
             )
-
             with data_lock:
                 request_timestamps.append(time.time())
                 token_counts.append(0)
-
             return jsonify(response_data)
         except requests.exceptions.RequestException as e:
             logging.error(f"请求转发异常: {e}")