Update app.py
app.py
CHANGED
@@ -15,7 +15,7 @@ from apscheduler.schedulers.background import BackgroundScheduler
 from flask import Flask, request, jsonify, Response, stream_with_context
 
 os.environ['TZ'] = 'Asia/Shanghai'
-
+time.tzset()
 
 logging.basicConfig(level=logging.INFO,
                     format='%(asctime)s - %(levelname)s - %(message)s')
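Note on the added time.tzset() call: setting os.environ['TZ'] by itself does not update the time-conversion rules of an already-running process; time.tzset() (POSIX-only, not available on Windows) re-reads TZ so that time.localtime() and the %(asctime)s timestamps emitted by logging switch to Asia/Shanghai. A minimal sketch of the pattern:

    import os
    import time

    os.environ['TZ'] = 'Asia/Shanghai'
    time.tzset()  # re-read TZ; localtime()/strftime() now report UTC+8
    print(time.strftime('%Y-%m-%d %H:%M:%S %Z'))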
@@ -858,9 +858,6 @@ def handsome_images_generations():
         siliconflow_data["safety_tolerance"] = data.get("safety_tolerance", 2)
         siliconflow_data["interval"] = data.get("interval", 2)
         siliconflow_data["output_format"] = data.get("output_format", "png")
-        seed = data.get("seed")
-        if isinstance(seed, int) and 0 < seed < 9999999999:
-            siliconflow_data["seed"] = seed
 
         if siliconflow_data["width"] < 256 or siliconflow_data["width"] > 1440 or siliconflow_data["width"] % 32 != 0:
             siliconflow_data["width"] = 1024
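The width/height context lines above encode the same rule in both handlers: a dimension must be an integer multiple of 32 in the range 256–1440, otherwise it falls back to 1024. A sketch of that rule as a stand-alone helper (clamp_dimension is a hypothetical name, not taken from app.py):

    def clamp_dimension(value, default=1024):
        # must be an int, a multiple of 32, and within [256, 1440]
        if not isinstance(value, int) or value < 256 or value > 1440 or value % 32 != 0:
            return default
        return value

    # clamp_dimension(768)  -> 768
    # clamp_dimension(1000) -> 1024 (not a multiple of 32)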
@@ -901,7 +898,7 @@ def handsome_images_generations():
             siliconflow_data["guidance_scale"] = 0
         if siliconflow_data["guidance_scale"] > 100:
             siliconflow_data["guidance_scale"] = 100
-
+        # Validate image_size
         if "image_size" in siliconflow_data and siliconflow_data["image_size"] not in ["1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024","960x1280", "720x1440", "720x1280"]:
             siliconflow_data["image_size"] = "1024x1024"
 
@@ -913,7 +910,7 @@ def handsome_images_generations():
             json=siliconflow_data,
             timeout=120
         )
-
+
         if response.status_code == 429:
             return jsonify(response.json()), 429
 
@@ -946,6 +943,7 @@ def handsome_images_generations():
                 logging.error(f"无效的图片数据: {item}")
                 openai_images.append({"url": item})
 
+
         response_data = {
             "created": int(time.time()),
             "data": openai_images
@@ -1049,9 +1047,7 @@ def handsome_chat_completions():
         siliconflow_data["safety_tolerance"] = data.get("safety_tolerance", 2)
         siliconflow_data["interval"] = data.get("interval", 2)
         siliconflow_data["output_format"] = data.get("output_format", "png")
-
-        if isinstance(seed, int) and 0 < seed < 9999999999:
-            siliconflow_data["seed"] = seed
+
         if siliconflow_data["width"] < 256 or siliconflow_data["width"] > 1440 or siliconflow_data["width"] % 32 != 0:
             siliconflow_data["width"] = 1024
         if siliconflow_data["height"] < 256 or siliconflow_data["height"] > 1440 or siliconflow_data["height"] % 32 != 0:
@@ -1071,7 +1067,7 @@ def handsome_chat_completions():
             siliconflow_data["num_inference_steps"] = 20
             siliconflow_data["guidance_scale"] = 7.5
             siliconflow_data["prompt_enhancement"] = False
-
+
         if data.get("size"):
             siliconflow_data["image_size"] = data.get("size")
         if data.get("n"):
@@ -1101,7 +1097,7 @@ def handsome_chat_completions():
             siliconflow_data["guidance_scale"] = 0
         if siliconflow_data["guidance_scale"] > 100:
             siliconflow_data["guidance_scale"] = 100
-
+
         if siliconflow_data["image_size"] not in ["1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024", "960x1280", "720x1440", "720x1280"]:
             siliconflow_data["image_size"] = "1024x1024"
 
@@ -1192,6 +1188,7 @@ def handsome_chat_completions():
                     ]
                 }
                 yield f"data: {json.dumps(end_chunk_data)}\n\n".encode('utf-8')
+
                 with data_lock:
                     request_timestamps.append(time.time())
                     token_counts.append(0)
@@ -1207,34 +1204,15 @@ def handsome_chat_completions():
                             "index": 0,
                             "delta": {
                                 "role": "assistant",
-                                "content":
+                                "content": "Failed to process image data"
                             },
-                            "finish_reason":
+                            "finish_reason": "stop"
                         }
                     ]
                 }
                 yield f"data: {json.dumps(error_chunk_data)}\n\n".encode('utf-8')
-
-                    "id": f"chatcmpl-{uuid.uuid4()}",
-                    "object": "chat.completion.chunk",
-                    "created": int(time.time()),
-                    "model": model_name,
-                    "choices": [
-                        {
-                            "index": 0,
-                            "delta": {},
-                            "finish_reason": "stop"
-                        }
-                    ]
-                }
-                yield f"data: {json.dumps(end_chunk_data)}\n\n".encode('utf-8')
-                logging.info(
-                    f"使用的key: {api_key}, "
-                    f"使用的模型: {model_name}"
-                )
-                yield "data: [DONE]\n\n".encode('utf-8')
+                yield "data: [DONE]\n\n".encode('utf-8')
             return Response(stream_with_context(generate()), content_type='text/event-stream')
-
         else:
             response.raise_for_status()
             end_time = time.time()
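The hunk above repairs the streaming error path so the client still receives a well-formed OpenAI-style SSE sequence: one error chunk whose delta carries "Failed to process image data" with finish_reason "stop", followed by the "data: [DONE]" sentinel. A minimal client-side sketch of consuming such a stream (the URL, API key, and model name are placeholders, not taken from app.py):

    import json
    import requests

    resp = requests.post(
        "http://localhost:7860/handsome/v1/chat/completions",  # assumed route
        headers={"Authorization": "Bearer sk-placeholder"},
        json={"model": "some-image-model", "stream": True,
              "messages": [{"role": "user", "content": "a cat"}]},
        stream=True,
    )
    for line in resp.iter_lines():
        if not line or not line.startswith(b"data: "):
            continue  # skip keep-alives and non-data lines
        payload = line[len(b"data: "):]
        if payload == b"[DONE]":
            break  # end-of-stream sentinel
        chunk = json.loads(payload)
        print(chunk["choices"][0]["delta"].get("content", ""), end="")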
@@ -1296,14 +1274,12 @@ def handsome_chat_completions():
                 f"总共用时: {total_time:.4f}秒, "
                 f"使用的模型: {model_name}"
             )
+
             with data_lock:
                 request_timestamps.append(time.time())
                 token_counts.append(0)
-                return jsonify(response_data)
 
-
-            logging.error(f"请求转发异常: {e}")
-            return jsonify({"error": str(e)}), 500
+            return jsonify(response_data)
     else:
         try:
             start_time = time.time()
@@ -1418,14 +1394,16 @@ def handsome_chat_completions():
                 f"总共用时: {total_time:.4f}秒, "
                 f"使用的模型: {model_name}"
             )
+
             with data_lock:
                 request_timestamps.append(time.time())
                 token_counts.append(0)
+
             return jsonify(response_data)
         except requests.exceptions.RequestException as e:
             logging.error(f"请求转发异常: {e}")
             return jsonify({"error": str(e)}), 500
-
+
 if __name__ == '__main__':
     import json
     logging.info(f"环境变量:{os.environ}")