good version for 算法注册

This commit is contained in:
2026-02-15 21:23:28 +08:00
parent 3c03777b97
commit 62ea5d36a5
115 changed files with 9566 additions and 1576 deletions

View File

@@ -0,0 +1,16 @@
FROM python:3.9-slim
WORKDIR /app
# Install dependencies first so this layer is cached across code-only changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY . .
# Document the service port; Settings.PORT defaults to 8004, not 8000
EXPOSE 8004
# Start the service
CMD ["python", "main.py"]

View File

@@ -0,0 +1,174 @@
import logging
import os
from typing import List, Dict, Any, Optional
import openai
from .config import settings
logger = logging.getLogger(__name__)
class OpenAIProxy:
    """Proxy around the OpenAI chat-completions API.

    Public methods fall back to canned mock responses on any API failure,
    so the service stays usable for demos without a valid API key.
    """

    def __init__(self):
        """Configure the `openai` module from service settings."""
        logger.info("初始化OpenAI代理")
        # Credentials / endpoint come from Settings (env-overridable).
        openai.api_key = settings.API_KEY
        if settings.API_BASE:
            # openai>=1.0 renamed `api_base` to `base_url`; assigning to
            # `openai.api_base` is silently ignored by the v1 SDK, so the
            # configured endpoint would never take effect.
            openai.base_url = settings.API_BASE

    def complete(self, model: str, messages: list, temperature: float = 0.7,
                 max_tokens: int = 1000) -> Dict[str, Any]:
        """Run a chat completion and return it as a plain dict.

        Args:
            model: Model name, e.g. "gpt-3.5-turbo".
            messages: Chat messages in OpenAI format
                (list of {"role": ..., "content": ...} dicts).
            temperature: Sampling temperature.
            max_tokens: Maximum number of tokens to generate.

        Returns:
            A dict mirroring the OpenAI chat-completion response shape,
            or a fixed mock response if the API call fails.
        """
        try:
            response = openai.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens
            )
            # Convert the SDK response object into a plain JSON-safe dict.
            return {
                "id": response.id,
                "object": response.object,
                "created": response.created,
                "model": response.model,
                "choices": [
                    {
                        "index": choice.index,
                        "message": {
                            "role": choice.message.role,
                            "content": choice.message.content
                        },
                        "finish_reason": choice.finish_reason
                    }
                    for choice in response.choices
                ],
                "usage": {
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens
                }
            }
        except Exception as e:
            logger.error(f"OpenAI completion error: {str(e)}")
            # Fall back to a mock response so demos keep working offline.
            return self._mock_completion(messages, model)

    def generate_simulation_input(self, prompt: str, input_type: str = "text") -> Dict[str, Any]:
        """Generate simulation input data from a scenario description.

        Args:
            prompt: Natural-language description of the scenario.
            input_type: One of "text", "image" or "table"; any other value
                falls back to a generic data-generation prompt.

        Returns:
            {"success": True, "data": <generated content>,
             "input_type": <echoed input_type>}; mock data on failure.
        """
        try:
            # Pick the system/user prompt pair for the requested data type.
            if input_type == "text":
                system_prompt = "你是一个文本数据生成器,根据用户描述生成相应的文本数据"
                user_prompt = f"请根据以下描述生成文本数据:{prompt}"
            elif input_type == "image":
                system_prompt = "你是一个图像描述生成器,根据用户描述生成详细的图像描述"
                user_prompt = f"请根据以下描述生成详细的图像描述:{prompt}"
            elif input_type == "table":
                system_prompt = "你是一个表格数据生成器,根据用户描述生成结构化的表格数据"
                user_prompt = f"请根据以下描述生成结构化的表格数据:{prompt}"
            else:
                system_prompt = "你是一个数据生成器,根据用户描述生成相应的数据"
                user_prompt = f"请根据以下描述生成数据:{prompt}"
            # Model and sampling parameters come from the service settings.
            response = openai.chat.completions.create(
                model=settings.MODEL,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                temperature=settings.TEMPERATURE,
                max_tokens=settings.MAX_TOKENS
            )
            generated_content = response.choices[0].message.content
            return {
                "success": True,
                "data": generated_content,
                "input_type": input_type
            }
        except Exception as e:
            logger.error(f"OpenAI simulation input generation error: {str(e)}")
            # Fall back to deterministic mock data for demos.
            return self._mock_simulation_input(prompt, input_type)

    def _mock_completion(self, messages: list, model: str) -> Dict[str, Any]:
        """Build a fixed, OpenAI-shaped mock completion (demo fallback).

        Args:
            messages: Original message list (unused; kept for parity).
            model: Model name to echo back in the response.

        Returns:
            A dict with the same shape as a real chat-completion response.
        """
        return {
            "id": "chat-mock-123",
            "object": "chat.completion",
            "created": 1677825464,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": "这是一个模拟的响应用于演示OpenAI代理服务"
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 20,
                "total_tokens": 30
            }
        }

    def _mock_simulation_input(self, prompt: str, input_type: str) -> Dict[str, Any]:
        """Build mock simulation-input data (demo fallback).

        Args:
            prompt: Scenario description to embed in the mock payload.
            input_type: Requested data type; selects the mock template.

        Returns:
            Same success-dict shape as generate_simulation_input().
        """
        if input_type == "text":
            data = f"这是根据描述生成的文本数据:{prompt}"
        elif input_type == "image":
            data = f"这是根据描述生成的图像描述:{prompt}"
        elif input_type == "table":
            data = f"这是根据描述生成的表格数据:{prompt}"
        else:
            data = f"这是根据描述生成的数据:{prompt}"
        return {
            "success": True,
            "data": data,
            "input_type": input_type
        }

View File

@@ -0,0 +1,31 @@
from typing import Optional

from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """Service configuration, overridable via environment variables / .env."""
    # Basic server binding
    HOST: str = "0.0.0.0"
    PORT: int = 8004
    DEBUG: bool = True
    # Service name
    SERVICE_NAME: str = "openai-proxy"
    # Logging
    LOG_LEVEL: str = "info"
    # OpenAI client configuration
    API_KEY: Optional[str] = None
    API_BASE: str = "https://api.openai.com/v1"
    MODEL: str = "gpt-3.5-turbo"
    TEMPERATURE: float = 0.7
    MAX_TOKENS: int = 1000
    # pydantic-settings v2 configuration; the inner `class Config` form is
    # the deprecated pydantic-v1 style.
    model_config = SettingsConfigDict(env_file=".env", case_sensitive=True)
# Global settings instance shared by the whole service
settings = Settings()

View File

@@ -0,0 +1,109 @@
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn
import json
import logging
from .ai_algorithm import OpenAIProxy
from .config import settings
# Configure logging.
# NOTE(review): level is hard-coded to INFO; settings.LOG_LEVEL appears
# unused here — confirm whether it should drive this value.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Initialize the FastAPI application
app = FastAPI(
    title="OpenAI代理服务",
    description="提供OpenAI API代理功能的服务",
    version="1.0.0"
)
# Single proxy instance shared by all endpoints
openai_proxy = OpenAIProxy()
# Request model
class CompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions."""
    model: str = "gpt-3.5-turbo"
    messages: list
    temperature: float = 0.7
    max_tokens: int = 1000
# Response model.
# NOTE(review): not referenced anywhere in this module (endpoints return
# plain dicts) — confirm whether it should be wired up as response_model.
class CompletionResponse(BaseModel):
    """Shape of an OpenAI-style chat-completion response."""
    id: str
    object: str
    created: int
    model: str
    choices: list
    usage: dict
# Request model for simulation-input generation
class GenerateSimulationInputRequest(BaseModel):
    """Request body for POST /generate-simulation-input."""
    prompt: str
    input_type: str = "text"
@app.post("/v1/chat/completions")
async def chat_completions(request: CompletionRequest):
    """Proxy endpoint for OpenAI chat completions.

    Delegates to OpenAIProxy.complete(); any failure is surfaced as a 500.
    """
    try:
        logger.info(f"Received chat completion request for model: {request.model}")
        params = {
            "model": request.model,
            "messages": request.messages,
            "temperature": request.temperature,
            "max_tokens": request.max_tokens,
        }
        result = openai_proxy.complete(**params)
        logger.info(f"Chat completion completed")
        return result
    except Exception as e:
        logger.error(f"Chat completion error: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
@app.post("/generate-simulation-input")
async def generate_simulation_input(request: GenerateSimulationInputRequest):
    """Generate simulation input data from a natural-language prompt.

    Delegates to OpenAIProxy.generate_simulation_input(); failures -> 500.
    """
    try:
        logger.info(f"Received simulation input generation request")
        result = openai_proxy.generate_simulation_input(
            prompt=request.prompt, input_type=request.input_type)
        logger.info(f"Simulation input generation completed")
        return result
    except Exception as e:
        logger.error(f"Simulation input generation error: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
async def health_check():
    """Liveness probe: report a static healthy status."""
    payload = {
        "status": "healthy",
        "service": "openai-proxy",
        "version": "1.0.0",
    }
    return payload
@app.get("/info")
async def service_info():
    """Describe the service and enumerate its HTTP endpoints."""
    endpoint_index = {
        "/v1/chat/completions": "POST - OpenAI聊天完成",
        "/generate-simulation-input": "POST - 生成仿真输入数据",
        "/health": "GET - 健康检查",
        "/info": "GET - 服务信息",
    }
    return {
        "name": "OpenAI代理服务",
        "description": "提供OpenAI API代理功能的服务",
        "version": "1.0.0",
        "endpoints": endpoint_index,
    }
# Run the development server directly.
# NOTE(review): this module uses relative imports (`from .config import
# settings`), so `python main.py` would fail with "attempted relative
# import"; confirm it is launched as a package (e.g. `uvicorn pkg.main:app`).
if __name__ == "__main__":
    uvicorn.run(
        "main:app",
        host=settings.HOST,
        port=settings.PORT,
        reload=settings.DEBUG
    )

View File

@@ -0,0 +1,6 @@
fastapi==0.104.1
uvicorn==0.24.0.post1
pydantic==2.5.2
pydantic-settings==2.1.0
openai==1.3.5
python-dotenv==1.0.0

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# Start the OpenAI proxy service.
# Abort on the first failing command so a broken setup does not half-start.
set -e
# Run from the directory containing this script
cd "$(dirname "$0")"
# Create the virtual environment on first run
if [ ! -d "venv" ]; then
    echo "创建虚拟环境..."
    python3 -m venv venv
fi
# Activate the virtual environment
echo "激活虚拟环境..."
source venv/bin/activate
# Install dependencies
echo "安装依赖..."
pip install --no-cache-dir -r requirements.txt
# Start the service
echo "启动OpenAI代理服务..."
python main.py