# File: algorithm/services/openai-proxy/main.py
# (110 lines, 3.0 KiB, Python — extraction listing header repaired into a comment)
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn
import json
import logging
from .ai_algorithm import OpenAIProxy
from .config import settings
# Logging setup: timestamped, module-tagged records at INFO level.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)

# Module-level logger shared by all request handlers in this file.
logger = logging.getLogger(__name__)
# FastAPI application that exposes the OpenAI proxy endpoints.
app = FastAPI(
    title="OpenAI代理服务",
    description="提供OpenAI API代理功能的服务",
    version="1.0.0",
)

# Single shared proxy instance used by every route handler below.
openai_proxy = OpenAIProxy()
class CompletionRequest(BaseModel):
    """Request body for the /v1/chat/completions endpoint."""

    model: str = "gpt-3.5-turbo"  # target model identifier
    # presumably a list of OpenAI-style message dicts — confirm against OpenAIProxy.complete
    messages: list
    temperature: float = 0.7  # sampling temperature
    max_tokens: int = 1000  # cap on generated tokens
class CompletionResponse(BaseModel):
    """Shape of an OpenAI-compatible chat completion response.

    NOTE(review): declared but not referenced by any route in this file —
    confirm whether it should annotate /v1/chat/completions as response_model.
    """

    id: str
    object: str
    created: int
    model: str
    choices: list
    usage: dict
class GenerateSimulationInputRequest(BaseModel):
    """Request body for the /generate-simulation-input endpoint."""

    prompt: str  # prompt text forwarded to the proxy
    input_type: str = "text"  # kind of simulation input requested
@app.post("/v1/chat/completions")
async def chat_completions(request: CompletionRequest):
    """OpenAI chat completion endpoint.

    Forwards the validated request to the shared ``OpenAIProxy`` and returns
    its response unchanged.  Any proxy failure is logged with a full
    traceback and surfaced to the client as an HTTP 500.

    Args:
        request: Parsed and validated completion request body.

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    try:
        # Lazy %-style args avoid string formatting when INFO is disabled.
        logger.info("Received chat completion request for model: %s", request.model)
        response = openai_proxy.complete(
            model=request.model,
            messages=request.messages,
            temperature=request.temperature,
            max_tokens=request.max_tokens,
        )
    except Exception as e:
        # logger.exception records the traceback, not just str(e);
        # "from e" preserves the cause chain for upstream error handlers.
        logger.exception("Chat completion error: %s", e)
        raise HTTPException(status_code=500, detail=str(e)) from e
    else:
        logger.info("Chat completion completed")
        return response
@app.post("/generate-simulation-input")
async def generate_simulation_input(request: GenerateSimulationInputRequest):
    """Generate simulation input data via the OpenAI proxy.

    Delegates to ``OpenAIProxy.generate_simulation_input`` and returns its
    result unchanged.  Failures are logged with a full traceback and
    surfaced to the client as an HTTP 500.

    Args:
        request: Parsed and validated generation request body.

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    try:
        logger.info("Received simulation input generation request")
        response = openai_proxy.generate_simulation_input(
            prompt=request.prompt,
            input_type=request.input_type,
        )
    except Exception as e:
        # logger.exception records the traceback; "from e" keeps the cause chain.
        logger.exception("Simulation input generation error: %s", e)
        raise HTTPException(status_code=500, detail=str(e)) from e
    else:
        logger.info("Simulation input generation completed")
        return response
@app.get("/health")
async def health_check():
    """Liveness probe: always reports the service as healthy with its identity."""
    payload = {
        "status": "healthy",
        "service": "openai-proxy",
        "version": "1.0.0",
    }
    return payload
@app.get("/info")
async def service_info():
    """Describe the service and enumerate its HTTP endpoints."""
    endpoints = {
        "/v1/chat/completions": "POST - OpenAI聊天完成",
        "/generate-simulation-input": "POST - 生成仿真输入数据",
        "/health": "GET - 健康检查",
        "/info": "GET - 服务信息",
    }
    return {
        "name": "OpenAI代理服务",
        "description": "提供OpenAI API代理功能的服务",
        "version": "1.0.0",
        "endpoints": endpoints,
    }
if __name__ == "__main__":
    # Development entry point; host/port/reload come from the project settings.
    uvicorn.run("main:app", host=settings.HOST, port=settings.PORT, reload=settings.DEBUG)