delete comments
This commit is contained in:
parent 1f0224139f
commit dbbd366953
Dockerfile | 13
@@ -1,27 +1,18 @@
-# Multi-stage build for Brief Bench FastAPI
 FROM python:3.11-slim as base

-# Set working directory
 WORKDIR /app

-# Install system dependencies
 RUN apt-get update && apt-get install -y \
     gcc \
     && rm -rf /var/lib/apt/lists/*

-# Copy requirements
 COPY requirements.txt .

-# Install Python dependencies
 RUN pip install --no-cache-dir -r requirements.txt

-# Copy application code
 COPY app /app/app
 COPY static /app/static

-# Expose port
-EXPOSE 8000
+EXPOSE 8092

-# Run uvicorn
-CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8092"]

@@ -31,14 +31,14 @@ async def login(
         HTTPException 400: If login format is invalid
         HTTPException 500: If DB API call fails
     """
-    # Get client IP
     client_ip = request.client.host

-    # Create auth service
     auth_service = AuthService(db_client)

     try:
-        # Authenticate user
         response = await auth_service.login(login, client_ip)
         return response
     except ValueError as e:

@@ -48,7 +48,7 @@ async def bench_query(
     user_id = current_user["user_id"]
     environment = request.environment.lower()

-    # Validate the environment
     if environment not in ['ift', 'psi', 'prod']:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
@@ -56,7 +56,7 @@ async def bench_query(
         )

     try:
-        # Get the user's settings from the DB API
         user_settings_response = await db_client.get_user_settings(user_id)
         env_settings = user_settings_response.settings.get(environment)
@@ -66,14 +66,14 @@ async def bench_query(
                 detail=f"Settings not found for environment: {environment}"
             )

-        # Check that apiMode = bench
         if env_settings.apiMode != 'bench':
             raise HTTPException(
                 status_code=status.HTTP_400_BAD_REQUEST,
                 detail=f"Environment {environment} is not configured for bench mode"
             )

-        # Generate a request_id
         request_id = str(uuid.uuid4())

         logger.info(
@@ -81,7 +81,7 @@ async def bench_query(
             f"{len(request.questions)} questions, request_id={request_id}"
         )

-        # Send the request to the RAG backend
         env_settings_dict = env_settings.model_dump()
         response_data = await rag_service.send_bench_query(
             environment=environment,
@@ -90,7 +90,7 @@ async def bench_query(
             request_id=request_id
         )

-        # Build the response
         return QueryResponse(
             request_id=request_id,
             timestamp=datetime.utcnow().isoformat() + "Z",
@@ -135,7 +135,7 @@ async def backend_query(
     user_id = current_user["user_id"]
     environment = request.environment.lower()

-    # Validate the environment
     if environment not in ['ift', 'psi', 'prod']:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
@@ -143,7 +143,7 @@ async def backend_query(
         )

     try:
-        # Get the user's settings from the DB API
         user_settings_response = await db_client.get_user_settings(user_id)
         env_settings = user_settings_response.settings.get(environment)
@@ -153,14 +153,14 @@ async def backend_query(
                 detail=f"Settings not found for environment: {environment}"
             )

-        # Check that apiMode = backend
         if env_settings.apiMode != 'backend':
             raise HTTPException(
                 status_code=status.HTTP_400_BAD_REQUEST,
                 detail=f"Environment {environment} is not configured for backend mode"
             )

-        # Generate a request_id
         request_id = str(uuid.uuid4())

         logger.info(
@@ -169,7 +169,7 @@ async def backend_query(
             f"reset_session={request.reset_session}"
         )

-        # Send the requests to the RAG backend
         env_settings_dict = env_settings.model_dump()
         response_data = await rag_service.send_backend_query(
             environment=environment,
@@ -178,12 +178,12 @@ async def backend_query(
             reset_session=request.reset_session
         )

-        # Build the response
         return QueryResponse(
             request_id=request_id,
             timestamp=datetime.utcnow().isoformat() + "Z",
             environment=environment,
-            response={"answers": response_data}  # Wrap in an object
+            response={"answers": response_data}
         )

     except httpx.HTTPStatusError as e:

@@ -12,20 +12,16 @@ class Settings(BaseSettings):
         case_sensitive=False,
     )

-    # Application settings
-    APP_NAME: str = "Brief Bench API"
+    APP_NAME: str = "Brief Bench Rags"
     DEBUG: bool = False

-    # JWT Authentication
     JWT_SECRET_KEY: str
     JWT_ALGORITHM: str = "HS256"
-    JWT_EXPIRE_MINUTES: int = 43200  # 30 days
+    JWT_EXPIRE_MINUTES: int = 43200

-    # DB API Service (external)
     DB_API_URL: str
     DB_API_TIMEOUT: int = 30

-    # RAG Backend - IFT Environment
     IFT_RAG_HOST: str
     IFT_RAG_PORT: int
     IFT_RAG_ENDPOINT: str
@@ -33,7 +29,6 @@ class Settings(BaseSettings):
     IFT_RAG_CERT_KEY: str = ""
     IFT_RAG_CERT_CERT: str = ""

-    # RAG Backend - PSI Environment
     PSI_RAG_HOST: str
     PSI_RAG_PORT: int
     PSI_RAG_ENDPOINT: str
@@ -41,7 +36,6 @@ class Settings(BaseSettings):
     PSI_RAG_CERT_KEY: str = ""
     PSI_RAG_CERT_CERT: str = ""

-    # RAG Backend - PROD Environment
     PROD_RAG_HOST: str
     PROD_RAG_PORT: int
     PROD_RAG_ENDPOINT: str
@@ -49,9 +43,7 @@ class Settings(BaseSettings):
     PROD_RAG_CERT_KEY: str = ""
     PROD_RAG_CERT_CERT: str = ""

-    # Request Timeouts
-    RAG_REQUEST_TIMEOUT: int = 1800  # 30 minutes in seconds
+    RAG_REQUEST_TIMEOUT: int = 1800


-# Global settings instance
 settings = Settings()

@@ -46,7 +46,7 @@ class TgBackendInterface:
         """
         self.api_prefix = api_prefix.rstrip('/')

-        # Configure the httpx.AsyncClient
         self.client = httpx.AsyncClient(
             timeout=httpx.Timeout(timeout),
             transport=httpx.AsyncHTTPTransport(retries=max_retries),
@@ -148,11 +148,11 @@ class TgBackendInterface:
             )
             raise

-        # If the response is empty (204 No Content), return an empty dict
         if response.status_code == 204 or len(response.content) == 0:
             return {}

-        # Parse the JSON
         try:
             data = response.json()
         except Exception as e:
@@ -160,7 +160,7 @@ class TgBackendInterface:
             logger.debug(f"Response content: {response.text}")
             raise

-        # Deserialize into a Pydantic model if needed
         return self._deserialize_response(data, response_model)

     async def get(

@@ -8,29 +8,25 @@ from fastapi.responses import FileResponse
 from app.api.v1 import auth, settings as settings_router, query, analysis
 from app.config import settings

-# Create FastAPI app
 app = FastAPI(
     title=settings.APP_NAME,
     debug=settings.DEBUG,
     version="1.0.0"
 )

-# CORS middleware
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["*"],  # TODO: Configure properly in production
+    allow_origins=["*"],
     allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
 )

-# API v1 routes
 app.include_router(auth.router, prefix="/api/v1")
 app.include_router(settings_router.router, prefix="/api/v1")
 app.include_router(query.router, prefix="/api/v1")
 app.include_router(analysis.router, prefix="/api/v1")

-# Serve static files (frontend)
 app.mount("/static", StaticFiles(directory="static"), name="static")

@@ -32,4 +32,4 @@ class TokenPayload(BaseModel):
     user_id: str
     login: str
-    exp: int  # Expiration timestamp
+    exp: int

@@ -32,4 +32,4 @@ class QueryResponse(BaseModel):
     request_id: str
     timestamp: str
     environment: str
-    response: dict  # RagResponseBenchList or converted format
+    response: dict

@@ -32,15 +32,15 @@ class AuthService:
             ValueError: If login format is invalid
             Exception: If DB API call fails
         """
-        # Validate login format
         if not (login.isdigit() and len(login) == 8):
             raise ValueError("Login must be 8 digits")

-        # Call DB API to validate and record login
         request = LoginRequest(login=login, client_ip=client_ip)
         user: UserResponse = await self.db_client.login_user(request)

-        # Generate JWT token
         token_data = {
             "user_id": user.user_id,
             "login": user.login

@@ -47,12 +47,12 @@ class RagService:
         """
         env_upper = environment.upper()

-        # Get the certificate paths from config
         cert_cert_path = getattr(settings, f"{env_upper}_RAG_CERT_CERT", "")
         cert_key_path = getattr(settings, f"{env_upper}_RAG_CERT_KEY", "")
         cert_ca_path = getattr(settings, f"{env_upper}_RAG_CERT_CA", "")

-        # Configure mTLS
         cert = None
         verify = True
@@ -65,7 +65,7 @@ class RagService:
             logger.info(f"Custom CA for {environment}: {cert_ca_path}")

         return httpx.AsyncClient(
-            timeout=httpx.Timeout(1800.0),  # 30 minutes for long-running requests
+            timeout=httpx.Timeout(1800.0),
             cert=cert,
             verify=verify,
             follow_redirects=True
@@ -128,7 +128,7 @@ class RagService:
         """
         base_url = self._get_base_url(environment)

-        # Endpoints from the user settings, or defaults
         ask_endpoint = user_settings.get('backendAskEndpoint', 'ask')
         reset_endpoint = user_settings.get('backendResetEndpoint', 'reset')
@@ -160,7 +160,7 @@ class RagService:
             "System-Id": f"brief-bench-{environment}"
         }

-        # Add optional headers from the user settings
         if user_settings.get('bearerToken'):
             headers["Authorization"] = f"Bearer {user_settings['bearerToken']}"
@@ -220,7 +220,7 @@ class RagService:
         url = self._get_bench_endpoint(environment)
         headers = self._build_bench_headers(environment, user_settings, request_id)

-        # Serialize the questions to JSON
         body = [q.model_dump() for q in questions]

         logger.info(f"Sending bench query to {environment}: {len(questions)} questions")
@@ -273,7 +273,7 @@ class RagService:
         responses = []

         for idx, question in enumerate(questions, start=1):
-            # Build the request body
             now = datetime.utcnow().isoformat() + "Z"
             body = {
                 "question": question.body,
@@ -285,13 +285,13 @@ class RagService:
             logger.debug(f"Sending question {idx}/{len(questions)}: {question.body[:50]}...")

             try:
-                # Send the question
                 response = await client.post(endpoints['ask'], json=body, headers=headers)
                 response.raise_for_status()
                 response_data = response.json()
                 responses.append(response_data)

-                # Reset the session if needed
                 if reset_session and user_settings.get('resetSessionMode', True):
                     reset_body = {"user_message_datetime": now}
                     logger.debug(f"Resetting session after question {idx}")

@@ -5,12 +5,12 @@ services:
     build: .
     container_name: brief-bench-fastapi
     ports:
-      - "8000:8000"
+      - "8092:8092"
     env_file:
       - .env
     volumes:
-      - ./certs:/app/certs:ro  # mTLS certificates (read-only)
-      - ./static:/app/static   # Frontend files
+      - ./certs:/app/certs:ro
+      - ./static:/app/static
     restart: unless-stopped
     networks:
       - brief-bench-network

@@ -1,9 +1,5 @@
-# Development and Testing Dependencies
-
-# Include production requirements
 -r requirements.txt

-# Testing framework
 pytest
 pytest-asyncio
 pytest-cov
@@ -15,5 +11,4 @@ flake8
 mypy
 isort

-# Coverage reporting
 coverage[toml]

@@ -36,11 +36,11 @@ class TestAuthenticationErrors:

     def test_session_access_without_auth(self, e2e_client, setup_test_settings):
         """Test that session endpoints require authentication."""
-        # Try to list sessions without auth
         response = e2e_client.get("/api/v1/analysis/sessions")
         assert response.status_code == 401

-        # Try to create session without auth
         session_data = {
             "environment": "ift",
             "api_mode": "bench",
@@ -73,7 +73,7 @@ class TestValidationErrors:
             json=query_data,
             headers=e2e_auth_headers
         )
-        assert response.status_code == 422  # Validation error
+        assert response.status_code == 422

     @pytest.mark.usefixtures("check_prerequisites")
     def test_empty_questions_list(
@@ -103,7 +103,7 @@ class TestValidationErrors:
         setup_test_settings
     ):
         """Test query with missing required fields."""
-        # Missing 'questions' field
         query_data = {"environment": "ift"}

         response = e2e_client.post(
@@ -125,7 +125,7 @@ class TestValidationErrors:
             "environment": "ift",
             "questions": [
                 {"body": "Valid question", "with_docs": True},
-                {"with_docs": True}  # Missing 'body'
+                {"with_docs": True}
             ]
         }
@@ -144,10 +144,10 @@ class TestValidationErrors:
         setup_test_settings
     ):
         """Test creating session with invalid data."""
-        # Missing required fields
         invalid_session = {
             "environment": "ift"
-            # Missing api_mode, request, response
         }

         response = e2e_client.post(
@@ -170,7 +170,7 @@ class TestModeCompatibilityErrors:
         setup_test_settings
     ):
         """Test that bench query fails when environment is in backend mode."""
-        # Change IFT to backend mode
         settings_update = {
             "settings": {
                 "ift": {
@@ -193,7 +193,7 @@ class TestModeCompatibilityErrors:
         )
         assert update_response.status_code == 200

-        # Try bench query (should fail)
         query_data = {
             "environment": "ift",
             "questions": [{"body": "Test", "with_docs": True}]
@@ -204,7 +204,7 @@ class TestModeCompatibilityErrors:
             json=query_data,
             headers=e2e_auth_headers
         )
-        # Should fail because IFT is in backend mode
         assert response.status_code in [400, 500, 502]

     @pytest.mark.usefixtures("check_prerequisites")
@@ -215,7 +215,7 @@ class TestModeCompatibilityErrors:
         setup_test_settings
     ):
         """Test that backend query fails when environment is in bench mode."""
-        # IFT is in bench mode by default (from setup_test_settings)
         query_data = {
             "environment": "ift",
             "questions": [{"body": "Test", "with_docs": True}],
@@ -227,7 +227,7 @@ class TestModeCompatibilityErrors:
             json=query_data,
             headers=e2e_auth_headers
         )
-        # Should fail because IFT is in bench mode
         assert response.status_code in [400, 500, 502]

@@ -281,7 +281,7 @@ class TestResourceNotFoundErrors:
             f"/api/v1/analysis/sessions/{invalid_session_id}",
             headers=e2e_auth_headers
         )
-        # Could be 404 or 422 depending on validation
         assert response.status_code in [404, 422]

@@ -335,7 +335,7 @@ class TestSettingsErrors:
             json=invalid_settings,
             headers=e2e_auth_headers
         )
-        # Could be 422 (validation) or 400 (bad request)
         assert response.status_code in [400, 422]

@@ -351,7 +351,7 @@ class TestEdgeCases:
         setup_test_settings
     ):
         """Test query with very long question text."""
-        long_question = "Тест " * 1000  # Very long question
+        long_question = "Тест " * 1000

         query_data = {
             "environment": "ift",
@@ -364,7 +364,7 @@ class TestEdgeCases:
             headers=e2e_auth_headers,
             timeout=120.0
         )
-        # Should either succeed or fail gracefully
         assert response.status_code in [200, 400, 413, 422, 502]

     @pytest.mark.usefixtures("check_prerequisites")
@@ -388,7 +388,7 @@ class TestEdgeCases:
             headers=e2e_auth_headers,
             timeout=120.0
         )
-        # Should handle special characters properly
         assert response.status_code in [200, 400, 422, 502]

     @pytest.mark.usefixtures("check_prerequisites")
@@ -402,7 +402,7 @@ class TestEdgeCases:
         """Test query with many questions."""
         questions = [
             {"body": f"Вопрос номер {i}", "with_docs": i % 2 == 0}
-            for i in range(50)  # 50 questions
+            for i in range(50)
         ]

         query_data = {
@@ -414,9 +414,9 @@ class TestEdgeCases:
             "/api/v1/query/bench",
             json=query_data,
             headers=e2e_auth_headers,
-            timeout=180.0  # Longer timeout for many questions
+            timeout=180.0
         )
-        # Should either succeed or fail gracefully
         assert response.status_code in [200, 400, 413, 422, 502, 504]

     @pytest.mark.usefixtures("check_prerequisites")
@@ -428,21 +428,21 @@ class TestEdgeCases:
         cleanup_test_sessions
     ):
         """Test session list pagination with edge case limits."""
-        # Test with limit=0
         response = e2e_client.get(
             "/api/v1/analysis/sessions?limit=0",
             headers=e2e_auth_headers
         )
         assert response.status_code in [200, 400, 422]

-        # Test with very large limit
         response = e2e_client.get(
             "/api/v1/analysis/sessions?limit=10000",
             headers=e2e_auth_headers
         )
         assert response.status_code in [200, 400, 422]

-        # Test with negative offset
         response = e2e_client.get(
             "/api/v1/analysis/sessions?offset=-1",
             headers=e2e_auth_headers

@@ -29,9 +29,9 @@ class TestCompleteUserFlow:
         5. Retrieve session
         6. Delete session
         """
-        # 1. Authentication already done via fixture

-        # 2. Verify settings
         settings_response = e2e_client.get(
             "/api/v1/settings",
             headers=e2e_auth_headers
@@ -41,7 +41,7 @@ class TestCompleteUserFlow:
         assert "ift" in settings["settings"]
         assert settings["settings"]["ift"]["apiMode"] == "bench"

-        # 3. Send bench query to IFT RAG
         query_data = {
             "environment": "ift",
             "questions": [
@@ -54,7 +54,7 @@ class TestCompleteUserFlow:
             "/api/v1/query/bench",
             json=query_data,
             headers=e2e_auth_headers,
-            timeout=120.0  # RAG can be slow
+            timeout=120.0
         )

         assert query_response.status_code == 200
@@ -64,7 +64,7 @@ class TestCompleteUserFlow:
         assert "timestamp" in query_result
         assert query_result["environment"] == "ift"

-        # 4. Save analysis session
         session_data = {
             "environment": "ift",
             "api_mode": "bench",
@@ -88,7 +88,7 @@ class TestCompleteUserFlow:
         assert "session_id" in session
         session_id = session["session_id"]

-        # 5. Retrieve session
         get_session_response = e2e_client.get(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=e2e_auth_headers
@@ -100,7 +100,7 @@ class TestCompleteUserFlow:
         assert retrieved_session["environment"] == "ift"
         assert retrieved_session["api_mode"] == "bench"

-        # 6. Delete session
         delete_response = e2e_client.delete(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=e2e_auth_headers
@@ -108,7 +108,7 @@ class TestCompleteUserFlow:

         assert delete_response.status_code == 204

-        # Verify deletion
         verify_response = e2e_client.get(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=e2e_auth_headers
@@ -131,7 +131,7 @@ class TestCompleteUserFlow:
         3. Send backend query to PSI RAG
         4. Save and verify session
         """
-        # 1. Verify PSI settings
         settings_response = e2e_client.get(
             "/api/v1/settings",
             headers=e2e_auth_headers
@@ -140,7 +140,7 @@ class TestCompleteUserFlow:
         settings = settings_response.json()
         assert settings["settings"]["psi"]["apiMode"] == "backend"

-        # 2. Send backend query to PSI RAG
         query_data = {
             "environment": "psi",
             "questions": [
@@ -161,7 +161,7 @@ class TestCompleteUserFlow:
         assert query_result["environment"] == "psi"
         assert "response" in query_result

-        # 3. Save session
         session_data = {
             "environment": "psi",
             "api_mode": "backend",
@@ -190,7 +190,7 @@ class TestCompleteUserFlow:
         cleanup_test_sessions
     ):
         """Test that changing settings affects subsequent queries."""
-        # 1. Get current settings
         settings_response = e2e_client.get(
             "/api/v1/settings",
             headers=e2e_auth_headers
@@ -198,7 +198,7 @@ class TestCompleteUserFlow:
         assert settings_response.status_code == 200
         original_settings = settings_response.json()

-        # 2. Change IFT to backend mode
         updated_settings = {
             "settings": {
                 "ift": {
@@ -215,7 +215,7 @@ class TestCompleteUserFlow:
         )
         assert update_response.status_code == 200

-        # 3. Try bench query (should fail - wrong mode)
         bench_query = {
             "environment": "ift",
             "questions": [{"body": "Test", "with_docs": True}]
@@ -226,10 +226,10 @@ class TestCompleteUserFlow:
             json=bench_query,
             headers=e2e_auth_headers
         )
-        # Should fail because IFT is now in backend mode
         assert bench_response.status_code in [400, 500]

-        # 4. Backend query should work
         backend_query = {
             "environment": "ift",
             "questions": [{"body": "Test", "with_docs": True}],
@@ -244,7 +244,7 @@ class TestCompleteUserFlow:
         )
         assert backend_response.status_code == 200

-        # 5. Restore original settings
         restore_response = e2e_client.put(
             "/api/v1/settings",
             json={"settings": {"ift": original_settings["settings"]["ift"]}},
@@ -263,7 +263,7 @@ class TestCompleteUserFlow:
         """Test creating and managing multiple analysis sessions."""
         session_ids = []

-        # Create multiple sessions
         for i, env in enumerate(["ift", "psi", "prod"]):
             session_data = {
                 "environment": env,
@@ -285,7 +285,7 @@ class TestCompleteUserFlow:
             assert response.status_code == 201
             session_ids.append(response.json()["session_id"])

-        # List all sessions
         list_response = e2e_client.get(
             "/api/v1/analysis/sessions?limit=50",
             headers=e2e_auth_headers
@@ -295,14 +295,14 @@ class TestCompleteUserFlow:
         sessions_list = list_response.json()
         assert sessions_list["total"] >= 3

-        # Filter by environment
         ift_sessions = e2e_client.get(
             "/api/v1/analysis/sessions?environment=ift&limit=50",
             headers=e2e_auth_headers
         )
         assert ift_sessions.status_code == 200

-        # Delete all created sessions
         for session_id in session_ids:
             delete_response = e2e_client.delete(
                 f"/api/v1/analysis/sessions/{session_id}",
@@ -319,7 +319,7 @@ class TestCompleteUserFlow:
         cleanup_test_sessions
     ):
         """Test that user data is properly isolated (sessions, settings)."""
-        # Create a session
         session_data = {
             "environment": "ift",
             "api_mode": "bench",
@@ -337,20 +337,20 @@ class TestCompleteUserFlow:
         assert create_response.status_code == 201
         session_id = create_response.json()["session_id"]

-        # Verify we can access our session
         get_response = e2e_client.get(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=e2e_auth_headers
         )
         assert get_response.status_code == 200

-        # Try to access without auth (should fail)
         unauth_response = e2e_client.get(
             f"/api/v1/analysis/sessions/{session_id}"
         )
         assert unauth_response.status_code == 401

-        # Cleanup
         e2e_client.delete(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=e2e_auth_headers

@@ -40,13 +40,13 @@ class TestRagBackendsE2E:
         assert response.status_code == 200
         result = response.json()

-        # Verify response structure
         assert result["environment"] == "ift"
         assert "request_id" in result
         assert "response" in result
         assert "timestamp" in result

-        # Response should contain answers for all questions
         assert isinstance(result["response"], (dict, list))

     @pytest.mark.e2e_psi
@@ -90,7 +90,7 @@ class TestRagBackendsE2E:
         cleanup_test_sessions
     ):
         """Test PSI backend mode with session reset."""
-        # First query
         query_data_1 = {
             "environment": "psi",
             "questions": [{"body": "Первый вопрос с контекстом", "with_docs": True}],
@@ -105,7 +105,7 @@ class TestRagBackendsE2E:
         )
         assert response_1.status_code == 200

-        # Second query with reset
         query_data_2 = {
             "environment": "psi",
             "questions": [{"body": "Второй вопрос после сброса", "with_docs": True}],
@@ -214,7 +214,7 @@ class TestRagBackendsE2E:
         cleanup_test_sessions
     ):
         """Test queries to different environments in sequence."""
-        environments = ["ift", "prod"]  # PSI uses backend mode, skip for this test
+        environments = ["ift", "prod"]

         for env in environments:
             query_data = {

@@ -1,8 +1,8 @@
 # Integration tests environment variables
 # Copy this file to .env.integration and update with your values

-# DB API URL for integration tests
 TEST_DB_API_URL=http://localhost:8081/api/v1

-# Test user login (8-digit)
 TEST_LOGIN=99999999

@@ -9,9 +9,9 @@ from app.dependencies import get_db_client
 from app.interfaces.db_api_client import DBApiClient


-# Test configuration
 TEST_DB_API_URL = os.getenv("TEST_DB_API_URL", "http://localhost:8081/api/v1")
-TEST_LOGIN = os.getenv("TEST_LOGIN", "99999999")  # Test user login
+TEST_LOGIN = os.getenv("TEST_LOGIN", "99999999")


 @pytest.fixture(scope="session")
@@ -31,13 +31,13 @@ def db_client(db_api_url):
     """Real DB API client for integration tests."""
     client = DBApiClient(api_prefix=db_api_url, timeout=30.0)
     yield client
-    # Cleanup is handled by async context manager or manual close


 @pytest.fixture(scope="function")
 def client(db_api_url):
     """FastAPI test client with real DB API integration."""
-    # Override get_db_client to use real DB API
     def get_real_db_client():
         return DBApiClient(api_prefix=db_api_url, timeout=30.0)

@@ -46,7 +46,7 @@ def client(db_api_url):
     with TestClient(app) as test_client:
         yield test_client

-    # Cleanup
     app.dependency_overrides.clear()


@@ -89,13 +89,13 @@ def clean_test_sessions(db_client, test_user_id):
     """Clean up test sessions after test."""
     yield

-    # Cleanup: delete all test sessions
     try:
         sessions = db_client.get_sessions(test_user_id, limit=200)
         for session in sessions.sessions:
             db_client.delete_session(test_user_id, session.session_id)
     except Exception:
-        pass  # Ignore cleanup errors
+        pass


 def pytest_configure(config):

@@ -48,7 +48,7 @@ class TestAnalysisIntegration:

     def test_get_sessions_list(self, client, auth_headers, clean_test_sessions):
         """Test getting list of sessions."""
-        # Create test sessions
         for env in ["ift", "psi", "prod"]:
             session_data = {
                 "environment": env,
@@ -63,7 +63,7 @@ class TestAnalysisIntegration:
                 headers=auth_headers
             )

-        # Get all sessions
         response = client.get("/api/v1/analysis/sessions", headers=auth_headers)

         assert response.status_code == 200
@@ -74,7 +74,7 @@ class TestAnalysisIntegration:
         assert data["total"] >= 3
         assert len(data["sessions"]) >= 3

-        # Verify session structure
         for session in data["sessions"]:
             assert "session_id" in session
             assert "environment" in session
@@ -83,7 +83,7 @@ class TestAnalysisIntegration:

     def test_get_sessions_with_filter(self, client, auth_headers, clean_test_sessions):
         """Test filtering sessions by environment."""
-        # Create sessions for different environments
         for env in ["ift", "psi"]:
             for i in range(2):
                 session_data = {
@@ -99,7 +99,7 @@ class TestAnalysisIntegration:
                     headers=auth_headers
                 )

-        # Filter by IFT
         response = client.get(
             "/api/v1/analysis/sessions?environment=ift",
             headers=auth_headers
@@ -110,13 +110,13 @@ class TestAnalysisIntegration:
         data = response.json()
         assert data["total"] >= 2

-        # All returned sessions should be IFT
         for session in data["sessions"]:
             assert session["environment"] == "ift"

     def test_get_sessions_pagination(self, client, auth_headers, clean_test_sessions):
         """Test session pagination."""
-        # Create 5 test sessions
         for i in range(5):
             session_data = {
                 "environment": "ift",
@@ -131,7 +131,7 @@ class TestAnalysisIntegration:
                 headers=auth_headers
             )

-        # Get first 3
         response = client.get(
             "/api/v1/analysis/sessions?limit=3&offset=0",
             headers=auth_headers
@@ -141,7 +141,7 @@ class TestAnalysisIntegration:
         data = response.json()
         assert len(data["sessions"]) <= 3

-        # Get next 3
         response = client.get(
             "/api/v1/analysis/sessions?limit=3&offset=3",
             headers=auth_headers
@@ -151,7 +151,7 @@ class TestAnalysisIntegration:

     def test_get_session_by_id(self, client, auth_headers, clean_test_sessions):
         """Test getting specific session by ID."""
-        # Create session
         session_data = {
             "environment": "psi",
             "api_mode": "backend",
@@ -177,7 +177,7 @@ class TestAnalysisIntegration:
         assert create_response.status_code == 201
         session_id = create_response.json()["session_id"]

-        # Get session by ID
         get_response = client.get(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=auth_headers
@@ -195,7 +195,7 @@ class TestAnalysisIntegration:

     def test_delete_session(self, client, auth_headers, clean_test_sessions):
         """Test deleting a session."""
-        # Create session
         session_data = {
             "environment": "prod",
             "api_mode": "bench",
@@ -213,7 +213,7 @@ class TestAnalysisIntegration:
         assert create_response.status_code == 201
         session_id = create_response.json()["session_id"]

-        # Delete session
         delete_response = client.delete(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=auth_headers
@@ -221,7 +221,7 @@ class TestAnalysisIntegration:

         assert delete_response.status_code == 204

-        # Verify deletion
         get_response = client.get(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=auth_headers
@@ -243,7 +243,7 @@ class TestAnalysisIntegration:
     def test_create_session_invalid_environment(self, client, auth_headers):
         """Test creating session with invalid environment."""
         session_data = {
-            "environment": "invalid",  # Invalid
+            "environment": "invalid",
             "api_mode": "bench",
             "request": [],
             "response": {},
@@ -256,27 +256,27 @@ class TestAnalysisIntegration:
             headers=auth_headers
         )

-        # Should fail validation (either FastAPI or DB API)
         assert response.status_code in [400, 422]

     def test_sessions_require_authentication(self, client):
         """Test that session endpoints require authentication."""
-        # Create without auth
         response = client.post(
             "/api/v1/analysis/sessions",
             json={"environment": "ift", "api_mode": "bench", "request": [], "response": {}, "annotations": {}}
         )
         assert response.status_code == 401

-        # List without auth
         response = client.get("/api/v1/analysis/sessions")
         assert response.status_code == 401

-        # Get by ID without auth
         response = client.get("/api/v1/analysis/sessions/some-id")
         assert response.status_code == 401

-        # Delete without auth
         response = client.delete("/api/v1/analysis/sessions/some-id")
         assert response.status_code == 401

@@ -302,12 +302,12 @@ class TestAnalysisIntegration:
             assert response.status_code == 201
             session_ids.append(response.json()["session_id"])

-        # Verify all sessions exist
         list_response = client.get("/api/v1/analysis/sessions", headers=auth_headers)
         assert list_response.status_code == 200
         assert list_response.json()["total"] >= 3

-        # Verify each session is unique
         assert len(set(session_ids)) == 3

     def test_session_data_integrity(self, client, auth_headers, clean_test_sessions):
@@ -353,7 +353,7 @@ class TestAnalysisIntegration:
             }
         }

-        # Create session
         create_response = client.post(
             "/api/v1/analysis/sessions",
             json=complex_data,
@@ -363,7 +363,7 @@ class TestAnalysisIntegration:
         assert create_response.status_code == 201
         session_id = create_response.json()["session_id"]

-        # Retrieve and verify
         get_response = client.get(
             f"/api/v1/analysis/sessions/{session_id}",
             headers=auth_headers
@@ -372,7 +372,7 @@ class TestAnalysisIntegration:
         assert get_response.status_code == 200
         retrieved_data = get_response.json()

-        # Verify complex data integrity
         assert "Привет" in retrieved_data["request"][0]["body"]
         assert retrieved_data["response"]["sources"][0]["doc_id"] == "doc1"
         assert retrieved_data["annotations"]["nested"]["level1"]["level2"]["value"] == "deep nested value"

@@ -31,20 +31,20 @@ class TestAuthIntegration:
         """Test login with invalid format."""
         response = client.post(
             "/api/v1/auth/login",
-            params={"login": "123"}  # Too short
+            params={"login": "123"}
         )

-        assert response.status_code == 422  # Validation error
+        assert response.status_code == 422

     def test_login_nonexistent_user(self, client):
         """Test login with non-existent user."""
         response = client.post(
             "/api/v1/auth/login",
-            params={"login": "00000000"}  # Likely doesn't exist
+            params={"login": "00000000"}
         )

-        # Should return 404 if user doesn't exist in DB API
-        # Or create user if DB API auto-creates
         assert response.status_code in [200, 404]

     def test_token_contains_user_info(self, client, test_login):
@@ -59,7 +59,7 @@ class TestAuthIntegration:
         assert response.status_code == 200
         token = response.json()["access_token"]

-        # Decode token
         payload = decode_access_token(token)
         assert payload["login"] == test_login
         assert "user_id" in payload
@@ -75,7 +75,7 @@ class TestAuthIntegration:
         """Test accessing protected endpoint with valid token."""
         response = client.get("/api/v1/settings", headers=auth_headers)

-        # Should return 200 (or 404 if no settings yet)
         assert response.status_code in [200, 404]

     def test_protected_endpoint_with_invalid_token(self, client):

@@ -14,7 +14,7 @@ class TestQueryDBApiIntegration:

     def test_bench_query_retrieves_user_settings(self, client, auth_headers):
         """Test that bench query retrieves user settings from DB API."""
-        # First, set up user settings for bench mode
         settings_data = {
             "settings": {
                 "ift": {
@@ -32,7 +32,7 @@ class TestQueryDBApiIntegration:

         client.put("/api/v1/settings", json=settings_data, headers=auth_headers)

-        # Mock RAG service to avoid actual RAG calls
         with patch('app.api.v1.query.RagService') as MockRagService:
             mock_rag = AsyncMock()
             mock_rag.send_bench_query = AsyncMock(return_value={
@@ -41,7 +41,7 @@ class TestQueryDBApiIntegration:
             mock_rag.close = AsyncMock()
             MockRagService.return_value = mock_rag

-            # Send bench query
             query_data = {
                 "environment": "ift",
                 "questions": [{"body": "Test question", "with_docs": True}]
@@ -53,10 +53,10 @@ class TestQueryDBApiIntegration:
                 headers=auth_headers
             )

-            # Should succeed (settings retrieved from DB API)
             assert response.status_code == 200

-            # Verify RAG service was called with correct settings
             mock_rag.send_bench_query.assert_called_once()
             call_kwargs = mock_rag.send_bench_query.call_args[1]
             assert call_kwargs["environment"] == "ift"
@@ -66,7 +66,7 @@ class TestQueryDBApiIntegration:

     def test_backend_query_retrieves_user_settings(self, client, auth_headers):
         """Test that backend query retrieves user settings from DB API."""
-        # Set up user settings for backend mode
         settings_data = {
             "settings": {
                 "psi": {
@@ -84,7 +84,7 @@ class TestQueryDBApiIntegration:

         client.put("/api/v1/settings", json=settings_data, headers=auth_headers)

-        # Mock RAG service
         with patch('app.api.v1.query.RagService') as MockRagService:
             mock_rag = AsyncMock()
             mock_rag.send_backend_query = AsyncMock(return_value=[
@@ -93,7 +93,7 @@ class TestQueryDBApiIntegration:
             mock_rag.close = AsyncMock()
             MockRagService.return_value = mock_rag

-            # Send backend query
             query_data = {
                 "environment": "psi",
                 "questions": [{"body": "Test question", "with_docs": True}],
@@ -108,7 +108,7 @@ class TestQueryDBApiIntegration:

             assert response.status_code == 200

-            # Verify RAG service was called with correct settings
             mock_rag.send_backend_query.assert_called_once()
             call_kwargs = mock_rag.send_backend_query.call_args[1]
             assert call_kwargs["environment"] == "psi"
@@ -119,11 +119,11 @@ class TestQueryDBApiIntegration:

     def test_bench_query_wrong_api_mode(self, client, auth_headers):
         """Test bench query fails when settings configured for backend mode."""
-        # Set up settings for backend mode
         settings_data = {
             "settings": {
                 "ift": {
-                    "apiMode": "backend",  # Wrong mode for bench query
+                    "apiMode": "backend",
                     "bearerToken": "token",
                     "systemPlatform": "platform",
                     "systemPlatformUser": "user",
@@ -137,7 +137,7 @@ class TestQueryDBApiIntegration:

         client.put("/api/v1/settings", json=settings_data, headers=auth_headers)

-        # Try bench query
         query_data = {
             "environment": "ift",
             "questions": [{"body": "Test", "with_docs": True}]
@@ -149,16 +149,16 @@ class TestQueryDBApiIntegration:
             headers=auth_headers
         )

-        # Should fail due to wrong API mode
         assert response.status_code in [400, 500]

     def test_backend_query_wrong_api_mode(self, client, auth_headers):
         """Test backend query fails when settings configured for bench mode."""
-        # Set up settings for bench mode
         settings_data = {
             "settings": {
                 "prod": {
-                    "apiMode": "bench",  # Wrong mode for backend query
+                    "apiMode": "bench",
                     "bearerToken": "token",
                     "systemPlatform": "platform",
                     "systemPlatformUser": "user",
@@ -172,7 +172,7 @@ class TestQueryDBApiIntegration:

         client.put("/api/v1/settings", json=settings_data, headers=auth_headers)

-        # Try backend query
         query_data = {
             "environment": "prod",
             "questions": [{"body": "Test", "with_docs": True}],
@@ -185,7 +185,7 @@ class TestQueryDBApiIntegration:
             headers=auth_headers
         )

-        # Should fail due to wrong API mode
         assert response.status_code in [400, 500]

     def test_query_invalid_environment(self, client, auth_headers):
@@ -195,7 +195,7 @@ class TestQueryDBApiIntegration:
             "questions": [{"body": "Test", "with_docs": True}]
         }

-        # Bench query
         response = client.post(
             "/api/v1/query/bench",
             json=query_data,
@@ -203,7 +203,7 @@ class TestQueryDBApiIntegration:
         )
         assert response.status_code == 400

-        # Backend query
         query_data["reset_session"] = True
         response = client.post(
             "/api/v1/query/backend",
@@ -219,18 +219,18 @@ class TestQueryDBApiIntegration:
             "questions": [{"body": "Test", "with_docs": True}]
         }

-        # Bench without auth
         response = client.post("/api/v1/query/bench", json=query_data)
         assert response.status_code == 401

-        # Backend without auth
         query_data["reset_session"] = True
         response = client.post("/api/v1/query/backend", json=query_data)
         assert response.status_code == 401

     def test_settings_update_affects_query(self, client, auth_headers):
         """Test that updating settings affects subsequent queries."""
-        # Initial settings
         initial_settings = {
             "settings": {
                 "ift": {
@@ -248,14 +248,14 @@ class TestQueryDBApiIntegration:

         client.put("/api/v1/settings", json=initial_settings, headers=auth_headers)

-        # Mock RAG service
         with patch('app.api.v1.query.RagService') as MockRagService:
             mock_rag = AsyncMock()
             mock_rag.send_bench_query = AsyncMock(return_value={"answers": []})
             mock_rag.close = AsyncMock()
             MockRagService.return_value = mock_rag

-            # First query
             query_data = {
                 "environment": "ift",
                 "questions": [{"body": "Test", "with_docs": True}]
@@ -263,11 +263,11 @@ class TestQueryDBApiIntegration:

             client.post("/api/v1/query/bench", json=query_data, headers=auth_headers)

-            # Check first call
             first_call = mock_rag.send_bench_query.call_args[1]
             assert first_call["user_settings"]["bearerToken"] == "initial-token"

-            # Update settings
             updated_settings = {
                 "settings": {
                     "ift": {
@@ -285,11 +285,11 @@ class TestQueryDBApiIntegration:

             client.put("/api/v1/settings", json=updated_settings, headers=auth_headers)

-            # Second query
             mock_rag.send_bench_query.reset_mock()
             client.post("/api/v1/query/bench", json=query_data, headers=auth_headers)

-            # Check second call uses updated settings
             second_call = mock_rag.send_bench_query.call_args[1]
             assert second_call["user_settings"]["bearerToken"] == "updated-token"
             assert second_call["user_settings"]["withClassify"] is True

@@ -11,7 +11,7 @@ class TestSettingsIntegration:
         """Test getting user settings from real DB API."""
         response = client.get("/api/v1/settings", headers=auth_headers)

-        # Should return 200 with settings or 404 if no settings yet
         assert response.status_code in [200, 404]

         if response.status_code == 200:
@@ -20,7 +20,7 @@ class TestSettingsIntegration:
             assert "settings" in data
             assert "updated_at" in data

-            # Check settings structure
             settings = data["settings"]
             for env in ["ift", "psi", "prod"]:
                 if env in settings:
@@ -78,7 +78,7 @@ class TestSettingsIntegration:
         assert "settings" in data
         assert "updated_at" in data

-        # Verify settings were updated
         assert data["settings"]["ift"]["apiMode"] == "bench"
         assert data["settings"]["ift"]["bearerToken"] == "test-token-ift"
         assert data["settings"]["psi"]["apiMode"] == "backend"
@@ -115,7 +115,7 @@ class TestSettingsIntegration:

     def test_update_then_get_settings(self, client, auth_headers):
         """Test updating settings and then retrieving them."""
-        # Update settings
         update_data = {
             "settings": {
                 "ift": {
@@ -139,7 +139,7 @@ class TestSettingsIntegration:

         assert put_response.status_code == 200

-        # Get settings
         get_response = client.get("/api/v1/settings", headers=auth_headers)

         assert get_response.status_code == 200
@@ -153,7 +153,7 @@ class TestSettingsIntegration:
         update_data = {
             "settings": {
                 "ift": {
-                    "apiMode": "invalid_mode",  # Invalid
+                    "apiMode": "invalid_mode",
                     "bearerToken": "",
                     "systemPlatform": "",
                     "systemPlatformUser": "",
@@ -171,13 +171,13 @@ class TestSettingsIntegration:
             headers=auth_headers
         )

-        # Should accept any string (no validation on FastAPI side)
-        # DB API might validate
         assert response.status_code in [200, 400]

     def test_settings_persistence(self, client, auth_headers):
         """Test that settings persist across requests."""
-        # Set unique value
         unique_token = "persistence-test-token-12345"
         update_data = {
             "settings": {
@@ -194,10 +194,10 @@ class TestSettingsIntegration:
             }
         }

-        # Update
         client.put("/api/v1/settings", json=update_data, headers=auth_headers)

-        # Get multiple times to verify persistence
         for _ in range(3):
             response = client.get("/api/v1/settings", headers=auth_headers)
             assert response.status_code == 200
@@ -206,10 +206,10 @@ class TestSettingsIntegration:

     def test_settings_require_authentication(self, client):
         """Test that settings endpoints require authentication."""
-        # GET without auth
         response = client.get("/api/v1/settings")
         assert response.status_code == 401

-        # PUT without auth
         response = client.put("/api/v1/settings", json={"settings": {}})
         assert response.status_code == 401

@@ -60,7 +60,7 @@ class TestAnalysisEndpoints:

         response = client.post("/api/v1/analysis/sessions", json=session_data)

-        assert response.status_code in [400, 422]  # 422 for validation error
+        assert response.status_code in [400, 422]

     def test_get_sessions_success(self, client, mock_db_client):
         """Test getting list of sessions."""
@@ -114,16 +114,16 @@ class TestAnalysisEndpoints:
         mock_sessions = SessionList(sessions=[], total=0)
         mock_db_client.get_sessions = AsyncMock(return_value=mock_sessions)

-        # Test default values
         response = client.get("/api/v1/analysis/sessions")
         assert response.status_code == 200
         mock_db_client.get_sessions.assert_called_with(
             "test-user-123", None, 50, 0
         )

-        # Test max limit (200)
         response = client.get("/api/v1/analysis/sessions?limit=250")
-        assert response.status_code == 422  # Validation error, exceeds max
+        assert response.status_code == 422

     def test_get_session_by_id_success(self, client, mock_db_client):
         """Test getting specific session by ID."""
@@ -185,19 +185,19 @@ class TestAnalysisEndpoints:

     def test_analysis_endpoints_require_auth(self, unauthenticated_client):
         """Test that all analysis endpoints require authentication."""
-        # POST /sessions
         response = unauthenticated_client.post("/api/v1/analysis/sessions", json={})
-        assert response.status_code == 401  # HTTPBearer returns 401
+        assert response.status_code == 401

-        # GET /sessions
         response = unauthenticated_client.get("/api/v1/analysis/sessions")
         assert response.status_code == 401

-        # GET /sessions/{id}
         response = unauthenticated_client.get("/api/v1/analysis/sessions/test")
         assert response.status_code == 401

-        # DELETE /sessions/{id}
         response = unauthenticated_client.delete("/api/v1/analysis/sessions/test")
         assert response.status_code == 401

@@ -11,10 +11,10 @@ class TestAuthEndpoints:

     def test_login_success(self, unauthenticated_client, mock_db_client, test_user_response):
         """Test successful login with valid 8-digit login."""
-        # Mock DB client response
         mock_db_client.login_user = AsyncMock(return_value=test_user_response)

-        # Override dependency
         from app.main import app
         from app.dependencies import get_db_client
         app.dependency_overrides[get_db_client] = lambda: mock_db_client
@@ -30,29 +30,29 @@ class TestAuthEndpoints:
             assert "user" in data
             assert data["user"]["login"] == "12345678"

-            # Verify DB client was called
             mock_db_client.login_user.assert_called_once()
         finally:
             app.dependency_overrides.clear()

     def test_login_invalid_format(self, unauthenticated_client):
         """Test login with invalid format (not 8 digits)."""
-        # Test with 7 digits
         response = unauthenticated_client.post("/api/v1/auth/login?login=1234567")
         assert response.status_code == 400
         assert "must be 8 digits" in response.json()["detail"].lower()

-        # Test with 9 digits
         response = unauthenticated_client.post("/api/v1/auth/login?login=123456789")
         assert response.status_code == 400

-        # Test with letters
         response = unauthenticated_client.post("/api/v1/auth/login?login=abcd1234")
         assert response.status_code == 400

     def test_login_db_api_error(self, unauthenticated_client, mock_db_client):
         """Test login when DB API fails."""
-        # Mock DB client to raise exception
         mock_db_client.login_user = AsyncMock(side_effect=Exception("DB API unavailable"))

         from app.main import app
@@ -84,7 +84,7 @@ class TestAuthService:
         assert result.user.login == "12345678"
         assert result.user.user_id == "test-user-123"

-        # Verify DB client was called with correct params
         call_args = mock_db_client.login_user.call_args[0][0]
         assert call_args.login == "12345678"
         assert call_args.client_ip == "192.168.1.1"
@@ -100,7 +100,7 @@ class TestAuthService:
         with pytest.raises(ValueError, match="8 digits"):
             await auth_service.login("abcd1234", "192.168.1.1")

-        # Verify DB client was never called
         mock_db_client.login_user.assert_not_called()

     @pytest.mark.asyncio

@ -25,7 +25,7 @@ class TestTgBackendInterface:
|
|||
assert interface.api_prefix == "http://api.example.com/v1"
|
||||
MockClient.assert_called_once()
|
||||
|
||||
# Verify timeout and retries configured
|
||||
|
||||
call_kwargs = MockClient.call_args[1]
|
||||
assert call_kwargs['follow_redirects'] is True
|
||||
assert isinstance(call_kwargs['timeout'], httpx.Timeout)
|
||||
|
|
@ -48,7 +48,7 @@ class TestTgBackendInterface:
|
|||
)
|
||||
|
||||
call_kwargs = MockClient.call_args[1]
|
||||
# Timeout object is created, just verify it exists
|
||||
|
||||
assert isinstance(call_kwargs['timeout'], httpx.Timeout)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
|
|
@ -69,7 +69,7 @@ class TestTgBackendInterface:
|
|||
async with TgBackendInterface(api_prefix="http://api.example.com") as interface:
|
||||
assert interface is not None
|
||||
|
||||
# Should close on exit
|
||||
|
||||
mock_client.aclose.assert_called_once()
|
||||
|
||||
def test_build_url_with_leading_slash(self):
|
||||
|
|
@ -133,7 +133,7 @@ class TestTgBackendInterface:
|
|||
"""Test deserialization with validation error."""
|
||||
with patch('app.interfaces.base.httpx.AsyncClient'):
|
||||
interface = TgBackendInterface(api_prefix="http://api.example.com")
|
||||
# Invalid data: missing 'value' field
|
||||
|
||||
data = {"name": "test"}
|
||||
|
||||
with pytest.raises(ValidationError):
|
||||
|
|
@ -147,7 +147,7 @@ class TestTgBackendInterface:
|
|||
|
||||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.content = b'{"name": "test", "value": 42}' # Non-empty content
|
||||
mock_response.content = b'{"name": "test", "value": 42}'
|
||||
mock_response.json.return_value = {"name": "test", "value": 42}
|
||||
mock_response.raise_for_status = MagicMock()
|
||||
|
||||
|
|

@@ -229,7 +229,7 @@ class TestTgBackendInterface:
         mock_client = AsyncMock()
         mock_response = MagicMock()
         mock_response.status_code = 200
-        mock_response.content = b'{"name": "test", "value": 42}' # Non-empty content
+        mock_response.content = b'{"name": "test", "value": 42}'
         mock_response.json.return_value = {"name": "test", "value": 42}
         mock_response.raise_for_status = MagicMock()
         mock_client.get.return_value = mock_response

@@ -252,7 +252,7 @@ class TestTgBackendInterface:
         mock_client = AsyncMock()
         mock_response = MagicMock()
         mock_response.status_code = 201
-        mock_response.content = b'{"name": "created", "value": 100}' # Non-empty content
+        mock_response.content = b'{"name": "created", "value": 100}'
         mock_response.json.return_value = {"name": "created", "value": 100}
         mock_response.raise_for_status = MagicMock()
         mock_client.post.return_value = mock_response

@@ -276,7 +276,7 @@ class TestTgBackendInterface:
         mock_client = AsyncMock()
         mock_response = MagicMock()
         mock_response.status_code = 200
-        mock_response.content = b'{"result": "ok"}' # Non-empty content
+        mock_response.content = b'{"result": "ok"}'
         mock_response.json.return_value = {"result": "ok"}
         mock_response.raise_for_status = MagicMock()
         mock_client.post.return_value = mock_response

@@ -296,7 +296,7 @@ class TestTgBackendInterface:
         mock_client = AsyncMock()
         mock_response = MagicMock()
         mock_response.status_code = 200
-        mock_response.content = b'{"name": "updated", "value": 75}' # Non-empty content
+        mock_response.content = b'{"name": "updated", "value": 75}'
        mock_response.json.return_value = {"name": "updated", "value": 75}
         mock_response.raise_for_status = MagicMock()
         mock_client.put.return_value = mock_response
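
The four hunks above repeat the same response-stub wiring for GET, POST, and PUT: a MagicMock standing in for an httpx response, with a status code, non-empty raw content, a json() return value, and a no-op raise_for_status. A small helper like the sketch below could build those stubs in one place; the helper names are hypothetical and do not exist in the repository.

import json
from unittest.mock import AsyncMock, MagicMock

def make_mock_response(payload: dict, status_code: int = 200) -> MagicMock:
    """Stub that looks enough like an httpx.Response for these tests."""
    mock_response = MagicMock()
    mock_response.status_code = status_code
    mock_response.content = json.dumps(payload).encode()  # non-empty body
    mock_response.json.return_value = payload
    mock_response.raise_for_status = MagicMock()
    return mock_response

def make_mock_client(method: str, payload: dict, status_code: int = 200) -> AsyncMock:
    """Async client stub whose get/post/put call resolves to the stubbed response."""
    mock_client = AsyncMock()
    getattr(mock_client, method).return_value = make_mock_response(payload, status_code)
    return mock_client

For example, make_mock_client("get", {"name": "test", "value": 42}) reproduces the setup in the first of the four hunks.
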

@@ -17,7 +17,7 @@ class TestDBApiClient:
         with patch('app.interfaces.base.httpx.AsyncClient'):
             client = DBApiClient(api_prefix="http://db-api:8080/api/v1")
 
-            # Mock the post method
+
             mock_user_response = UserResponse(
                 user_id="user-123",
                 login="12345678",

@@ -15,11 +15,11 @@ class TestGetCurrentUser:
     @pytest.mark.asyncio
     async def test_get_current_user_valid_token(self):
         """Test getting current user with valid token."""
-        # Mock valid token payload
+
         valid_payload = {
             "user_id": "user-123",
             "login": "12345678",
-            "exp": 9999999999 # Far future
+            "exp": 9999999999
         }
 
         credentials = MagicMock(spec=HTTPAuthorizationCredentials)

@@ -38,7 +38,7 @@ class TestGetCurrentUser:
         credentials = MagicMock(spec=HTTPAuthorizationCredentials)
         credentials.credentials = "invalid.token"
 
-        # Mock invalid token (returns None)
+
         with patch('app.dependencies.decode_access_token', return_value=None):
             with pytest.raises(HTTPException) as exc_info:
                 await get_current_user(credentials)

@@ -52,7 +52,7 @@ class TestGetCurrentUser:
         credentials = MagicMock(spec=HTTPAuthorizationCredentials)
         credentials.credentials = "expired.token"
 
-        # Expired tokens return None from decode
+
         with patch('app.dependencies.decode_access_token', return_value=None):
             with pytest.raises(HTTPException) as exc_info:
                 await get_current_user(credentials)

@@ -89,5 +89,5 @@ class TestGetDbClient:
 
         client = get_db_client()
 
-        # Check that client was created with correct URL
+
         assert client.api_prefix == "http://test-api:9999/api/v1"
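
These dependency tests hinge on decode_access_token returning None for invalid or expired tokens, and on get_current_user turning that into an HTTPException. A minimal sketch of a dependency matching that behaviour is shown below; the status code, detail text, and import path are assumptions for illustration, not the project's verified code.

from fastapi import Depends, HTTPException, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer

from app.security import decode_access_token  # import path assumed

security = HTTPBearer()

async def get_current_user(
    credentials: HTTPAuthorizationCredentials = Depends(security),
) -> dict:
    # decode_access_token is expected to return None for bad or expired tokens.
    payload = decode_access_token(credentials.credentials)
    if payload is None:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid or expired token",
        )
    return {"user_id": payload["user_id"], "login": payload["login"]}
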

@@ -23,7 +23,7 @@ class TestMainEndpoints:
         """Test /app endpoint serves frontend."""
         from fastapi.responses import FileResponse
 
-        # Get the endpoint function
+
         for route in app.routes:
             if hasattr(route, 'path') and route.path == '/app':
                 result = await route.endpoint()

@@ -36,7 +36,7 @@ class TestMainEndpoints:
         """Test / endpoint serves frontend."""
         from fastapi.responses import FileResponse
 
-        # Get the endpoint function
+
         for route in app.routes:
             if hasattr(route, 'path') and route.path == '/':
                 result = await route.endpoint()

@@ -22,11 +22,11 @@ class TestAuthModels:
 
     def test_login_request_invalid_format(self):
         """Test LoginRequest with invalid login format."""
-        # Not 8 digits
+
         with pytest.raises(ValidationError):
             LoginRequest(login="1234567", client_ip="192.168.1.1")
 
-        # Contains letters
+
         with pytest.raises(ValidationError):
             LoginRequest(login="abcd1234", client_ip="192.168.1.1")
 

@@ -79,7 +79,7 @@ class TestQueryModels:
         """Test QuestionRequest with default with_docs."""
         question = QuestionRequest(body="Test question")
 
-        assert question.with_docs is True # Default value
+        assert question.with_docs is True
 
     def test_bench_query_request_valid(self):
         """Test valid BenchQueryRequest."""

@@ -116,7 +116,7 @@ class TestQueryModels:
             questions=[QuestionRequest(body="Q1")]
         )
 
-        assert request.reset_session is True # Default value
+        assert request.reset_session is True
 
     def test_query_response(self):
         """Test QueryResponse model."""
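
The model tests above expect LoginRequest to reject anything that is not exactly eight digits and QuestionRequest to default with_docs to True. A minimal Pydantic sketch consistent with the first two of those expectations follows; it assumes Pydantic v2, so the real models may be written differently (for example with a regex Field constraint).

from pydantic import BaseModel, field_validator

class LoginRequest(BaseModel):
    login: str
    client_ip: str

    @field_validator("login")
    @classmethod
    def login_must_be_eight_digits(cls, v: str) -> str:
        # Rejects "1234567" (too short) and "abcd1234" (letters), as in the tests above.
        if not (len(v) == 8 and v.isdigit()):
            raise ValueError("login must be exactly 8 digits")
        return v

class QuestionRequest(BaseModel):
    body: str
    with_docs: bool = True  # default asserted by the with_docs test above
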

@@ -56,7 +56,7 @@ class TestBenchQueryEndpoint:
 
     def test_bench_query_wrong_api_mode(self, client, mock_db_client, test_settings):
         """Test bench query when environment is configured for backend mode."""
-        # Create new settings with backend apiMode
+
         from app.models.settings import EnvironmentSettings, UserSettings
 
         backend_settings = EnvironmentSettings(

@@ -89,7 +89,7 @@ class TestBenchQueryEndpoint:
 
         response = client.post("/api/v1/query/bench", json=request_data)
 
-        # Can be 400 (if caught properly) or 500 (if generic exception)
+
         assert response.status_code in [400, 500]
         if response.status_code == 400:
             assert "not configured for bench mode" in response.json()["detail"].lower()

@@ -119,7 +119,7 @@ class TestBenchQueryEndpoint:
 
     def test_bench_query_settings_not_found(self, client, mock_db_client, test_settings):
         """Test bench query when environment settings not found."""
-        # Remove ift settings
+
         from app.models.settings import UserSettings
         settings_without_ift = UserSettings(
             user_id="test-user-123",

@@ -138,7 +138,7 @@ class TestBenchQueryEndpoint:
 
         response = client.post("/api/v1/query/bench", json=request_data)
 
-        # HTTPException inside try/except is caught and returns 500
+
         assert response.status_code == 500
 
 

@@ -147,7 +147,7 @@ class TestBackendQueryEndpoint:
 
     def test_backend_query_success(self, client, mock_db_client, test_settings, mock_backend_response):
         """Test successful backend query."""
-        # Set apiMode to backend
+
         test_settings.settings["ift"].apiMode = "backend"
         mock_db_client.get_user_settings = AsyncMock(return_value=test_settings)
 

@@ -183,7 +183,7 @@ class TestBackendQueryEndpoint:
 
     def test_backend_query_wrong_api_mode(self, client, mock_db_client, test_settings):
         """Test backend query when environment is configured for bench mode."""
-        # test_settings already has bench mode, so this should fail
+
         mock_db_client.get_user_settings = AsyncMock(return_value=test_settings)
 
         request_data = {

@@ -194,7 +194,7 @@ class TestBackendQueryEndpoint:
 
         response = client.post("/api/v1/query/backend", json=request_data)
 
-        # Can be 400 (if caught properly) or 500 (if generic exception)
+
         assert response.status_code in [400, 500]
         if response.status_code == 400:
             assert "not configured for backend mode" in response.json()["detail"].lower()

@@ -214,7 +214,7 @@ class TestBackendQueryEndpoint:
 
     def test_backend_query_settings_not_found(self, client, mock_db_client, test_settings):
         """Test backend query when environment settings not found."""
-        # Set apiMode to backend for ift but remove psi settings
+
         from app.models.settings import UserSettings
         test_settings.settings["ift"].apiMode = "backend"
         settings_without_psi = UserSettings(

@@ -235,7 +235,7 @@ class TestBackendQueryEndpoint:
 
         response = client.post("/api/v1/query/backend", json=request_data)
 
-        # HTTPException inside try/except is caught and returns 500
+
         assert response.status_code == 500
 
 

@@ -245,7 +245,7 @@ class TestRagService:
     @pytest.mark.asyncio
     async def test_send_bench_query_success(self, mock_httpx_client, mock_bench_response):
         """Test successful bench query via RagService."""
-        # Configure mock response
+
         mock_httpx_client.post.return_value.json.return_value = mock_bench_response
 
         with patch('app.services.rag_service.httpx.AsyncClient', return_value=mock_httpx_client):

@@ -271,7 +271,7 @@ class TestRagService:
             assert result == mock_bench_response
             mock_httpx_client.post.assert_called_once()
 
-            # Verify headers
+
             call_kwargs = mock_httpx_client.post.call_args[1]
             headers = call_kwargs["headers"]
             assert headers["Request-Id"] == "test-request-123"

@@ -281,7 +281,7 @@ class TestRagService:
     @pytest.mark.asyncio
     async def test_send_backend_query_success(self, mock_httpx_client, mock_backend_response):
         """Test successful backend query via RagService."""
-        # Configure mock response
+
         mock_httpx_client.post.return_value.json.return_value = mock_backend_response
 
         with patch('app.services.rag_service.httpx.AsyncClient', return_value=mock_httpx_client):

@@ -307,7 +307,7 @@ class TestRagService:
             )
 
             assert result == [mock_backend_response]
-            # 2 calls: ask + reset
+
             assert mock_httpx_client.post.call_count == 2
 
     @pytest.mark.asyncio

@@ -329,7 +329,7 @@ class TestRagService:
             )
 
             assert result == [mock_backend_response]
-            # Only 1 call: ask (no reset)
+
             assert mock_httpx_client.post.call_count == 1
 
     @pytest.mark.asyncio

@@ -374,7 +374,7 @@ class TestRagService:
     async def test_create_client_with_mtls(self):
         """Test creating HTTP client with mTLS configuration."""
         with patch('app.services.rag_service.settings') as mock_settings:
-            # Configure mTLS settings
+
             mock_settings.IFT_RAG_CERT_CERT = "/path/to/client.crt"
             mock_settings.IFT_RAG_CERT_KEY = "/path/to/client.key"
             mock_settings.IFT_RAG_CERT_CA = "/path/to/ca.crt"

@@ -388,10 +388,10 @@ class TestRagService:
             with patch('app.services.rag_service.httpx.AsyncClient') as MockAsyncClient:
                 service = RagService()
 
-                # Verify AsyncClient was called 3 times (one per environment)
+
                 assert MockAsyncClient.call_count == 3
 
-                # Check the first call (ift) had mTLS config
+
                 first_call_kwargs = MockAsyncClient.call_args_list[0][1]
                 assert first_call_kwargs["cert"] == ("/path/to/client.crt", "/path/to/client.key")
                 assert first_call_kwargs["verify"] == "/path/to/ca.crt"
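
The two mTLS hunks above assert that each per-environment client is created with cert set to a (client.crt, client.key) tuple and verify pointing at the CA bundle, or with cert=None and verify=True when no certificates are configured. A helper in the spirit of what RagService appears to do is sketched below; the helper and argument names are assumptions, but the httpx keyword arguments are exactly the ones the tests check.

import httpx

def build_rag_client(cert_file: str, key_file: str, ca_file: str) -> httpx.AsyncClient:
    # With mTLS configured: send the client certificate pair and verify against the CA bundle.
    # Without it: no client certificate and default certificate verification.
    cert = (cert_file, key_file) if cert_file and key_file else None
    verify = ca_file if ca_file else True
    return httpx.AsyncClient(cert=cert, verify=verify)

RagService would presumably call such a helper once per environment (ift, psi, prod), which is why the tests expect AsyncClient to be constructed three times.
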

@@ -400,7 +400,7 @@ class TestRagService:
     async def test_create_client_without_mtls(self):
         """Test creating HTTP client without mTLS."""
         with patch('app.services.rag_service.settings') as mock_settings:
-            # No mTLS certs for any environment
+
             mock_settings.IFT_RAG_CERT_CERT = ""
             mock_settings.IFT_RAG_CERT_KEY = ""
             mock_settings.IFT_RAG_CERT_CA = ""

@@ -414,19 +414,19 @@ class TestRagService:
             with patch('app.services.rag_service.httpx.AsyncClient') as MockAsyncClient:
                 service = RagService()
 
-                # Verify AsyncClient was called 3 times
+
                 assert MockAsyncClient.call_count == 3
 
-                # Check all calls had no mTLS
+
                 for call in MockAsyncClient.call_args_list:
                     call_kwargs = call[1]
                     assert call_kwargs["cert"] is None
-                    assert call_kwargs["verify"] is True # Default verify
+                    assert call_kwargs["verify"] is True
 
     @pytest.mark.asyncio
     async def test_send_bench_query_http_error(self, mock_httpx_client):
         """Test bench query with HTTP error."""
-        # Configure mock to raise HTTP error
+
         error_response = MagicMock()
         error_response.status_code = 500
         error_response.text = "Internal Server Error"

@@ -498,7 +498,7 @@ class TestRagService:
 
         await service.close()
 
-        # Should close all 3 clients (ift, psi, prod)
+
         assert mock_httpx_client.aclose.call_count == 3
 
     @pytest.mark.asyncio

@@ -508,7 +508,7 @@ class TestRagService:
         async with RagService() as service:
             assert service is not None
 
-        # Should close all clients on exit
+
         assert mock_httpx_client.aclose.call_count == 3
 
     @pytest.mark.asyncio

@@ -49,7 +49,7 @@ class TestJWTSecurity:
             "login": "12345678"
         }
 
-        # Create token that expires immediately
+
         token = create_access_token(data, expires_delta=timedelta(seconds=-1))
         payload = decode_access_token(token)
 
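
This hunk relies on create_access_token accepting an expires_delta override and on decode_access_token returning None once a token has expired. A minimal sketch of such helpers is shown below, assuming PyJWT; the secret, algorithm, and library choice are placeholders, not the project's actual configuration.

from datetime import datetime, timedelta, timezone
from typing import Optional

import jwt  # PyJWT assumed; the project may use a different JWT library

SECRET_KEY = "change-me"  # placeholder only
ALGORITHM = "HS256"

def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    payload = data.copy()
    payload["exp"] = datetime.now(timezone.utc) + (expires_delta or timedelta(minutes=30))
    return jwt.encode(payload, SECRET_KEY, algorithm=ALGORITHM)

def decode_access_token(token: str) -> Optional[dict]:
    # Expired or otherwise invalid tokens decode to None, which is what the test asserts.
    try:
        return jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
    except jwt.PyJWTError:
        return None

With expires_delta=timedelta(seconds=-1) the "exp" claim is already in the past, so decode_access_token returns None immediately.
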

@@ -28,7 +28,7 @@ class TestSettingsEndpoints:
 
     def test_get_settings_not_found(self, client, mock_db_client):
         """Test getting settings when user not found."""
-        # Mock 404 from DB API
+
         error_response = httpx.Response(404, json={"detail": "Not found"})
         mock_db_client.get_user_settings = AsyncMock(
             side_effect=httpx.HTTPStatusError("Not found", request=None, response=error_response)

@@ -43,7 +43,7 @@ class TestSettingsEndpoints:
         """Test getting settings without authentication."""
         response = unauthenticated_client.get("/api/v1/settings")
 
-        assert response.status_code == 401 # HTTPBearer returns 401
+        assert response.status_code == 401
 
     def test_update_settings_success(self, client, mock_db_client, test_settings):
         """Test updating user settings successfully."""