"""End-to-End tests for complete user flow. Tests the entire workflow from authentication to RAG query and analysis. """ import pytest import time @pytest.mark.e2e class TestCompleteUserFlow: """Test complete user flow from start to finish.""" @pytest.mark.usefixtures("check_prerequisites") def test_full_workflow_bench_mode( self, e2e_client, e2e_auth_headers, setup_test_settings, cleanup_test_sessions ): """Test complete workflow in bench mode. Flow: 1. Authenticate 2. Get/update settings 3. Send bench query to RAG 4. Save analysis session 5. Retrieve session 6. Delete session """ # 1. Authentication already done via fixture # 2. Verify settings settings_response = e2e_client.get( "/api/v1/settings", headers=e2e_auth_headers ) assert settings_response.status_code == 200 settings = settings_response.json() assert "ift" in settings["settings"] assert settings["settings"]["ift"]["apiMode"] == "bench" # 3. Send bench query to IFT RAG query_data = { "environment": "ift", "questions": [ {"body": "E2E тестовый вопрос 1", "with_docs": True}, {"body": "E2E тестовый вопрос 2", "with_docs": False} ] } query_response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers, timeout=120.0 # RAG can be slow ) assert query_response.status_code == 200 query_result = query_response.json() assert "request_id" in query_result assert "response" in query_result assert "timestamp" in query_result assert query_result["environment"] == "ift" # 4. Save analysis session session_data = { "environment": "ift", "api_mode": "bench", "request": query_data["questions"], "response": query_result["response"], "annotations": { "request_id": query_result["request_id"], "timestamp": query_result["timestamp"], "test_type": "e2e_full_workflow" } } session_response = e2e_client.post( "/api/v1/analysis/sessions", json=session_data, headers=e2e_auth_headers ) assert session_response.status_code == 201 session = session_response.json() assert "session_id" in session session_id = session["session_id"] # 5. Retrieve session get_session_response = e2e_client.get( f"/api/v1/analysis/sessions/{session_id}", headers=e2e_auth_headers ) assert get_session_response.status_code == 200 retrieved_session = get_session_response.json() assert retrieved_session["session_id"] == session_id assert retrieved_session["environment"] == "ift" assert retrieved_session["api_mode"] == "bench" # 6. Delete session delete_response = e2e_client.delete( f"/api/v1/analysis/sessions/{session_id}", headers=e2e_auth_headers ) assert delete_response.status_code == 204 # Verify deletion verify_response = e2e_client.get( f"/api/v1/analysis/sessions/{session_id}", headers=e2e_auth_headers ) assert verify_response.status_code == 404 @pytest.mark.usefixtures("check_prerequisites") def test_full_workflow_backend_mode( self, e2e_client, e2e_auth_headers, setup_test_settings, cleanup_test_sessions ): """Test complete workflow in backend mode (PSI). Flow: 1. Authenticate 2. Verify PSI settings (backend mode) 3. Send backend query to PSI RAG 4. Save and verify session """ # 1. Verify PSI settings settings_response = e2e_client.get( "/api/v1/settings", headers=e2e_auth_headers ) assert settings_response.status_code == 200 settings = settings_response.json() assert settings["settings"]["psi"]["apiMode"] == "backend" # 2. 
        query_data = {
            "environment": "psi",
            "questions": [
                {"body": "E2E backend тест", "with_docs": True}
            ],
            "reset_session": False
        }
        query_response = e2e_client.post(
            "/api/v1/query/backend",
            json=query_data,
            headers=e2e_auth_headers,
            timeout=120.0
        )
        assert query_response.status_code == 200
        query_result = query_response.json()
        assert query_result["environment"] == "psi"
        assert "response" in query_result

        # 3. Save session
        session_data = {
            "environment": "psi",
            "api_mode": "backend",
            "request": query_data["questions"],
            "response": query_result["response"],
            "annotations": {
                "test_type": "e2e_backend_mode",
                "reset_session": False
            }
        }
        session_response = e2e_client.post(
            "/api/v1/analysis/sessions",
            json=session_data,
            headers=e2e_auth_headers
        )
        assert session_response.status_code == 201

    @pytest.mark.usefixtures("check_prerequisites")
    def test_settings_change_affects_queries(
        self,
        e2e_client,
        e2e_auth_headers,
        setup_test_settings,
        cleanup_test_sessions
    ):
        """Test that changing settings affects subsequent queries."""
        # 1. Get current settings
        settings_response = e2e_client.get(
            "/api/v1/settings",
            headers=e2e_auth_headers
        )
        assert settings_response.status_code == 200
        original_settings = settings_response.json()

        # 2. Change IFT to backend mode
        updated_settings = {
            "settings": {
                "ift": {
                    **original_settings["settings"]["ift"],
                    "apiMode": "backend"
                }
            }
        }
        update_response = e2e_client.put(
            "/api/v1/settings",
            json=updated_settings,
            headers=e2e_auth_headers
        )
        assert update_response.status_code == 200

        # 3. Try bench query (should fail - wrong mode)
        bench_query = {
            "environment": "ift",
            "questions": [{"body": "Test", "with_docs": True}]
        }
        bench_response = e2e_client.post(
            "/api/v1/query/bench",
            json=bench_query,
            headers=e2e_auth_headers
        )
        # Should fail because IFT is now in backend mode
        assert bench_response.status_code in [400, 500]

        # 4. Backend query should work
        backend_query = {
            "environment": "ift",
            "questions": [{"body": "Test", "with_docs": True}],
            "reset_session": True
        }
        backend_response = e2e_client.post(
            "/api/v1/query/backend",
            json=backend_query,
            headers=e2e_auth_headers,
            timeout=120.0
        )
        assert backend_response.status_code == 200

        # 5. Restore original settings
        restore_response = e2e_client.put(
            "/api/v1/settings",
            json={"settings": {"ift": original_settings["settings"]["ift"]}},
            headers=e2e_auth_headers
        )
        assert restore_response.status_code == 200

    @pytest.mark.usefixtures("check_prerequisites")
    def test_multiple_sessions_management(
        self,
        e2e_client,
        e2e_auth_headers,
        setup_test_settings,
        cleanup_test_sessions
    ):
        """Test creating and managing multiple analysis sessions."""
        session_ids = []

        # Create multiple sessions
        for i, env in enumerate(["ift", "psi", "prod"]):
            session_data = {
                "environment": env,
                "api_mode": "bench" if env != "psi" else "backend",
                "request": [{"body": f"E2E test question {i}"}],
                "response": {"answer": f"E2E test answer {i}"},
                "annotations": {
                    "test_type": "e2e_multiple_sessions",
                    "iteration": i
                }
            }
            response = e2e_client.post(
                "/api/v1/analysis/sessions",
                json=session_data,
                headers=e2e_auth_headers
            )
            assert response.status_code == 201
            session_ids.append(response.json()["session_id"])

        # List all sessions
        list_response = e2e_client.get(
            "/api/v1/analysis/sessions?limit=50",
            headers=e2e_auth_headers
        )
        assert list_response.status_code == 200
        sessions_list = list_response.json()
        assert sessions_list["total"] >= 3

        # Filter by environment
        ift_sessions = e2e_client.get(
            "/api/v1/analysis/sessions?environment=ift&limit=50",
            headers=e2e_auth_headers
        )
        assert ift_sessions.status_code == 200

        # Delete all created sessions
        for session_id in session_ids:
            delete_response = e2e_client.delete(
                f"/api/v1/analysis/sessions/{session_id}",
                headers=e2e_auth_headers
            )
            assert delete_response.status_code == 204

    @pytest.mark.usefixtures("check_prerequisites")
    def test_concurrent_user_isolation(
        self,
        e2e_client,
        e2e_auth_headers,
        setup_test_settings,
        cleanup_test_sessions
    ):
        """Test that user data is properly isolated (sessions, settings)."""
        # Create a session
        session_data = {
            "environment": "ift",
            "api_mode": "bench",
            "request": [{"body": "Isolation test"}],
            "response": {"answer": "Isolated data"},
            "annotations": {"test": "isolation"}
        }
        create_response = e2e_client.post(
            "/api/v1/analysis/sessions",
            json=session_data,
            headers=e2e_auth_headers
        )
        assert create_response.status_code == 201
        session_id = create_response.json()["session_id"]

        # Verify we can access our session
        get_response = e2e_client.get(
            f"/api/v1/analysis/sessions/{session_id}",
            headers=e2e_auth_headers
        )
        assert get_response.status_code == 200

        # Try to access without auth (should fail)
        unauth_response = e2e_client.get(
            f"/api/v1/analysis/sessions/{session_id}"
        )
        assert unauth_response.status_code == 401

        # Cleanup
        e2e_client.delete(
            f"/api/v1/analysis/sessions/{session_id}",
            headers=e2e_auth_headers
        )
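
# ---------------------------------------------------------------------------
# Note: the fixtures used above (e2e_client, e2e_auth_headers,
# setup_test_settings, cleanup_test_sessions, check_prerequisites) are
# expected to be provided by conftest.py. Below is a minimal, commented-out
# sketch of the two core fixtures, assuming an httpx-based client and a
# token-based login endpoint; the env var names and the /api/v1/auth/login
# path are illustrative assumptions, not the project's confirmed API.
#
#     import os
#     import httpx
#     import pytest
#
#     @pytest.fixture
#     def e2e_client():
#         # Base URL of the running service under test (assumed env var).
#         base_url = os.environ.get("E2E_BASE_URL", "http://localhost:8000")
#         with httpx.Client(base_url=base_url, timeout=30.0) as client:
#             yield client
#
#     @pytest.fixture
#     def e2e_auth_headers(e2e_client):
#         # Log in once and reuse the bearer token for all requests.
#         response = e2e_client.post(
#             "/api/v1/auth/login",
#             json={
#                 "username": os.environ["E2E_USERNAME"],
#                 "password": os.environ["E2E_PASSWORD"],
#             },
#         )
#         response.raise_for_status()
#         return {"Authorization": f"Bearer {response.json()['access_token']}"}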