"""End-to-End tests for error scenarios and edge cases. Tests error handling, validation, and failure recovery across the entire stack. """ import pytest @pytest.mark.e2e class TestAuthenticationErrors: """Test authentication error scenarios.""" def test_query_without_authentication(self, e2e_client, setup_test_settings): """Test that queries without auth token are rejected.""" query_data = { "environment": "ift", "questions": [{"body": "Unauthorized query", "with_docs": True}] } response = e2e_client.post("/api/v1/query/bench", json=query_data) assert response.status_code == 401 def test_invalid_bearer_token(self, e2e_client, setup_test_settings): """Test that invalid JWT tokens are rejected.""" invalid_headers = {"Authorization": "Bearer invalid_token_12345"} response = e2e_client.get("/api/v1/settings", headers=invalid_headers) assert response.status_code == 401 def test_expired_or_malformed_token(self, e2e_client, setup_test_settings): """Test malformed authorization header.""" malformed_headers = {"Authorization": "NotBearer token"} response = e2e_client.get("/api/v1/settings", headers=malformed_headers) assert response.status_code == 401 def test_session_access_without_auth(self, e2e_client, setup_test_settings): """Test that session endpoints require authentication.""" response = e2e_client.get("/api/v1/analysis/sessions") assert response.status_code == 401 session_data = { "environment": "ift", "api_mode": "bench", "request": [{"body": "Test"}], "response": {"answer": "Test"} } response = e2e_client.post("/api/v1/analysis/sessions", json=session_data) assert response.status_code == 401 @pytest.mark.e2e class TestValidationErrors: """Test input validation errors.""" @pytest.mark.usefixtures("check_prerequisites") def test_invalid_environment( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test query with invalid environment name.""" query_data = { "environment": "invalid_env", "questions": [{"body": "Test", "with_docs": True}] } response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers ) assert response.status_code == 422 @pytest.mark.usefixtures("check_prerequisites") def test_empty_questions_list( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test query with empty questions list.""" query_data = { "environment": "ift", "questions": [] } response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers ) assert response.status_code == 422 @pytest.mark.usefixtures("check_prerequisites") def test_missing_required_fields( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test query with missing required fields.""" query_data = {"environment": "ift"} response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers ) assert response.status_code == 422 @pytest.mark.usefixtures("check_prerequisites") def test_invalid_question_structure( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test question with missing required fields.""" query_data = { "environment": "ift", "questions": [ {"body": "Valid question", "with_docs": True}, {"with_docs": True} ] } response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers ) assert response.status_code == 422 @pytest.mark.usefixtures("check_prerequisites") def test_invalid_session_data( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test creating session with invalid data.""" invalid_session = { "environment": "ift" } response = e2e_client.post( 
"/api/v1/analysis/sessions", json=invalid_session, headers=e2e_auth_headers ) assert response.status_code == 422 @pytest.mark.e2e class TestModeCompatibilityErrors: """Test API mode compatibility errors.""" @pytest.mark.usefixtures("check_prerequisites") def test_bench_query_with_backend_mode_settings( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test that bench query fails when environment is in backend mode.""" settings_update = { "settings": { "ift": { "apiMode": "backend", "bearerToken": "test_token", "systemPlatform": "test", "systemPlatformUser": "test_user", "platformUserId": "123", "platformId": "test_platform", "withClassify": False, "resetSessionMode": False } } } update_response = e2e_client.put( "/api/v1/settings", json=settings_update, headers=e2e_auth_headers ) assert update_response.status_code == 200 query_data = { "environment": "ift", "questions": [{"body": "Test", "with_docs": True}] } response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers ) assert response.status_code in [400, 500, 502] @pytest.mark.usefixtures("check_prerequisites") def test_backend_query_with_bench_mode_settings( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test that backend query fails when environment is in bench mode.""" query_data = { "environment": "ift", "questions": [{"body": "Test", "with_docs": True}], "reset_session": False } response = e2e_client.post( "/api/v1/query/backend", json=query_data, headers=e2e_auth_headers ) assert response.status_code in [400, 500, 502] @pytest.mark.e2e class TestResourceNotFoundErrors: """Test resource not found scenarios.""" @pytest.mark.usefixtures("check_prerequisites") def test_get_nonexistent_session( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test retrieving a session that doesn't exist.""" fake_session_id = "00000000-0000-0000-0000-000000000000" response = e2e_client.get( f"/api/v1/analysis/sessions/{fake_session_id}", headers=e2e_auth_headers ) assert response.status_code == 404 @pytest.mark.usefixtures("check_prerequisites") def test_delete_nonexistent_session( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test deleting a session that doesn't exist.""" fake_session_id = "00000000-0000-0000-0000-000000000000" response = e2e_client.delete( f"/api/v1/analysis/sessions/{fake_session_id}", headers=e2e_auth_headers ) assert response.status_code == 404 @pytest.mark.usefixtures("check_prerequisites") def test_invalid_session_id_format( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test accessing session with invalid UUID format.""" invalid_session_id = "not-a-valid-uuid" response = e2e_client.get( f"/api/v1/analysis/sessions/{invalid_session_id}", headers=e2e_auth_headers ) assert response.status_code in [404, 422] @pytest.mark.e2e class TestSettingsErrors: """Test settings-related error scenarios.""" @pytest.mark.usefixtures("check_prerequisites") def test_update_settings_with_invalid_mode( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test updating settings with invalid API mode.""" invalid_settings = { "settings": { "ift": { "apiMode": "invalid_mode", "bearerToken": "test" } } } response = e2e_client.put( "/api/v1/settings", json=invalid_settings, headers=e2e_auth_headers ) assert response.status_code == 422 @pytest.mark.usefixtures("check_prerequisites") def test_update_settings_with_invalid_environment( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test updating settings for non-existent 
environment.""" invalid_settings = { "settings": { "invalid_env": { "apiMode": "bench", "bearerToken": "test" } } } response = e2e_client.put( "/api/v1/settings", json=invalid_settings, headers=e2e_auth_headers ) assert response.status_code in [400, 422] @pytest.mark.e2e class TestEdgeCases: """Test edge cases and boundary conditions.""" @pytest.mark.usefixtures("check_prerequisites") def test_very_long_question( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test query with very long question text.""" long_question = "Тест " * 1000 query_data = { "environment": "ift", "questions": [{"body": long_question, "with_docs": True}] } response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers, timeout=120.0 ) assert response.status_code in [200, 400, 413, 422, 502] @pytest.mark.usefixtures("check_prerequisites") def test_special_characters_in_question( self, e2e_client, e2e_auth_headers, setup_test_settings ): """Test query with special characters.""" special_chars_question = "Test with special chars: <>&\"'`\n\t\r" query_data = { "environment": "ift", "questions": [{"body": special_chars_question, "with_docs": True}] } response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers, timeout=120.0 ) assert response.status_code in [200, 400, 422, 502] @pytest.mark.usefixtures("check_prerequisites") def test_large_number_of_questions( self, e2e_client, e2e_auth_headers, setup_test_settings, cleanup_test_sessions ): """Test query with many questions.""" questions = [ {"body": f"Вопрос номер {i}", "with_docs": i % 2 == 0} for i in range(50) ] query_data = { "environment": "ift", "questions": questions } response = e2e_client.post( "/api/v1/query/bench", json=query_data, headers=e2e_auth_headers, timeout=180.0 ) assert response.status_code in [200, 400, 413, 422, 502, 504] @pytest.mark.usefixtures("check_prerequisites") def test_query_pagination_limits( self, e2e_client, e2e_auth_headers, setup_test_settings, cleanup_test_sessions ): """Test session list pagination with edge case limits.""" response = e2e_client.get( "/api/v1/analysis/sessions?limit=0", headers=e2e_auth_headers ) assert response.status_code in [200, 400, 422] response = e2e_client.get( "/api/v1/analysis/sessions?limit=10000", headers=e2e_auth_headers ) assert response.status_code in [200, 400, 422] response = e2e_client.get( "/api/v1/analysis/sessions?offset=-1", headers=e2e_auth_headers ) assert response.status_code in [200, 400, 422]