Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
Rohini-Microsoft committed Jul 15, 2024
2 parents dd17bf2 + efd72d3 commit 38ccea2
Show file tree
Hide file tree
Showing 9 changed files with 152 additions and 38 deletions.
9 changes: 7 additions & 2 deletions code/backend/batch/utilities/search/search_handler_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,15 @@ def __init__(self, env_helper: EnvHelper):
self.env_helper = env_helper
self.search_client = self.create_search_client()

def search_with_facets(self, query: str, facet: str, facet_count: int):
    """Run a search that aggregates facet buckets for a single field.

    Args:
        query: Search text ("*" matches all documents).
        facet: Name of the facet field to aggregate on (e.g. "title").
        facet_count: Number of facet buckets to request.
            # NOTE(review): the Explore Data page passes 0 — presumably
            # meaning "service default / no limit"; confirm against the
            # Azure AI Search facets documentation.

    Returns:
        The search client's result iterator, or None when no search
        client is configured.
    """
    if self.search_client is None:
        return None

    # Azure AI Search facet syntax is "<field>,count:<n>".
    facet_param = f"{facet},count:{facet_count}"

    # Perform the search, requesting the facet aggregation alongside hits.
    return self.search_client.search(query, facets=[facet_param])

def get_unique_files(self, results, facet_key: str):
if results:
Expand Down
2 changes: 1 addition & 1 deletion code/backend/pages/02_Explore_Data.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def load_css(file_path):
try:
search_handler = Search.get_search_handler(env_helper)

results = search_handler.search_with_facets("*", ["title"])
results = search_handler.search_with_facets("*", "title", facet_count=0)
unique_files = search_handler.get_unique_files(results, "title")
filename = st.selectbox("Select your file:", unique_files)
st.write("Showing chunks for:", filename)
Expand Down
2 changes: 1 addition & 1 deletion code/backend/pages/04_Configuration.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ def validate_documents():
)
example_user_question_help = "The example user question."
example_answer_help = "The expected answer."
with st.expander("", expanded=True):
with st.expander("Assistant type configuration", expanded=True):
cols = st.columns([2, 4])
with cols[0]:
st.selectbox(
Expand Down
21 changes: 15 additions & 6 deletions code/create_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from os import path
import sys
import requests
from openai import AzureOpenAI, Stream, RateLimitError
from openai import AzureOpenAI, Stream, RateLimitError, APIStatusError
from openai.types.chat import ChatCompletionChunk
from flask import Flask, Response, request, Request, jsonify
from dotenv import load_dotenv
Expand All @@ -24,7 +24,6 @@
ERROR_GENERIC_MESSAGE = "An error occurred. Please try again. If the problem persists, please contact the site administrator."
logger = logging.getLogger(__name__)


def stream_with_data(response: Stream[ChatCompletionChunk]):
"""This function streams the response from Azure OpenAI with data."""
response_obj = {
Expand Down Expand Up @@ -345,10 +344,15 @@ def conversation_azure_byod():
return conversation_with_data(request, env_helper)
else:
return conversation_without_data(request, env_helper)
except RateLimitError as e:
except APIStatusError as e:
error_message = str(e)
logger.exception("Exception in /api/conversation | %s", error_message)
return jsonify({"error": ERROR_429_MESSAGE}), 429
response_json = e.response.json()
response_message = response_json.get("error", {}).get("message", "")
response_code = response_json.get("error", {}).get("code", "")
if response_code == "429" or "429" in response_message:
return jsonify({"error": ERROR_429_MESSAGE}), 429
return jsonify({"error": ERROR_GENERIC_MESSAGE}), 500
except Exception as e:
error_message = str(e)
logger.exception("Exception in /api/conversation | %s", error_message)
Expand Down Expand Up @@ -384,10 +388,15 @@ async def conversation_custom():

return jsonify(response_obj), 200

except RateLimitError as e:
except APIStatusError as e:
error_message = str(e)
logger.exception("Exception in /api/conversation | %s", error_message)
return jsonify({"error": ERROR_429_MESSAGE}), 429
response_json = e.response.json()
response_message = response_json.get("error", {}).get("message", "")
response_code = response_json.get("error", {}).get("code", "")
if response_code == "429" or "429" in response_message:
return jsonify({"error": ERROR_429_MESSAGE}), 429
return jsonify({"error": ERROR_GENERIC_MESSAGE}), 500
except Exception as e:
error_message = str(e)
logger.exception("Exception in /api/conversation | %s", error_message)
Expand Down
43 changes: 43 additions & 0 deletions code/tests/search_utilities/test_azure_search_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -364,6 +364,49 @@ def test_semantic_search_with_advanced_image_processing(
top=handler.env_helper.AZURE_SEARCH_TOP_K,
)

@pytest.fixture
def search_handler():
    """Provide an AzureSearchHandler wired to a mocked EnvHelper."""
    mocked_env_helper = Mock()
    return AzureSearchHandler(mocked_env_helper)

def test_search_with_facets_no_search_client(search_handler):

search_handler.search_client = None
result = search_handler.search_with_facets("query", "facet1", 10)
assert result is None


def test_search_with_facets_valid(search_handler):
mock_search_client = MagicMock()
search_handler.search_client = mock_search_client
mock_search_client.search.return_value = "search_results"
result = search_handler.search_with_facets("query", "facet1", 10)
mock_search_client.search.assert_called_once_with(
"query", facets=["facet1,count:10"]
)
assert result == "search_results"



def test_get_unique_files_no_results(search_handler):
results = None
facet_key = "facet_key"
result = search_handler.get_unique_files(results, facet_key)
assert(result, [])


def test_get_unique_files_with_results(search_handler):
mock_results = MagicMock()
mock_results.get_facets.return_value = {
"facet_key": [
{"value": "file1"},
{"value": "file2"},
]
}
facet_key = "facet_key"
result = search_handler.get_unique_files(mock_results, facet_key)
assert(result, ["file1", "file2"])


def test_delete_from_index(handler, mock_search_client):
# given
Expand Down
82 changes: 75 additions & 7 deletions code/tests/test_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@
"""

from unittest.mock import AsyncMock, MagicMock, Mock, patch, ANY
from openai import RateLimitError

from httpx import Response
from openai import RateLimitError, BadRequestError, InternalServerError
import pytest
from flask.testing import FlaskClient
from backend.batch.utilities.helpers.config.conversation_flow import ConversationFlow
Expand Down Expand Up @@ -332,6 +334,15 @@ def test_conversation_custom_returns_error_response_on_rate_limit_error(
# given
response_mock = Mock()
response_mock.status_code = 429
response_mock.json.return_value = {
'error': {
'code': "429",
'message': 'Requests to the Embeddings_Create Operation under Azure OpenAI API version 2024-02-01 '
'have exceeded call rate limit of your current OpenAI S0 pricing tier. Please retry after '
'2 seconds. Please go here: https://aka.ms/oai/quotaincrease if you would like to further '
'increase the default rate limit.'
}
}
body_mock = {"error": "Rate limit exceeded"}

rate_limit_error = RateLimitError("Rate limit exceeded", response=response_mock, body=body_mock)
Expand All @@ -351,6 +362,30 @@ def test_conversation_custom_returns_error_response_on_rate_limit_error(
"Please wait a moment and try again."
}

@patch("create_app.get_orchestrator_config")
def test_conversation_custom_returns_500_when_internalservererror_occurs(
    self, get_orchestrator_config_mock, env_helper_mock, client
):
    """The endpoint maps an InternalServerError to the generic 500 payload."""
    # given: orchestrator-config lookup raises a 500 from the OpenAI client
    mocked_response = MagicMock()
    mocked_response.status_code = 500
    get_orchestrator_config_mock.side_effect = InternalServerError(
        "Test exception", response=mocked_response, body=""
    )

    # when
    api_response = client.post(
        "/api/conversation",
        headers={"content-type": "application/json"},
        json=self.body,
    )

    # then
    assert api_response.status_code == 500
    assert api_response.json == {
        "error": "An error occurred. Please try again. If the problem persists, please contact the site "
        "administrator."
    }

@patch("create_app.get_message_orchestrator")
@patch("create_app.get_orchestrator_config")
def test_conversation_custom_allows_multiple_messages_from_user(
Expand Down Expand Up @@ -719,18 +754,51 @@ def test_conversation_azure_byod_returns_500_when_exception_occurs(
"error": "An error occurred. Please try again. If the problem persists, please contact the site administrator."
}

@patch("create_app.conversation_with_data")
def test_conversation_azure_byod_returns_500_when_internalservererror_occurs(
self, conversation_with_data_mock, env_helper_mock, client
):
"""Test that an error response is returned when an exception occurs."""
# given
response_mock = MagicMock()
response_mock.status_code = 500
conversation_with_data_mock.side_effect = InternalServerError("Test exception", response=response_mock, body="")
env_helper_mock.CONVERSATION_FLOW = ConversationFlow.BYOD.value

# when
response = client.post(
"/api/conversation",
headers={"content-type": "application/json"},
json=self.body,
)

# then
assert response.status_code == 500
assert response.json == {
"error": "An error occurred. Please try again. If the problem persists, please contact the site "
"administrator."
}

@patch("create_app.conversation_with_data")
def test_conversation_azure_byod_returns_429_on_rate_limit_error(
self, conversation_with_data_mock, env_helper_mock, client
self, conversation_with_data_mock, env_helper_mock, client
):
"""Test that a 429 response is returned on RateLimitError for BYOD conversation."""
# given
response_mock = Mock()
response_mock.status_code = 429
body_mock = {"error": "Rate limit exceeded"}
response_mock = MagicMock()
response_mock.status_code = 400
response_mock.json.return_value = {
'error': {
'requestid': 'f30740e1-c6e1-48ab-ab1e-35469ed41ba4',
'code': "400",
'message': 'An error occurred when calling Azure OpenAI: Rate limit reached for AOAI embedding '
'resource: Server responded with status 429. Error message: {"error":{"code":"429",'
'"message": "Rate limit is exceeded. Try again in 44 seconds."}}'
}
}

rate_limit_error = RateLimitError("Rate limit exceeded", response=response_mock, body=body_mock)
conversation_with_data_mock.side_effect = rate_limit_error
conversation_with_data_mock.side_effect = BadRequestError(message="Error code: 400", response=response_mock,
body="")
env_helper_mock.CONVERSATION_FLOW = ConversationFlow.BYOD.value

# when
Expand Down
17 changes: 3 additions & 14 deletions code/tests/utilities/helpers/test_azure_computer_vision_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,22 +176,11 @@ def test_returns_text_vectors(
assert actual_vectors == expected_vectors


@mock.patch("backend.batch.utilities.helpers.azure_computer_vision_client.requests")
def test_vectorize_image_calls_computer_vision_timeout(
httpserver: HTTPServer, azure_computer_vision_client: AzureComputerVisionClient
mock_requests: MagicMock, azure_computer_vision_client: AzureComputerVisionClient
):
# given
def handler(_) -> werkzeug.Response:
time.sleep(0.3)
return werkzeug.Response(
json.dumps({"modelVersion": "2022-04-11", "vector": [1.0, 2.0, 3.0]}),
status=200,
)

httpserver.expect_request(
COMPUTER_VISION_VECTORIZE_IMAGE_PATH,
COMPUTER_VISION_VECTORIZE_IMAGE_REQUEST_METHOD,
).respond_with_handler(handler)

mock_requests.post.side_effect = ReadTimeout("An error occurred")
# when
with pytest.raises(Exception) as exec_info:
azure_computer_vision_client.vectorize_image(IMAGE_URL)
Expand Down
12 changes: 6 additions & 6 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ azure-monitor-opentelemetry = "^1.6.0"
opentelemetry-instrumentation-httpx = "^0.46b0"
pillow = "10.4.0"
azure-mgmt-cognitiveservices = "^13.5.0"
jsonschema = "^4.22.0"
jsonschema = "^4.23.0"
semantic-kernel = {version = "1.1.2", python = "<3.13"}
azure-ai-ml = "^1.17.1"

Expand Down

0 comments on commit 38ccea2

Please sign in to comment.