Skip to content

Commit

Permalink
fix: Fix for the issue encountered when updating the .env file and modifying model configurations. (#1347)
Browse files Browse the repository at this point in the history

Co-authored-by: Pavan Kumar <v-kupavan@microsoft.com>
Co-authored-by: Francia Riesco <Fr4nc3@users.noreply.github.com>
  • Loading branch information
Pavan-Microsoft and Fr4nc3 authored Sep 26, 2024
1 parent 5f1efcd commit 6518797
Show file tree
Hide file tree
Showing 18 changed files with 329 additions and 333 deletions.
5 changes: 2 additions & 3 deletions .env.sample
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,8 @@ AZURE_SEARCH_DATASOURCE_NAME=
# Azure OpenAI for generating the answer and computing the embedding of the documents
AZURE_OPENAI_RESOURCE=
AZURE_OPENAI_API_KEY=
AZURE_OPENAI_MODEL=gpt-35-turbo
AZURE_OPENAI_MODEL_NAME=gpt-35-turbo
AZURE_OPENAI_EMBEDDING_MODEL=text-embedding-ada-002
AZURE_OPENAI_MODEL_INFO="{\"model\":\"gpt-35-turbo-16k\",\"modelName\":\"gpt-35-turbo-16k\",\"modelVersion\":\"0613\"}"
AZURE_OPENAI_EMBEDDING_MODEL_INFO="{\"model\":\"text-embedding-ada-002\",\"modelName\":\"text-embedding-ada-002\",\"modelVersion\":\"2\"}"
AZURE_OPENAI_TEMPERATURE=0
AZURE_OPENAI_TOP_P=1.0
AZURE_OPENAI_MAX_TOKENS=1000
Expand Down
34 changes: 29 additions & 5 deletions code/backend/batch/utilities/helpers/env_helper.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import json
import os
import logging
import threading
Expand Down Expand Up @@ -92,11 +93,19 @@ def __load_config(self, **kwargs) -> None:

self.AZURE_AUTH_TYPE = os.getenv("AZURE_AUTH_TYPE", "keys")
# Azure OpenAI
# Default model info
default_azure_openai_model_info = '{"model":"gpt-35-turbo-16k","modelName":"gpt-35-turbo-16k","modelVersion":"0613"}'
default_azure_openai_embedding_model_info = '{"model":"text-embedding-ada-002","modelName":"text-embedding-ada-002","modelVersion":"2"}'

self.AZURE_OPENAI_RESOURCE = os.getenv("AZURE_OPENAI_RESOURCE", "")
self.AZURE_OPENAI_MODEL = os.getenv("AZURE_OPENAI_MODEL", "")
self.AZURE_OPENAI_MODEL_NAME = os.getenv(
"AZURE_OPENAI_MODEL_NAME", "gpt-35-turbo"

# Fetch and assign model info
azure_openai_model_info = self.get_info_from_env(
"AZURE_OPENAI_MODEL_INFO", default_azure_openai_model_info
)
self.AZURE_OPENAI_MODEL = azure_openai_model_info.get("model")
self.AZURE_OPENAI_MODEL_NAME = azure_openai_model_info.get("modelName")

self.AZURE_OPENAI_VISION_MODEL = os.getenv("AZURE_OPENAI_VISION_MODEL", "gpt-4")
self.AZURE_OPENAI_TEMPERATURE = os.getenv("AZURE_OPENAI_TEMPERATURE", "0")
self.AZURE_OPENAI_TOP_P = os.getenv("AZURE_OPENAI_TOP_P", "1.0")
Expand All @@ -110,9 +119,16 @@ def __load_config(self, **kwargs) -> None:
"AZURE_OPENAI_API_VERSION", "2024-02-01"
)
self.AZURE_OPENAI_STREAM = os.getenv("AZURE_OPENAI_STREAM", "true")
self.AZURE_OPENAI_EMBEDDING_MODEL = os.getenv(
"AZURE_OPENAI_EMBEDDING_MODEL", ""

# Fetch and assign embedding model info
azure_openai_embedding_model_info = self.get_info_from_env(
"AZURE_OPENAI_EMBEDDING_MODEL_INFO",
default_azure_openai_embedding_model_info,
)
self.AZURE_OPENAI_EMBEDDING_MODEL = azure_openai_embedding_model_info.get(
"model"
)

self.SHOULD_STREAM = (
True if self.AZURE_OPENAI_STREAM.lower() == "true" else False
)
Expand Down Expand Up @@ -267,6 +283,14 @@ def get_env_var_float(self, var_name: str, default: float):
def is_auth_type_keys(self):
    """Return True when key-based (as opposed to RBAC/identity) Azure auth is configured."""
    return "keys" == self.AZURE_AUTH_TYPE

def get_info_from_env(self, env_var: str, default_info: str) -> dict:
    """Read a JSON model-info blob from the environment and parse it to a dict.

    Falls back to *default_info* when *env_var* is unset. Values copied from a
    .env file may arrive with backslash-escaped quotes (e.g. {\"model\":\"x\"});
    those are first unescaped by round-tripping the text through a JSON string
    literal. An empty value yields an empty dict.
    """
    raw = os.getenv(env_var, default_info)
    if "\\" in raw:
        # Unescape: "{\"k\":\"v\"}" -> {"k":"v"} before the real parse.
        raw = json.loads(f'"{raw}"')
    if not raw:
        return {}
    return json.loads(raw)

@staticmethod
def check_env():
for attr, value in EnvHelper().__dict__.items():
Expand Down
10 changes: 7 additions & 3 deletions code/tests/functional/app_config.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import base64
import json
import logging
import os
from backend.batch.utilities.helpers.config.conversation_flow import ConversationFlow
Expand All @@ -24,11 +25,10 @@ class AppConfig:
"AZURE_KEY_VAULT_ENDPOINT": "some-key-vault-endpoint",
"AZURE_OPENAI_API_KEY": "some-azure-openai-api-key",
"AZURE_OPENAI_API_VERSION": "2024-02-01",
"AZURE_OPENAI_EMBEDDING_MODEL": "some-embedding-model",
"AZURE_OPENAI_EMBEDDING_MODEL_INFO": '{"model":"some-embedding-model","modelName":"some-embedding-model-name","modelVersion":"some-embedding-model-version"}',
"AZURE_OPENAI_ENDPOINT": "some-openai-endpoint",
"AZURE_OPENAI_MAX_TOKENS": "1000",
"AZURE_OPENAI_MODEL": "some-openai-model",
"AZURE_OPENAI_MODEL_NAME": "some-openai-model-name",
"AZURE_OPENAI_MODEL_INFO": '{"model":"some-openai-model","modelName":"some-openai-model-name","modelVersion":"some-openai-model-version"}',
"AZURE_OPENAI_VISION_MODEL": "some-openai-vision-model",
"AZURE_OPENAI_RESOURCE": "some-openai-resource",
"AZURE_OPENAI_STREAM": "True",
Expand Down Expand Up @@ -95,6 +95,10 @@ def set(self, key: str, value: str | None) -> None:
def get(self, key: str) -> str | None:
return self.config[key]

def get_from_json(self, config_key: str, field: str) -> str | None:
config_json = json.loads(self.config[config_key])
return config_json.get(field)

def get_all(self) -> dict[str, str | None]:
return self.config

Expand Down
8 changes: 4 additions & 4 deletions code/tests/functional/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def setup_default_mocking(httpserver: HTTPServer, app_config: AppConfig):
).respond_with_data()

httpserver.expect_request(
f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}/embeddings",
method="POST",
).respond_with_json(
{
Expand Down Expand Up @@ -58,15 +58,15 @@ def setup_default_mocking(httpserver: HTTPServer, app_config: AppConfig):

httpserver.expect_request(
re.compile(
f"/openai/deployments/({app_config.get('AZURE_OPENAI_MODEL')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
f"/openai/deployments/({app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
),
method="POST",
).respond_with_json(
{
"id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9",
"object": "chat.completion",
"created": 1679072642,
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"usage": {
"prompt_tokens": 58,
"completion_tokens": 68,
Expand Down Expand Up @@ -194,7 +194,7 @@ def setup_default_mocking(httpserver: HTTPServer, app_config: AppConfig):
"inputs": [{"name": "text", "source": "/document/pages/*"}],
"outputs": [{"name": "embedding", "targetName": "content_vector"}],
"resourceUri": f"https://localhost:{httpserver.port}/",
"deploymentId": f"{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}",
"deploymentId": f"{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}",
"apiKey": f"{app_config.get('AZURE_OPENAI_API_KEY')}",
},
],
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
@pytest.fixture(autouse=True)
def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
httpserver.expect_oneshot_request(
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
).respond_with_json(
{
Expand All @@ -48,7 +48,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
],
"created": 1714576877,
"id": "chatcmpl-9K63hMvVH1DyQJqqM7rFE4oRPFCeR",
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"object": "chat.completion",
"prompt_filter_results": [
{
Expand All @@ -72,7 +72,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):

httpserver.expect_oneshot_request(
re.compile(
f"/openai/deployments/({app_config.get('AZURE_OPENAI_MODEL')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
f"/openai/deployments/({app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
),
method="POST",
).respond_with_json(
Expand All @@ -95,7 +95,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
],
"created": 1714576891,
"id": "chatcmpl-9K63vDGs3slJFynnpi2K6RcVPwgrT",
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"object": "chat.completion",
"prompt_filter_results": [
{
Expand Down Expand Up @@ -167,7 +167,7 @@ def test_post_responds_successfully(app_url: str, app_config: AppConfig):
],
"created": "response.created",
"id": "response.id",
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"object": "response.object",
}
assert response.headers["Content-Type"] == "application/json"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,14 @@
@pytest.fixture(autouse=True)
def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
httpserver.expect_oneshot_request(
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
).respond_with_json(
{
"id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9",
"object": "chat.completion",
"created": 1679072642,
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"usage": {
"prompt_tokens": 58,
"completion_tokens": 68,
Expand All @@ -58,7 +58,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
)

httpserver.expect_oneshot_request(
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
).respond_with_json(
{
Expand Down Expand Up @@ -110,7 +110,7 @@ def test_post_responds_successfully(app_url: str, app_config: AppConfig):
],
"created": "response.created",
"id": "response.id",
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"object": "response.object",
}
assert response.headers["Content-Type"] == "application/json"
Expand All @@ -126,7 +126,7 @@ def test_post_makes_correct_calls_to_openai_embeddings_to_get_vector_dimensions(
verify_request_made(
mock_httpserver=httpserver,
request_matcher=RequestMatcher(
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}/embeddings",
method="POST",
json={
"input": [[1199]],
Expand Down Expand Up @@ -155,13 +155,15 @@ def test_post_makes_correct_calls_to_openai_embeddings_to_embed_question_to_sear
verify_request_made(
mock_httpserver=httpserver,
request_matcher=RequestMatcher(
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}/embeddings",
method="POST",
json={
"input": [
[3923, 374, 279, 7438, 315, 2324, 30]
], # Embedding of "What is the meaning of life?"
"model": app_config.get("AZURE_OPENAI_EMBEDDING_MODEL"),
"model": app_config.get_from_json(
"AZURE_OPENAI_EMBEDDING_MODEL_INFO", "model"
),
"encoding_format": "base64",
},
headers={
Expand All @@ -188,7 +190,7 @@ def test_post_makes_correct_calls_to_openai_embeddings_to_embed_question_to_stor
verify_request_made(
mock_httpserver=httpserver,
request_matcher=RequestMatcher(
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_EMBEDDING_MODEL_INFO','model')}/embeddings",
method="POST",
json={
"input": [
Expand Down Expand Up @@ -265,7 +267,7 @@ def test_post_makes_correct_call_to_openai_chat_completions_with_functions(
verify_request_made(
mock_httpserver=httpserver,
request_matcher=RequestMatcher(
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
json={
"messages": [
Expand Down Expand Up @@ -555,7 +557,7 @@ def test_post_makes_correct_call_to_openai_chat_completions_with_documents(
verify_request_made(
mock_httpserver=httpserver,
request_matcher=RequestMatcher(
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
json={
"messages": [
Expand Down Expand Up @@ -589,7 +591,7 @@ def test_post_makes_correct_call_to_openai_chat_completions_with_documents(
"role": "user",
},
],
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"max_tokens": int(app_config.get("AZURE_OPENAI_MAX_TOKENS")),
"temperature": 0,
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,14 +61,14 @@ def setup_config_mocking(httpserver: HTTPServer):
@pytest.fixture(autouse=True)
def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):
httpserver.expect_oneshot_request(
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
).respond_with_json(
{
"id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9",
"object": "chat.completion",
"created": 1679072642,
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"usage": {
"prompt_tokens": 58,
"completion_tokens": 68,
Expand All @@ -92,7 +92,7 @@ def completions_mocking(httpserver: HTTPServer, app_config: AppConfig):

httpserver.expect_oneshot_request(
re.compile(
f"/openai/deployments/({app_config.get('AZURE_OPENAI_MODEL')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
f"/openai/deployments/({app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}|{app_config.get('AZURE_OPENAI_VISION_MODEL')})/chat/completions"
),
method="POST",
).respond_with_json(
Expand Down Expand Up @@ -125,7 +125,7 @@ def test_post_responds_successfully_when_not_filtered(
):
# given
httpserver.expect_oneshot_request(
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
).respond_with_json(
{
Expand Down Expand Up @@ -175,7 +175,7 @@ def test_post_responds_successfully_when_not_filtered(
],
"created": "response.created",
"id": "response.id",
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"object": "response.object",
}
assert response.headers["Content-Type"] == "application/json"
Expand All @@ -186,7 +186,7 @@ def test_post_responds_successfully_when_filtered(
):
# given
httpserver.expect_oneshot_request(
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
).respond_with_json(
{
Expand Down Expand Up @@ -236,7 +236,7 @@ def test_post_responds_successfully_when_filtered(
],
"created": "response.created",
"id": "response.id",
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"object": "response.object",
}
assert response.headers["Content-Type"] == "application/json"
Expand All @@ -247,7 +247,7 @@ def test_post_makes_correct_call_to_openai_from_post_prompt_tool(
):
# given
httpserver.expect_oneshot_request(
f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
).respond_with_json(
{
Expand Down Expand Up @@ -280,7 +280,7 @@ def test_post_makes_correct_call_to_openai_from_post_prompt_tool(
verify_request_made(
mock_httpserver=httpserver,
request_matcher=RequestMatcher(
path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
path=f"/openai/deployments/{app_config.get_from_json('AZURE_OPENAI_MODEL_INFO','model')}/chat/completions",
method="POST",
json={
"messages": [
Expand All @@ -289,7 +289,7 @@ def test_post_makes_correct_call_to_openai_from_post_prompt_tool(
"role": "user",
}
],
"model": app_config.get("AZURE_OPENAI_MODEL"),
"model": app_config.get_from_json("AZURE_OPENAI_MODEL_INFO", "model"),
"max_tokens": int(app_config.get("AZURE_OPENAI_MAX_TOKENS")),
},
headers={
Expand Down
Loading

0 comments on commit 6518797

Please sign in to comment.