From 21064e7ca1215c395f3d14619b3a90abf75b96c7 Mon Sep 17 00:00:00 2001 From: Chinedum Echeta <60179183+cecheta@users.noreply.github.com> Date: Fri, 17 May 2024 15:32:07 +0100 Subject: [PATCH 1/2] feat: Remove LangChain from post prompt tool (#937) --- code/backend/batch/utilities/common/answer.py | 12 + .../batch/utilities/tools/post_prompt_tool.py | 48 ++- code/tests/functional/conftest.py | 164 +++++----- .../default/test_post_prompt_tool.py | 301 ++++++++++++++++++ .../tools}/test_content_safety_checker.py | 0 .../utilities/tools/test_post_prompt_tool.py | 115 +++++++ .../{ => tools}/test_question_answer_tool.py | 4 +- 7 files changed, 534 insertions(+), 110 deletions(-) create mode 100644 code/tests/functional/tests/backend_api/default/test_post_prompt_tool.py rename code/tests/{ => utilities/tools}/test_content_safety_checker.py (100%) create mode 100644 code/tests/utilities/tools/test_post_prompt_tool.py rename code/tests/utilities/{ => tools}/test_question_answer_tool.py (99%) diff --git a/code/backend/batch/utilities/common/answer.py b/code/backend/batch/utilities/common/answer.py index 19e56fecd..a71576877 100644 --- a/code/backend/batch/utilities/common/answer.py +++ b/code/backend/batch/utilities/common/answer.py @@ -18,6 +18,18 @@ def __init__( self.prompt_tokens = prompt_tokens self.completion_tokens = completion_tokens + def __eq__(self, value: object) -> bool: + if not isinstance(value, Answer): + return False + + return ( + self.question == value.question + and self.answer == value.answer + and self.source_documents == value.source_documents + and self.prompt_tokens == value.prompt_tokens + and self.completion_tokens == value.completion_tokens + ) + def to_json(self): return json.dumps(self, cls=AnswerEncoder) diff --git a/code/backend/batch/utilities/tools/post_prompt_tool.py b/code/backend/batch/utilities/tools/post_prompt_tool.py index 728cdeb52..5fdf91705 100644 --- a/code/backend/batch/utilities/tools/post_prompt_tool.py +++ b/code/backend/batch/utilities/tools/post_prompt_tool.py @@ -1,6 +1,3 @@ -from langchain.chains.llm import LLMChain -from langchain.prompts import PromptTemplate -from langchain_community.callbacks import get_openai_callback from ..common.answer import Answer from ..helpers.llm_helper import LLMHelper from ..helpers.config.config_helper import ConfigHelper @@ -14,18 +11,6 @@ def validate_answer(self, answer: Answer) -> Answer: config = ConfigHelper.get_active_config_or_default() llm_helper = LLMHelper() - was_message_filtered = False - post_answering_prompt = PromptTemplate( - template=config.prompts.post_answering_prompt, - input_variables=["question", "answer", "sources"], - ) - post_answering_chain = LLMChain( - llm=llm_helper.get_llm(), - prompt=post_answering_prompt, - output_key="correct", - verbose=True, - ) - sources = "\n".join( [ f"[doc{i+1}]: {source.content}" @@ -33,34 +18,39 @@ def validate_answer(self, answer: Answer) -> Answer: ] ) - with get_openai_callback() as cb: - post_result = post_answering_chain( + message = config.prompts.post_answering_prompt.format( + question=answer.question, + answer=answer.answer, + sources=sources, + ) + + response = llm_helper.get_chat_completion( + [ { - "question": answer.question, - "answer": answer.answer, - "sources": sources, + "role": "user", + "content": message, } - ) - - was_message_filtered = not ( - post_result["correct"].lower() == "true" - or post_result["correct"].lower() == "yes" + ] ) + result = response.choices[0].message.content + + was_message_filtered = result.lower() not in 
["true", "yes"] + # Return filtered answer or just the original one if was_message_filtered: return Answer( question=answer.question, answer=config.messages.post_answering_filter, source_documents=[], - prompt_tokens=cb.prompt_tokens, - completion_tokens=cb.completion_tokens, + prompt_tokens=response.usage.prompt_tokens, + completion_tokens=response.usage.completion_tokens, ) else: return Answer( question=answer.question, answer=answer.answer, source_documents=answer.source_documents, - prompt_tokens=cb.prompt_tokens, - completion_tokens=cb.completion_tokens, + prompt_tokens=response.usage.prompt_tokens, + completion_tokens=response.usage.completion_tokens, ) diff --git a/code/tests/functional/conftest.py b/code/tests/functional/conftest.py index 8f76a14e4..c42223138 100644 --- a/code/tests/functional/conftest.py +++ b/code/tests/functional/conftest.py @@ -16,86 +16,6 @@ def setup_default_mocking(httpserver: HTTPServer, app_config: AppConfig): method="HEAD", ).respond_with_data() - httpserver.expect_request( - f"/{AZURE_STORAGE_CONFIG_CONTAINER_NAME}/{AZURE_STORAGE_CONFIG_FILE_NAME}", - method="GET", - ).respond_with_json( - { - "prompts": { - "condense_question_prompt": "", - "answering_system_prompt": "system prompt", - "answering_user_prompt": "## Retrieved Documents\n{sources}\n\n## User Question\n{question}", - "use_on_your_data_format": True, - "post_answering_prompt": "post answering prompt", - "enable_post_answering_prompt": False, - "enable_content_safety": True, - }, - "messages": {"post_answering_filter": "post answering filer"}, - "example": { - "documents": '{"retrieved_documents":[{"[doc1]":{"content":"content"}}]}', - "user_question": "user question", - "answer": "answer", - }, - "document_processors": [ - { - "document_type": "pdf", - "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, - "loading": {"strategy": "layout"}, - "use_advanced_image_processing": False, - }, - { - "document_type": "txt", - "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, - "loading": {"strategy": "web"}, - "use_advanced_image_processing": False, - }, - { - "document_type": "url", - "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, - "loading": {"strategy": "web"}, - "use_advanced_image_processing": False, - }, - { - "document_type": "md", - "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, - "loading": {"strategy": "web"}, - "use_advanced_image_processing": False, - }, - { - "document_type": "html", - "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, - "loading": {"strategy": "web"}, - "use_advanced_image_processing": False, - }, - { - "document_type": "docx", - "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, - "loading": {"strategy": "docx"}, - "use_advanced_image_processing": False, - }, - { - "document_type": "jpg", - "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, - "loading": {"strategy": "layout"}, - "use_advanced_image_processing": True, - }, - { - "document_type": "png", - "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, - "loading": {"strategy": "layout"}, - "use_advanced_image_processing": False, - }, - ], - "logging": {"log_user_interactions": True, "log_tokens": True}, - "orchestrator": {"strategy": "openai_function"}, - "integrated_vectorization_config": None, - }, - headers={ - "Content-Type": "application/json", - "Content-Range": "bytes 0-12882/12883", - }, - ) - httpserver.expect_request( 
f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings", method="POST", @@ -269,3 +189,87 @@ def prime_search_to_trigger_creation_of_index( "/indexes", method="GET", ).respond_with_json({"value": [{"name": app_config.get("AZURE_SEARCH_INDEX")}]}) + + +# This fixture can be overriden +@pytest.fixture(autouse=True) +def setup_config_mocking(httpserver: HTTPServer): + httpserver.expect_request( + f"/{AZURE_STORAGE_CONFIG_CONTAINER_NAME}/{AZURE_STORAGE_CONFIG_FILE_NAME}", + method="GET", + ).respond_with_json( + { + "prompts": { + "condense_question_prompt": "", + "answering_system_prompt": "system prompt", + "answering_user_prompt": "## Retrieved Documents\n{sources}\n\n## User Question\n{question}", + "use_on_your_data_format": True, + "post_answering_prompt": "post answering prompt\n{question}\n{answer}\n{sources}", + "enable_post_answering_prompt": False, + "enable_content_safety": True, + }, + "messages": {"post_answering_filter": "post answering filter"}, + "example": { + "documents": '{"retrieved_documents":[{"[doc1]":{"content":"content"}}]}', + "user_question": "user question", + "answer": "answer", + }, + "document_processors": [ + { + "document_type": "pdf", + "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, + "loading": {"strategy": "layout"}, + "use_advanced_image_processing": False, + }, + { + "document_type": "txt", + "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, + "loading": {"strategy": "web"}, + "use_advanced_image_processing": False, + }, + { + "document_type": "url", + "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, + "loading": {"strategy": "web"}, + "use_advanced_image_processing": False, + }, + { + "document_type": "md", + "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, + "loading": {"strategy": "web"}, + "use_advanced_image_processing": False, + }, + { + "document_type": "html", + "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, + "loading": {"strategy": "web"}, + "use_advanced_image_processing": False, + }, + { + "document_type": "docx", + "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, + "loading": {"strategy": "docx"}, + "use_advanced_image_processing": False, + }, + { + "document_type": "jpg", + "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, + "loading": {"strategy": "layout"}, + "use_advanced_image_processing": True, + }, + { + "document_type": "png", + "chunking": {"strategy": "layout", "size": 500, "overlap": 100}, + "loading": {"strategy": "layout"}, + "use_advanced_image_processing": False, + }, + ], + "logging": {"log_user_interactions": True, "log_tokens": True}, + "orchestrator": {"strategy": "openai_function"}, + "integrated_vectorization_config": None, + }, + headers={ + "Content-Type": "application/json", + "Content-Range": "bytes 0-12882/12883", + }, + ) diff --git a/code/tests/functional/tests/backend_api/default/test_post_prompt_tool.py b/code/tests/functional/tests/backend_api/default/test_post_prompt_tool.py new file mode 100644 index 000000000..11cff3694 --- /dev/null +++ b/code/tests/functional/tests/backend_api/default/test_post_prompt_tool.py @@ -0,0 +1,301 @@ +import json + +import pytest +import requests +from pytest_httpserver import HTTPServer +from tests.constants import ( + AZURE_STORAGE_CONFIG_CONTAINER_NAME, + AZURE_STORAGE_CONFIG_FILE_NAME, +) +from tests.functional.app_config import AppConfig +from tests.request_matching import RequestMatcher, verify_request_made + +pytestmark = 
pytest.mark.functional + +path = "/api/conversation/custom" +body = { + "conversation_id": "123", + "messages": [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi, how can I help?"}, + {"role": "user", "content": "What is the meaning of life?"}, + ], +} + + +@pytest.fixture(autouse=True) +def setup_config_mocking(httpserver: HTTPServer): + httpserver.expect_request( + f"/{AZURE_STORAGE_CONFIG_CONTAINER_NAME}/{AZURE_STORAGE_CONFIG_FILE_NAME}", + method="GET", + ).respond_with_json( + { + "prompts": { + "condense_question_prompt": "", + "answering_system_prompt": "system prompt", + "answering_user_prompt": "## Retrieved Documents\n{sources}\n\n## User Question\n{question}", + "use_on_your_data_format": True, + "post_answering_prompt": "post answering prompt\n{question}\n{answer}\n{sources}", + "enable_post_answering_prompt": True, + "enable_content_safety": True, + }, + "messages": {"post_answering_filter": "post answering filter"}, + "example": { + "documents": '{"retrieved_documents":[{"[doc1]":{"content":"content"}}]}', + "user_question": "user question", + "answer": "answer", + }, + "document_processors": [], + "logging": {"log_user_interactions": True, "log_tokens": True}, + "orchestrator": {"strategy": "openai_function"}, + "integrated_vectorization_config": None, + }, + headers={ + "Content-Type": "application/json", + "Content-Range": "bytes 0-12882/12883", + }, + ) + + +@pytest.fixture(autouse=True) +def completions_mocking(httpserver: HTTPServer, app_config: AppConfig): + httpserver.expect_oneshot_request( + f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions", + method="POST", + ).respond_with_json( + { + "id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9", + "object": "chat.completion", + "created": 1679072642, + "model": app_config.get("AZURE_OPENAI_MODEL"), + "usage": { + "prompt_tokens": 58, + "completion_tokens": 68, + "total_tokens": 126, + }, + "choices": [ + { + "message": { + "role": "assistant", + "function_call": { + "name": "search_documents", + "arguments": '{"question": "What is the meaning of life?"}', + }, + }, + "finish_reason": "function_call", + "index": 0, + } + ], + } + ) + + httpserver.expect_oneshot_request( + f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions", + method="POST", + ).respond_with_json( + { + "id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9", + "object": "chat.completion", + "created": 1679072642, + "model": "gpt-35-turbo", + "usage": { + "prompt_tokens": 40, + "completion_tokens": 50, + "total_tokens": 90, + }, + "choices": [ + { + "message": { + "role": "assistant", + "content": "42 is the meaning of life", + }, + "finish_reason": "stop", + "index": 0, + } + ], + } + ) + + +def test_post_responds_successfully_when_not_filtered( + app_url: str, app_config: AppConfig, httpserver: HTTPServer +): + # given + httpserver.expect_oneshot_request( + f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions", + method="POST", + ).respond_with_json( + { + "id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9", + "object": "chat.completion", + "created": 1679072642, + "model": "gpt-35-turbo", + "usage": { + "prompt_tokens": 40, + "completion_tokens": 50, + "total_tokens": 90, + }, + "choices": [ + { + "message": { + "role": "assistant", + "content": "True", + }, + "finish_reason": "stop", + "index": 0, + } + ], + } + ) + + # when + response = requests.post(f"{app_url}{path}", json=body) + + # then + assert response.status_code == 200 + assert json.loads(response.text) 
== { + "choices": [ + { + "messages": [ + { + "content": '{"citations": [], "intent": "What is the meaning of life?"}', + "end_turn": False, + "role": "tool", + }, + { + "content": "42 is the meaning of life", + "end_turn": True, + "role": "assistant", + }, + ] + } + ], + "created": "response.created", + "id": "response.id", + "model": app_config.get("AZURE_OPENAI_MODEL"), + "object": "response.object", + } + assert response.headers["Content-Type"] == "application/json" + + +def test_post_responds_successfully_when_filtered( + app_url: str, app_config: AppConfig, httpserver: HTTPServer +): + # given + httpserver.expect_oneshot_request( + f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions", + method="POST", + ).respond_with_json( + { + "id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9", + "object": "chat.completion", + "created": 1679072642, + "model": "gpt-35-turbo", + "usage": { + "prompt_tokens": 40, + "completion_tokens": 50, + "total_tokens": 90, + }, + "choices": [ + { + "message": { + "role": "assistant", + "content": "False", + }, + "finish_reason": "stop", + "index": 0, + } + ], + } + ) + + # when + response = requests.post(f"{app_url}{path}", json=body) + + # then + assert response.status_code == 200 + assert json.loads(response.text) == { + "choices": [ + { + "messages": [ + { + "content": '{"citations": [], "intent": "What is the meaning of life?"}', + "end_turn": False, + "role": "tool", + }, + { + "content": "post answering filter", + "end_turn": True, + "role": "assistant", + }, + ] + } + ], + "created": "response.created", + "id": "response.id", + "model": app_config.get("AZURE_OPENAI_MODEL"), + "object": "response.object", + } + assert response.headers["Content-Type"] == "application/json" + + +def test_post_makes_correct_call_to_openai_from_post_prompt_tool( + app_url: str, app_config: AppConfig, httpserver: HTTPServer +): + # given + httpserver.expect_oneshot_request( + f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions", + method="POST", + ).respond_with_json( + { + "id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9", + "object": "chat.completion", + "created": 1679072642, + "model": "gpt-35-turbo", + "usage": { + "prompt_tokens": 40, + "completion_tokens": 50, + "total_tokens": 90, + }, + "choices": [ + { + "message": { + "role": "assistant", + "content": "True", + }, + "finish_reason": "stop", + "index": 0, + } + ], + } + ) + + # when + requests.post(f"{app_url}{path}", json=body) + + # then + verify_request_made( + mock_httpserver=httpserver, + request_matcher=RequestMatcher( + path=f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions", + method="POST", + json={ + "messages": [ + { + "content": "post answering prompt\nWhat is the meaning of life?\n42 is the meaning of life\n[doc1]: content", + "role": "user", + } + ], + "model": app_config.get("AZURE_OPENAI_MODEL"), + "max_tokens": int(app_config.get("AZURE_OPENAI_MAX_TOKENS")), + }, + headers={ + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": f"Bearer {app_config.get('AZURE_OPENAI_API_KEY')}", + "Api-Key": app_config.get("AZURE_OPENAI_API_KEY"), + }, + query_string="api-version=2024-02-01", + times=1, + ), + ) diff --git a/code/tests/test_content_safety_checker.py b/code/tests/utilities/tools/test_content_safety_checker.py similarity index 100% rename from code/tests/test_content_safety_checker.py rename to code/tests/utilities/tools/test_content_safety_checker.py diff --git 
a/code/tests/utilities/tools/test_post_prompt_tool.py b/code/tests/utilities/tools/test_post_prompt_tool.py new file mode 100644 index 000000000..c09026d5d --- /dev/null +++ b/code/tests/utilities/tools/test_post_prompt_tool.py @@ -0,0 +1,115 @@ +from unittest.mock import MagicMock, patch + +import pytest +from backend.batch.utilities.common.answer import Answer +from backend.batch.utilities.common.source_document import SourceDocument +from backend.batch.utilities.tools.post_prompt_tool import PostPromptTool + + +@pytest.fixture(autouse=True) +def config_mock(): + with patch("backend.batch.utilities.tools.post_prompt_tool.ConfigHelper") as mock: + config = mock.get_active_config_or_default.return_value + config.prompts.post_answering_prompt = "mock\n{question}\n{answer}\n{sources}" + config.messages.post_answering_filter = "mock filter" + + yield config + + +@pytest.fixture(autouse=True) +def llm_helper_mock(): + with patch("backend.batch.utilities.tools.post_prompt_tool.LLMHelper") as mock: + llm_helper = mock.return_value + + mock_response = MagicMock() + mock_response.message.content = "true" + + llm_helper.get_chat_completion.return_value.choices = [mock_response] + llm_helper.get_chat_completion.return_value.usage.prompt_tokens = 10 + llm_helper.get_chat_completion.return_value.usage.completion_tokens = 20 + + yield llm_helper + + +@pytest.fixture +def answer(): + return Answer( + question="user question", + answer="answer", + source_documents=[ + SourceDocument( + id="id", + content="content", + source="source", + title="title", + chunk=1, + offset=1, + page_number=1, + chunk_id="chunk_id", + ) + ], + prompt_tokens=100, + completion_tokens=100, + ) + + +def test_validate_answer_without_filtering(llm_helper_mock: MagicMock, answer: Answer): + # when + result = PostPromptTool().validate_answer(answer) + + # then + assert result == Answer( + question="user question", + answer="answer", + source_documents=[ + SourceDocument( + id="id", + content="content", + source="source", + title="title", + chunk=1, + offset=1, + page_number=1, + chunk_id="chunk_id", + ) + ], + prompt_tokens=10, + completion_tokens=20, + ) + + llm_helper_mock.get_chat_completion.assert_called_once_with( + [ + { + "role": "user", + "content": "mock\nuser question\nanswer\n[doc1]: content", + } + ] + ) + + +def test_validate_answer_with_filtering(llm_helper_mock: MagicMock, answer: Answer): + # given + llm_helper_mock.get_chat_completion.return_value.choices[0].message.content = ( + "false" + ) + + # when + result = PostPromptTool().validate_answer(answer) + + # then + assert result == Answer( + question="user question", + answer="mock filter", + source_documents=[], + prompt_tokens=10, + completion_tokens=20, + ) + + llm_helper_mock.get_chat_completion.assert_called_once_with( + [ + { + "role": "user", + "content": "mock\nuser question\nanswer\n[doc1]: content", + } + ] + ) diff --git a/code/tests/utilities/test_question_answer_tool.py b/code/tests/utilities/tools/test_question_answer_tool.py similarity index 99% rename from code/tests/utilities/test_question_answer_tool.py rename to code/tests/utilities/tools/test_question_answer_tool.py index 86e16d7f2..769b81866 100644 --- a/code/tests/utilities/test_question_answer_tool.py +++ b/code/tests/utilities/tools/test_question_answer_tool.py @@ -10,7 +10,9 @@ @pytest.fixture(autouse=True) def config_mock(): - with patch("backend.batch.utilities.tools.question_answer_tool.ConfigHelper") as mock: + with patch( + 
"backend.batch.utilities.tools.question_answer_tool.ConfigHelper" + ) as mock: config = mock.get_active_config_or_default.return_value config.prompts.answering_system_prompt = "mock answering system prompt" config.prompts.answering_user_prompt = ( From 0dedf7f43fada4eb16e0b86dcadaa4dec5bce310 Mon Sep 17 00:00:00 2001 From: frtibble <32080479+frtibble@users.noreply.github.com> Date: Fri, 17 May 2024 17:16:01 +0100 Subject: [PATCH 2/2] ci: use GitHub token (#904) Co-authored-by: Arpit Gaur Co-authored-by: Ross Smith --- .github/PULL_REQUEST_TEMPLATE.md | 15 +++++++ .github/workflows/create-release.yml | 41 +++++++++++++++++++ .github/workflows/release-please.yml | 23 ----------- docs/RELEASE_GUIDELINES.md | 31 ++++---------- .../2024-05-02-enable-project-versioning.md | 4 +- pyproject.toml | 2 +- version.txt | 1 - 7 files changed, 68 insertions(+), 49 deletions(-) create mode 100644 .github/workflows/create-release.yml delete mode 100644 .github/workflows/release-please.yml delete mode 100644 version.txt diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 711944eb5..43868db35 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -8,6 +8,21 @@ - [ ] Yes - [ ] No + + ## How to Test * Get the code diff --git a/.github/workflows/create-release.yml b/.github/workflows/create-release.yml new file mode 100644 index 000000000..9b818b22a --- /dev/null +++ b/.github/workflows/create-release.yml @@ -0,0 +1,41 @@ +on: + workflow_run: + workflows: ["CI"] + types: + - completed + +permissions: + contents: write + +name: create-release + +jobs: + create-release: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'success' }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Conventional Changelog Action + id: changelog + uses: TriPSs/conventional-changelog-action@v5 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + output-file: 'CHANGELOG.md' + git-user-name: 'github-actions[bot]' + git-user-email: 'github-actions[bot]@users.noreply.github.com' + version-file: './pyproject.toml' + version-path: 'tool.poetry.version' + + + - name: Create Release + uses: actions/create-release@v1 + if: ${{ steps.changelog.outputs.skipped == 'false' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.changelog.outputs.tag }} + release_name: ${{ steps.changelog.outputs.tag }} + body: ${{ steps.changelog.outputs.clean_changelog }} diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml deleted file mode 100644 index e60032644..000000000 --- a/.github/workflows/release-please.yml +++ /dev/null @@ -1,23 +0,0 @@ -on: - push: - branches: - - main - -permissions: - contents: write - pull-requests: write - -name: release-please - -jobs: - release-please: - runs-on: ubuntu-latest - steps: - - uses: google-github-actions/release-please-action@v4 - with: - # This is a personal access token (PAT) which has been - # configured as a GitHub action secret. - token: ${{ secrets.RELEASE_PLEASE_TOKEN }} - # This release type is for repos with a version.txt and - # a CHANGELOG.md. 
-          release-type: simple
diff --git a/docs/RELEASE_GUIDELINES.md b/docs/RELEASE_GUIDELINES.md
index 58669e0d8..7634e07f2 100644
--- a/docs/RELEASE_GUIDELINES.md
+++ b/docs/RELEASE_GUIDELINES.md
@@ -12,42 +12,29 @@ This repository uses GitHub's in-built [Releases](https://docs.github.com/en/rep
 # Automated releases
 
-In order to automate the generation of a changelog, the creation of a release, and the bumping of a version number, we use [Release Please](https://github.com/googleapis/release-please).
+In order to automate the generation of a changelog, the creation of a release, and the bumping of a version number, we use the [Conventional Changelog Action](https://github.com/TriPSs/conventional-changelog-action).
 
 It works by inferring from the commit history what changes have been made, and hence what version should be assigned. This is why it is important for Pull Request titles to adhere to the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification, which many repositories use. This convention uses types such as `docs`, `fix`, `feat`, etc to label commits and PRs. From these, the [semantic version](https://semver.org/) of a release can be identified. For example a release which consists of a PR which adds a feature (`feat`) would result in an increment of the Minor part of the semantic version, e.g. 1.1.0 -> 1.2.0.
 
-Using Release Please takes much of the manual work out of creating a release, requiring only an approval from a maintainer to approve the release.
+Using the Conventional Changelog Action along with GitHub Releases takes all of the manual work out of creating a release.
 
-# Release Please Action
+# Conventional Changelog Action
 
 ## Usage
 
-We use the [Release Please](https://github.com/google-github-actions/release-please-action) GitHub Action, which you can find in `./github/workflows/release-please.yml`.
+We use the [Conventional Changelog](https://github.com/TriPSs/conventional-changelog-action) GitHub Action, which you can find in `.github/workflows/create-release.yml`.
 
-Once a PR is merged, the Action will automatically run and update a Release PR. It will automatically create the changelog and version number of the release. If subsequent PRs are merged, the Release PR will update to reflect these new changes. Once ready, the release PR can be merged and the main page of the repository will be updated with the new release.
+Once a PR is merged to `main`, the Action runs automatically. It generates a changelog, and if that changelog is empty, no release is made. This is the case for merges to `main` that only include `docs`, `chore`, etc.
 
-## Security
-
-Due to the restrictions on this repository, and the inability to enable Actions to create pull requests, this workflow uses a personal access token.
+Once a merge to `main` is completed that would result in a major/minor/patch version increase (such as `feat` or `fix`), a changelog is generated, and this triggers a release to be published automatically with the appropriate version number.
 
-You can generate a personal access token by going to your [profile](https://github.com/settings/profile) > Developer settings > Personal access tokens.
+The workflow is configured so that the `CHANGELOG.md` is continuously updated. By default the Action reads the version from `package.json`; this workflow points it at `pyproject.toml` instead (via `version-file`), and the Action bumps that version automatically.
-- Token name: `RELEASE_PLEASE_TOKEN`
-- Expiration: `90 days`
-- Resource Owner: `Azure-Samples`
-- Repository access: `Only select repositories` > `chat-with-your-data-solution-accelerator`
-
-The personal access token must be regenerated every 90 days. It must have the following permissions in order for the GitHub Action to be able to create a release pull request:
-- Actions: `Read and write`
-- Contents: `Read and write`
-- Merge queues: `Read and write`
-- Metadata: `Read-only`
-- Pull requests: `Read and write`
-- Workflows: `Read and write`
+
+## Security
 
-Once the personal access token has been generated, it should be stored in the repository Settings, under Security > Secrets and Variables > Actions > RELEASE_PLEASE_TOKEN.
+The GitHub Action that creates the release requires only the built-in `GITHUB_TOKEN`, as this has sufficient permissions to check out `main` (and read the commit history) and to create a release for the repository.
 
 # Conventional Commits
diff --git a/docs/design/adrs/2024-05-02-enable-project-versioning.md b/docs/design/adrs/2024-05-02-enable-project-versioning.md
index 92bcbb32f..68602c9bf 100644
--- a/docs/design/adrs/2024-05-02-enable-project-versioning.md
+++ b/docs/design/adrs/2024-05-02-enable-project-versioning.md
@@ -60,6 +60,6 @@ Pros:
 - When introducing automated versioning, there are a few things we need to consider:
   - **major/minor/patch** versions: Each release is tagged with a semantic version which is based on the change included (e.g. breaking change, feature, bug fix).
   - **Changelog**: A changelog must be included which details the changes that are included in the release.
-  - The [**release-please**](https://github.com/google-github-actions/release-please-action) GitHub Action is created by Google, and automates releases. It looks at commit messages to automatically generate the version number and to generate the changelog.
+  - The [**conventional-changelog-action**](https://github.com/TriPSs/conventional-changelog-action) GitHub Action automates releases. It looks at commit messages to automatically generate the version number and to generate the changelog.
   - It uses [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/), meaning we'd need to follow a convention of labelling PRs with `fix`/`feat`/`BREAKING CHANGE`, etc, in order to be picked up by the release generator. It uses these labels to create the correct version.
-  - As a consequence, current dependabot PRs would not trigger a release PR. These would be in main until a automatic or manual release was triggered.
+  - As a consequence, current dependabot PRs would not trigger a release PR. These would be in main until an automatic or manual release was triggered.
diff --git a/pyproject.toml b/pyproject.toml
index 1d90f6018..3aec7a868 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "chat-with-your-data-solution-accelerator"
-version = "0.1.0"
+version = "1.1.0"
 description = "Chat with your data solution accelerator"
 authors = []
 readme = "README.md"
diff --git a/version.txt b/version.txt
deleted file mode 100644
index 9084fa2f7..000000000
--- a/version.txt
+++ /dev/null
@@ -1 +0,0 @@
-1.1.0
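
A note on the `Answer.__eq__` method added in PATCH 1/2: without it, Python compares class instances by identity, so assertions such as `assert result == Answer(...)` in the new unit tests could never pass for two separately constructed objects. A minimal demonstration of the difference, using a hypothetical `Thing` class rather than repository code:

```python
class Thing:
    def __init__(self, value):
        self.value = value


class ComparableThing(Thing):
    def __eq__(self, other):
        # Field-by-field comparison, mirroring the approach of Answer.__eq__.
        return isinstance(other, ComparableThing) and self.value == other.value


print(Thing(1) == Thing(1))                      # False: compared by identity
print(ComparableThing(1) == ComparableThing(1))  # True: compared by value
```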
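
The core change in PATCH 1/2 is that `PostPromptTool.validate_answer` no longer builds a LangChain `LLMChain`: it formats the configured post-answering prompt with plain `str.format` and sends a single user message through the chat completions API. Below is a minimal, self-contained sketch of that flow, with a fake client standing in for `LLMHelper.get_chat_completion`; the constants and the fake client are illustrative assumptions, not repository code.

```python
POST_ANSWERING_PROMPT = "post answering prompt\n{question}\n{answer}\n{sources}"
POST_ANSWERING_FILTER = "post answering filter"


class FakeChatResponse:
    """Duck-typed stand-in for the chat completion response content."""

    def __init__(self, content: str):
        self.content = content


def fake_get_chat_completion(messages: list) -> FakeChatResponse:
    # A real call would go to Azure OpenAI via LLMHelper; here the "model"
    # always judges the answer to be correct.
    return FakeChatResponse("True")


def validate(question: str, answer: str, source_contents: list) -> str:
    # Number the sources exactly as the tool does: [doc1], [doc2], ...
    sources = "\n".join(
        f"[doc{i + 1}]: {content}" for i, content in enumerate(source_contents)
    )
    # Plain str.format replaces the LangChain PromptTemplate.
    message = POST_ANSWERING_PROMPT.format(
        question=question, answer=answer, sources=sources
    )
    response = fake_get_chat_completion([{"role": "user", "content": message}])
    # Any reply other than "true"/"yes" (case-insensitive) filters the answer.
    if response.content.lower() not in ["true", "yes"]:
        return POST_ANSWERING_FILTER
    return answer


print(validate("What is the meaning of life?", "42", ["content"]))  # prints "42"
```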
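
For PATCH 2/2, the updated guidelines describe how the Conventional Changelog Action derives the next semantic version from commit subjects: `feat` bumps minor, `fix` bumps patch, a breaking change bumps major, and an empty changelog produces no release. The sketch below illustrates that rule only; it is a rough approximation and not the real action's implementation, which also handles `BREAKING CHANGE:` footers, custom presets, and pre-releases.

```python
import re

# Simplified mapping: breaking changes bump major, feat bumps minor,
# fix/perf bump patch, and docs/chore/ci/etc. produce no release at all.
MINOR_TYPES = {"feat"}
PATCH_TYPES = {"fix", "perf"}


def bump(version: str, commit_subjects: list[str]) -> str | None:
    major, minor, patch = map(int, version.split("."))
    levels = []
    for subject in commit_subjects:
        # Conventional commit subjects look like "type(scope)!: description".
        match = re.match(r"^(\w+)(\([^)]*\))?(!)?:", subject)
        if not match:
            continue
        commit_type, _, breaking = match.groups()
        if breaking:  # e.g. "feat!: ..." marks a breaking change
            levels.append("major")
        elif commit_type in MINOR_TYPES:
            levels.append("minor")
        elif commit_type in PATCH_TYPES:
            levels.append("patch")
    if "major" in levels:
        return f"{major + 1}.0.0"
    if "minor" in levels:
        return f"{major}.{minor + 1}.0"
    if "patch" in levels:
        return f"{major}.{minor}.{patch + 1}"
    return None  # empty changelog -> no release, as the guidelines describe


# e.g. the `feat` commit in PATCH 1/2 takes 1.1.0 to 1.2.0
print(bump("1.1.0", ["feat: Remove LangChain from post prompt tool (#937)"]))
```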