From 415d049822f6beb3cb473959267a80f8ddf8b54e Mon Sep 17 00:00:00 2001 From: Xiaoyun Zhang Date: Thu, 21 Nov 2024 09:53:23 -0800 Subject: [PATCH 01/33] .NET add document on packaging && disable uploading artifacts folder to pipeline by default (#4299) * add package readme * Update PACKAGING.md --- .azure/pipelines/build.yaml | 12 +++++--- .azure/pipelines/templates/build.yaml | 12 ++++++-- dotnet/PACKAGING.md | 41 +++++++++++++++++++++++++++ dotnet/nuget/README.md | 13 +++++++++ 4 files changed, 71 insertions(+), 7 deletions(-) create mode 100644 dotnet/PACKAGING.md create mode 100644 dotnet/nuget/README.md diff --git a/.azure/pipelines/build.yaml b/.azure/pipelines/build.yaml index 6f21e59b00f3..7ce3f0c1efa9 100644 --- a/.azure/pipelines/build.yaml +++ b/.azure/pipelines/build.yaml @@ -3,8 +3,8 @@ trigger: include: - main paths: - exclude: - - samples + include: + - dotnet schedules: - cron: "0 0 * * *" @@ -12,7 +12,6 @@ schedules: branches: include: - main - - 3.x always: true parameters: @@ -46,6 +45,10 @@ parameters: - name: publish_nightly displayName: Publish to autogen-nightly type: boolean + default: true + - name: publish_artifacts + displayName: Publish artifacts + type: boolean default: false - name: runCodeQL3000 default: false @@ -87,4 +90,5 @@ extends: skip_test: ${{ parameters.skip_test }} publish_nightly: ${{ parameters.publish_nightly }} publish_nuget: ${{ parameters.publish_nuget }} - runCodeQL3000: ${{ parameters.runCodeQL3000 }} \ No newline at end of file + runCodeQL3000: ${{ parameters.runCodeQL3000 }} + publish_artifacts: ${{ parameters.publish_artifacts }} \ No newline at end of file diff --git a/.azure/pipelines/templates/build.yaml b/.azure/pipelines/templates/build.yaml index 7e3d82f4d678..0b7dbe990c38 100644 --- a/.azure/pipelines/templates/build.yaml +++ b/.azure/pipelines/templates/build.yaml @@ -30,6 +30,10 @@ parameters: displayName: Publish to nuget.org type: boolean default: false + - name: publish_artifacts + displayName: Publish 
artifacts + type: boolean + default: false - name: runCodeQL3000 default: false displayName: Run CodeQL3000 tasks @@ -49,9 +53,11 @@ jobs: ${{ if ne(variables['System.TeamProject'], 'GitHub - PR Builds') }}: templateContext: outputs: - - output: pipelineArtifact - targetPath: '$(build.sourcesdirectory)/dotnet/artifacts' - artifactName: artifacts folder + # Publish artifacts if enabled + - ${{ if eq(parameters.publish_artifacts, true) }}: # TODO add eq(parameters.codesign, true) + - output: pipelineArtifact + targetPath: '$(build.sourcesdirectory)/dotnet/artifacts' + artifactName: artifacts folder # Publish packages to nightly - ${{ if eq(parameters.publish_nightly, true) }}: # TODO add eq(parameters.codesign, true) - output: nuget diff --git a/dotnet/PACKAGING.md b/dotnet/PACKAGING.md new file mode 100644 index 000000000000..af03850f7cea --- /dev/null +++ b/dotnet/PACKAGING.md @@ -0,0 +1,41 @@ +# Packaging AutoGen.NET + +This document describes the steps to pack the `AutoGen.NET` project. + +## Prerequisites + +- .NET SDK + +## Create Package + +1. **Restore and Build the Project** +```bash +dotnet restore +dotnet build --configuration Release --no-restore +``` + + +2. **Create the NuGet Package** +```bash +dotnet pack --configuration Release --no-build +``` + +This will generate both the `.nupkg` file and the `.snupkg` file in the `./artifacts/package/release` directory. + +For more details, refer to the [official .NET documentation](https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-pack). + +## Add new project to package list. +By default, when you add a new project to `AutoGen.sln`, it will not be included in the package list. To include the new project in the package, you need to add the following line to the new project's `.csproj` file + +e.g. + +```xml + +``` + +The `nuget-packages.props` enables `IsPackable` to `true` for the project, it also sets nenecessary metadata for the package. 
+ +For more details, refer to the [NuGet folder](./nuget/README.md). + +## Package versioning +The version of the package is defined by `VersionPrefix` and `VersionPrefixForAutoGen0_2` in [MetaInfo.props](./eng/MetaInfo.props). If the name of your project starts with `AutoGen.`, the version will be set to `VersionPrefixForAutoGen0_2`, otherwise it will be set to `VersionPrefix`. diff --git a/dotnet/nuget/README.md b/dotnet/nuget/README.md new file mode 100644 index 000000000000..c95a97624788 --- /dev/null +++ b/dotnet/nuget/README.md @@ -0,0 +1,13 @@ +# NuGet Directory + +This directory contains resources and metadata for packaging the AutoGen.NET SDK as a NuGet package. + +## Files + +- **icon.png**: The icon used for the NuGet package. +- **NUGET.md**: The readme file displayed on the NuGet package page. +- **NUGET-PACKAGE.PROPS**: The MSBuild properties file that defines the packaging settings for the NuGet package. + +## Purpose + +The files in this directory are used to configure and build the NuGet package for the AutoGen.NET SDK, ensuring that it includes necessary metadata, documentation, and resources. 
\ No newline at end of file From b65269b8f81296d94f67011a49d8012e718ea749 Mon Sep 17 00:00:00 2001 From: Diego Colombo Date: Thu, 21 Nov 2024 19:03:30 +0000 Subject: [PATCH 02/33] create solution for the dev team sample (#4086) Co-authored-by: Ryan Sweet --- dotnet/samples/dev-team/dev team.sln | 49 ++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 dotnet/samples/dev-team/dev team.sln diff --git a/dotnet/samples/dev-team/dev team.sln b/dotnet/samples/dev-team/dev team.sln new file mode 100644 index 000000000000..f8a7aeacd924 --- /dev/null +++ b/dotnet/samples/dev-team/dev team.sln @@ -0,0 +1,49 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.11.35327.3 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.AgentHost", "DevTeam.AgentHost\DevTeam.AgentHost.csproj", "{A6FC8B01-A177-4690-BD16-73EE3D0C06A0}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.Backend", "DevTeam.Backend\DevTeam.Backend.csproj", "{2D4BAD10-85F3-4E4B-B759-13449A212A96}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.Agents", "DevTeam.Agents\DevTeam.Agents.csproj", "{A51CE540-72B0-4271-B63D-A30CAB61C227}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.AppHost", "DevTeam.AppHost\DevTeam.AppHost.csproj", "{2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.Shared", "DevTeam.Shared\DevTeam.Shared.csproj", "{557701A5-35D8-4CE3-BA75-D5412B4227F5}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {A6FC8B01-A177-4690-BD16-73EE3D0C06A0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A6FC8B01-A177-4690-BD16-73EE3D0C06A0}.Debug|Any 
CPU.Build.0 = Debug|Any CPU + {A6FC8B01-A177-4690-BD16-73EE3D0C06A0}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A6FC8B01-A177-4690-BD16-73EE3D0C06A0}.Release|Any CPU.Build.0 = Release|Any CPU + {2D4BAD10-85F3-4E4B-B759-13449A212A96}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2D4BAD10-85F3-4E4B-B759-13449A212A96}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2D4BAD10-85F3-4E4B-B759-13449A212A96}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2D4BAD10-85F3-4E4B-B759-13449A212A96}.Release|Any CPU.Build.0 = Release|Any CPU + {A51CE540-72B0-4271-B63D-A30CAB61C227}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A51CE540-72B0-4271-B63D-A30CAB61C227}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A51CE540-72B0-4271-B63D-A30CAB61C227}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A51CE540-72B0-4271-B63D-A30CAB61C227}.Release|Any CPU.Build.0 = Release|Any CPU + {2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}.Release|Any CPU.Build.0 = Release|Any CPU + {557701A5-35D8-4CE3-BA75-D5412B4227F5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {557701A5-35D8-4CE3-BA75-D5412B4227F5}.Debug|Any CPU.Build.0 = Debug|Any CPU + {557701A5-35D8-4CE3-BA75-D5412B4227F5}.Release|Any CPU.ActiveCfg = Release|Any CPU + {557701A5-35D8-4CE3-BA75-D5412B4227F5}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {DE04DB59-B8CD-4305-875B-E71442345CCF} + EndGlobalSection +EndGlobal From e0375962284525aeb077b8553bdae1f966babc83 Mon Sep 17 00:00:00 2001 From: Griffin Bassman Date: Thu, 21 Nov 2024 19:17:30 -0500 Subject: [PATCH 03/33] typo: agbench readme (#4302) --- python/packages/agbench/README.md | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/agbench/README.md b/python/packages/agbench/README.md index e0b9c1c84694..a8209a1e9d25 100644 --- a/python/packages/agbench/README.md +++ b/python/packages/agbench/README.md @@ -10,7 +10,7 @@ If you are already an AutoGenBench pro, and want the full technical specificatio ## Docker Requirement -AutoGenBench also requires Docker (Desktop or Engine). **It will not run in GitHub codespaces**, unless you opt for native execution (with is strongly discouraged). To install Docker Desktop see [https://www.docker.com/products/docker-desktop/](https://www.docker.com/products/docker-desktop/). +AutoGenBench also requires Docker (Desktop or Engine). **It will not run in GitHub codespaces**, unless you opt for native execution (which is strongly discouraged). To install Docker Desktop see [https://www.docker.com/products/docker-desktop/](https://www.docker.com/products/docker-desktop/). If you are working in WSL, you can follow the instructions below to set up your environment: From 3c1ec7108a658feea01228bf49c1de4008d195f1 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Thu, 21 Nov 2024 19:24:12 -0500 Subject: [PATCH 04/33] Misc doc fixes (#4300) * Misc doc fixes * Update _console.py --------- Co-authored-by: Jack Gerrits --- .../src/autogen_agentchat/task/_console.py | 12 +++++++----- .../src/autogen_agentchat/task/_terminations.py | 1 + .../agentchat-user-guide/tutorial/teams.ipynb | 6 +++--- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_console.py index 6b5849e4cf4b..9899366cdc07 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_console.py @@ -27,16 +27,18 @@ async def Console( no_inline_images: bool = False, ) -> T: """ - Consume the stream from 
:meth:`~autogen_agentchat.base.Team.run_stream` - or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream` - print the messages to the console and return the last processed TaskResult or Response. + Consumes the message stream from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` + or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream` and renders the messages to the console. + Returns the last processed TaskResult or Response. Args: - stream (AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None]): Stream to render + stream (AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None]): Message stream to render. + This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. no_inline_images (bool, optional): If terminal is iTerm2 will render images inline. Use this to disable this behavior. Defaults to False. Returns: - last_processed: The last processed TaskResult or Response. + last_processed: A :class:`~autogen_agentchat.base.TaskResult` if the stream is from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` + or a :class:`~autogen_agentchat.base.Response` if the stream is from :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. 
""" render_image_iterm = _is_running_in_iterm() and _is_output_a_tty() and not no_inline_images start_time = time.time() diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py index 31b8d9d3eec0..f8d79cef2850 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py @@ -237,6 +237,7 @@ def terminated(self) -> bool: return self._terminated def set(self) -> None: + """Set the termination condition to terminated.""" self._setted = True async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb index 49340ef11b05..5c0b257dfec2 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb @@ -28,8 +28,8 @@ "\n", "At a high-level, a team API consists of the following methods:\n", "\n", - "- {py:meth}`~autogen_agentchat.base.TaskRunner.run`: To process a task, which can be a {py:class}`str`, {py:class}`~autogen_agentchat.messages.TextMessage`, or {py:class}`~autogen_agentchat.messages.MultiModalMessage`, and returns {py:class}`~autogen_agentchat.base.TaskResult`. 
The task can also be `None` to resume processing the previous task if the team has not been reset.\n", - "- {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`: Same as {py:meth}`~autogen_agentchat.base.TaskRunner.run`, but returns a async generator of messages and the final task result.\n", + "- {py:meth}`~autogen_agentchat.base.TaskRunner.run`: Process a task, which can be a {py:class}`str`, {py:class}`~autogen_agentchat.messages.TextMessage`, {py:class}`~autogen_agentchat.messages.MultiModalMessage`, or {py:class}`~autogen_agentchat.messages.HandoffMessage`, and returns {py:class}`~autogen_agentchat.base.TaskResult`. The task can also be `None` to resume processing the previous task if the team has not been reset.\n", + "- {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`: Similar to {py:meth}`~autogen_agentchat.base.TaskRunner.run`, but it returns an async generator of messages and the final task result.\n", "- {py:meth}`~autogen_agentchat.base.Team.reset`: To reset the team state if the next task is not related to the previous task. 
Otherwise, the team can utilize the context from the previous task to process the next one.\n", "\n", "In this section, we will be using the\n", @@ -782,7 +782,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.12.6" } }, "nbformat": 4, From eb67e4ac93ea85621e6a604c95d38d47104a73b8 Mon Sep 17 00:00:00 2001 From: Ryan Sweet Date: Thu, 21 Nov 2024 16:31:13 -0800 Subject: [PATCH 05/33] add appsettings.Development.json to gitignore (#4303) Co-authored-by: Jack Gerrits --- dotnet/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/dotnet/.gitignore b/dotnet/.gitignore index 2fc32d9ac7e4..62205af71a07 100644 --- a/dotnet/.gitignore +++ b/dotnet/.gitignore @@ -82,6 +82,7 @@ BenchmarkDotNet.Artifacts/ project.lock.json project.fragment.lock.json artifacts/ +appsettings.Development.json # Tye .tye/ From 97fd6cc1e091fe6c0e5226e747b5b0fb6ecce2d6 Mon Sep 17 00:00:00 2001 From: Ryan Sweet Date: Fri, 22 Nov 2024 05:57:11 -0800 Subject: [PATCH 06/33] improve subscriptions (#4304) --- .../Agents/Services/Orleans/SubscriptionsGrain.cs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/SubscriptionsGrain.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/SubscriptionsGrain.cs index 905dc8e914ac..682073f0b97c 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/SubscriptionsGrain.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/SubscriptionsGrain.cs @@ -6,8 +6,13 @@ namespace Microsoft.AutoGen.Agents; internal sealed class SubscriptionsGrain([PersistentState("state", "PubSubStore")] IPersistentState state) : Grain, ISubscriptionsGrain { private readonly Dictionary> _subscriptions = new(); - public ValueTask>> GetSubscriptions(string agentType) + public ValueTask>> GetSubscriptions(string? 
agentType = null) { + //if agentType is null, return all subscriptions else filter on agentType + if (agentType != null) + { + return new ValueTask>>(_subscriptions.Where(x => x.Value.Contains(agentType)).ToDictionary(x => x.Key, x => x.Value)); + } return new ValueTask>>(_subscriptions); } public ValueTask Subscribe(string agentType, string topic) From 232068a245043aa7b62b2a93beb66b572b8da1e2 Mon Sep 17 00:00:00 2001 From: Gerardo Moreno Date: Fri, 22 Nov 2024 06:05:52 -0800 Subject: [PATCH 07/33] Add system msg when calling inside the assistant tool loop (#4308) (#4309) --- .../src/autogen_agentchat/agents/_assistant_agent.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 7e502498f9b6..cb1eff8d6f6e 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -279,8 +279,9 @@ async def on_messages_stream( return # Generate an inference result based on the current model context. 
+ llm_messages = self._system_messages + self._model_context result = await self._model_client.create( - self._model_context, tools=self._tools + self._handoff_tools, cancellation_token=cancellation_token + llm_messages, tools=self._tools + self._handoff_tools, cancellation_token=cancellation_token ) self._model_context.append(AssistantMessage(content=result.content, source=self.name)) From ac53961bc8b6545824dbe900d497e72b4079e8d1 Mon Sep 17 00:00:00 2001 From: Leonardo Pinheiro Date: Sat, 23 Nov 2024 02:29:39 +1000 Subject: [PATCH 08/33] Delete autogen-ext refactor deprecations (#4305) * delete files and update dependencies * add explicit config exports * ignore mypy error on nb --------- Co-authored-by: Leonardo Pinheiro Co-authored-by: Jack Gerrits --- .../tutorial/termination.ipynb | 2 +- .../cookbook/local-llms-ollama-litellm.ipynb | 8 +- .../design-patterns/mixture-of-agents.ipynb | 3 +- .../design-patterns/multi-agent-debate.ipynb | 4 +- .../core-user-guide/framework/tools.ipynb | 2 +- .../autogen-core/samples/common/utils.py | 3 +- .../samples/distributed-group-chat/_types.py | 2 +- .../samples/distributed-group-chat/_utils.py | 2 +- .../components/models/__init__.py | 30 - .../components/models/_openai_client.py | 901 ------------------ .../components/models/config/__init__.py | 52 - .../autogen-core/tests/test_tool_agent.py | 127 ++- .../packages/autogen-core/tests/test_tools.py | 24 - .../aca_dynamic_sessions/__init__.py | 21 - .../code_executor/docker_executor/__init__.py | 11 - .../src/autogen_ext/models/__init__.py | 9 +- .../models/_openai/config/__init__.py | 3 + .../autogen_ext/tools/langchain/__init__.py | 7 - .../tests/models/test_openai_model_client.py | 51 +- 19 files changed, 131 insertions(+), 1131 deletions(-) delete mode 100644 python/packages/autogen-core/src/autogen_core/components/models/_openai_client.py delete mode 100644 python/packages/autogen-core/src/autogen_core/components/models/config/__init__.py delete mode 100644 
python/packages/autogen-ext/src/autogen_ext/code_executor/aca_dynamic_sessions/__init__.py delete mode 100644 python/packages/autogen-ext/src/autogen_ext/code_executor/docker_executor/__init__.py delete mode 100644 python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb index e10942491286..4a1cfe42cf6c 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb @@ -39,7 +39,7 @@ "from autogen_agentchat.logging import ConsoleLogHandler\n", "from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_core.components.models import OpenAIChatCompletionClient\n", + "from autogen_ext.models import OpenAIChatCompletionClient\n", "\n", "logger = logging.getLogger(EVENT_LOGGER_NAME)\n", "logger.addHandler(ConsoleLogHandler())\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb index a90cb440d6ce..80fde2b71017 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb @@ -46,10 +46,10 @@ "from autogen_core.components.models import (\n", " AssistantMessage,\n", " ChatCompletionClient,\n", - " OpenAIChatCompletionClient,\n", " SystemMessage,\n", " UserMessage,\n", - ")" + ")\n", + "from autogen_ext.models import OpenAIChatCompletionClient" ] }, { @@ -65,7 +65,7 
@@ "metadata": {}, "outputs": [], "source": [ - "def get_model_client() -> OpenAIChatCompletionClient:\n", + "def get_model_client() -> OpenAIChatCompletionClient: # type: ignore\n", " \"Mimic OpenAI API using Local LLM Server.\"\n", " return OpenAIChatCompletionClient(\n", " model=\"gpt-4o\", # Need to use one of the OpenAI models as a placeholder for now.\n", @@ -233,7 +233,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.7" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb index dff7d18bd424..61b8b62bc221 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb @@ -41,7 +41,8 @@ "from autogen_core.application import SingleThreadedAgentRuntime\n", "from autogen_core.base import AgentId, MessageContext\n", "from autogen_core.components import RoutedAgent, message_handler\n", - "from autogen_core.components.models import ChatCompletionClient, OpenAIChatCompletionClient, SystemMessage, UserMessage" + "from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n", + "from autogen_ext.models import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb index 3120363dd23b..72b653687915 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb +++ 
b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb @@ -50,10 +50,10 @@ " AssistantMessage,\n", " ChatCompletionClient,\n", " LLMMessage,\n", - " OpenAIChatCompletionClient,\n", " SystemMessage,\n", " UserMessage,\n", - ")" + ")\n", + "from autogen_ext.models import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb index 183d878e4c8f..ff24095e8b50 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb @@ -161,12 +161,12 @@ "from autogen_core.components.models import (\n", " ChatCompletionClient,\n", " LLMMessage,\n", - " OpenAIChatCompletionClient,\n", " SystemMessage,\n", " UserMessage,\n", ")\n", "from autogen_core.components.tool_agent import ToolAgent, tool_agent_caller_loop\n", "from autogen_core.components.tools import FunctionTool, Tool, ToolSchema\n", + "from autogen_ext.models import OpenAIChatCompletionClient\n", "\n", "\n", "@dataclass\n", diff --git a/python/packages/autogen-core/samples/common/utils.py b/python/packages/autogen-core/samples/common/utils.py index 4e77ac33232e..0765ceec561a 100644 --- a/python/packages/autogen-core/samples/common/utils.py +++ b/python/packages/autogen-core/samples/common/utils.py @@ -3,14 +3,13 @@ from autogen_core.components.models import ( AssistantMessage, - AzureOpenAIChatCompletionClient, ChatCompletionClient, FunctionExecutionResult, FunctionExecutionResultMessage, LLMMessage, - OpenAIChatCompletionClient, UserMessage, ) +from autogen_ext.models import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider from typing_extensions import Literal diff --git 
a/python/packages/autogen-core/samples/distributed-group-chat/_types.py b/python/packages/autogen-core/samples/distributed-group-chat/_types.py index 178446ca8c62..0e05d941c1ff 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/_types.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/_types.py @@ -4,7 +4,7 @@ from autogen_core.components.models import ( LLMMessage, ) -from autogen_core.components.models.config import AzureOpenAIClientConfiguration +from autogen_ext.models import AzureOpenAIClientConfiguration from pydantic import BaseModel diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_utils.py b/python/packages/autogen-core/samples/distributed-group-chat/_utils.py index 2c4b768e49da..431a94319fc5 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/_utils.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/_utils.py @@ -5,7 +5,7 @@ import yaml from _types import AppConfig from autogen_core.base import MessageSerializer, try_get_known_serializers_for_type -from autogen_core.components.models.config import AzureOpenAIClientConfiguration +from autogen_ext.models import AzureOpenAIClientConfiguration from azure.identity import DefaultAzureCredential, get_bearer_token_provider diff --git a/python/packages/autogen-core/src/autogen_core/components/models/__init__.py b/python/packages/autogen-core/src/autogen_core/components/models/__init__.py index f57c82289ddc..9b12aa702edd 100644 --- a/python/packages/autogen-core/src/autogen_core/components/models/__init__.py +++ b/python/packages/autogen-core/src/autogen_core/components/models/__init__.py @@ -1,7 +1,3 @@ -import importlib -import warnings -from typing import TYPE_CHECKING, Any - from ._model_client import ChatCompletionClient, ModelCapabilities from ._types import ( AssistantMessage, @@ -17,13 +13,7 @@ UserMessage, ) -if TYPE_CHECKING: - from ._openai_client import AzureOpenAIChatCompletionClient, 
OpenAIChatCompletionClient - - __all__ = [ - "AzureOpenAIChatCompletionClient", - "OpenAIChatCompletionClient", "ModelCapabilities", "ChatCompletionClient", "SystemMessage", @@ -38,23 +28,3 @@ "TopLogprob", "ChatCompletionTokenLogprob", ] - - -def __getattr__(name: str) -> Any: - deprecated_classes = { - "AzureOpenAIChatCompletionClient": "autogen_ext.models.AzureOpenAIChatCompletionClient", - "OpenAIChatCompletionClient": "autogen_ext.modelsChatCompletionClient", - } - if name in deprecated_classes: - warnings.warn( - f"{name} moved to autogen_ext. " f"Please import it from {deprecated_classes[name]}.", - FutureWarning, - stacklevel=2, - ) - # Dynamically import the class from the current module - module = importlib.import_module("._openai_client", __name__) - attr = getattr(module, name) - # Cache the attribute in the module's global namespace - globals()[name] = attr - return attr - raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/python/packages/autogen-core/src/autogen_core/components/models/_openai_client.py b/python/packages/autogen-core/src/autogen_core/components/models/_openai_client.py deleted file mode 100644 index 8ce8ddff2cbc..000000000000 --- a/python/packages/autogen-core/src/autogen_core/components/models/_openai_client.py +++ /dev/null @@ -1,901 +0,0 @@ -import asyncio -import inspect -import json -import logging -import math -import re -import warnings -from asyncio import Task -from typing import ( - Any, - AsyncGenerator, - Dict, - List, - Mapping, - Optional, - Sequence, - Set, - Type, - Union, - cast, -) - -import tiktoken -from openai import AsyncAzureOpenAI, AsyncOpenAI -from openai.types.chat import ( - ChatCompletion, - ChatCompletionAssistantMessageParam, - ChatCompletionContentPartParam, - ChatCompletionContentPartTextParam, - ChatCompletionMessageParam, - ChatCompletionMessageToolCallParam, - ChatCompletionRole, - ChatCompletionSystemMessageParam, - ChatCompletionToolMessageParam, - 
ChatCompletionToolParam, - ChatCompletionUserMessageParam, - ParsedChatCompletion, - ParsedChoice, - completion_create_params, -) -from openai.types.chat.chat_completion import Choice -from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice -from openai.types.shared_params import FunctionDefinition, FunctionParameters -from pydantic import BaseModel -from typing_extensions import Unpack - -from ...application.logging import EVENT_LOGGER_NAME, TRACE_LOGGER_NAME -from ...application.logging.events import LLMCallEvent -from ...base import CancellationToken -from .. import ( - FunctionCall, - Image, -) -from ..tools import Tool, ToolSchema -from . import _model_info -from ._model_client import ChatCompletionClient, ModelCapabilities -from ._types import ( - AssistantMessage, - ChatCompletionTokenLogprob, - CreateResult, - FunctionExecutionResultMessage, - LLMMessage, - RequestUsage, - SystemMessage, - TopLogprob, - UserMessage, -) -from .config import AzureOpenAIClientConfiguration, OpenAIClientConfiguration - -logger = logging.getLogger(EVENT_LOGGER_NAME) -trace_logger = logging.getLogger(TRACE_LOGGER_NAME) - -openai_init_kwargs = set(inspect.getfullargspec(AsyncOpenAI.__init__).kwonlyargs) -aopenai_init_kwargs = set(inspect.getfullargspec(AsyncAzureOpenAI.__init__).kwonlyargs) - -create_kwargs = set(completion_create_params.CompletionCreateParamsBase.__annotations__.keys()) | set( - ("timeout", "stream") -) -# Only single choice allowed -disallowed_create_args = set(["stream", "messages", "function_call", "functions", "n"]) -required_create_args: Set[str] = set(["model"]) - - -def _azure_openai_client_from_config(config: Mapping[str, Any]) -> AsyncAzureOpenAI: - # Take a copy - copied_config = dict(config).copy() - - # Do some fixups - copied_config["azure_deployment"] = copied_config.get("azure_deployment", config.get("model")) - if copied_config["azure_deployment"] is not None: - copied_config["azure_deployment"] = 
copied_config["azure_deployment"].replace(".", "") - copied_config["azure_endpoint"] = copied_config.get("azure_endpoint", copied_config.pop("base_url", None)) - - # Shave down the config to just the AzureOpenAIChatCompletionClient kwargs - azure_config = {k: v for k, v in copied_config.items() if k in aopenai_init_kwargs} - return AsyncAzureOpenAI(**azure_config) - - -def _openai_client_from_config(config: Mapping[str, Any]) -> AsyncOpenAI: - # Shave down the config to just the OpenAI kwargs - openai_config = {k: v for k, v in config.items() if k in openai_init_kwargs} - return AsyncOpenAI(**openai_config) - - -def _create_args_from_config(config: Mapping[str, Any]) -> Dict[str, Any]: - create_args = {k: v for k, v in config.items() if k in create_kwargs} - create_args_keys = set(create_args.keys()) - if not required_create_args.issubset(create_args_keys): - raise ValueError(f"Required create args are missing: {required_create_args - create_args_keys}") - if disallowed_create_args.intersection(create_args_keys): - raise ValueError(f"Disallowed create args are present: {disallowed_create_args.intersection(create_args_keys)}") - return create_args - - -# TODO check types -# oai_system_message_schema = type2schema(ChatCompletionSystemMessageParam) -# oai_user_message_schema = type2schema(ChatCompletionUserMessageParam) -# oai_assistant_message_schema = type2schema(ChatCompletionAssistantMessageParam) -# oai_tool_message_schema = type2schema(ChatCompletionToolMessageParam) - - -def type_to_role(message: LLMMessage) -> ChatCompletionRole: - if isinstance(message, SystemMessage): - return "system" - elif isinstance(message, UserMessage): - return "user" - elif isinstance(message, AssistantMessage): - return "assistant" - else: - return "tool" - - -def user_message_to_oai(message: UserMessage) -> ChatCompletionUserMessageParam: - assert_valid_name(message.source) - if isinstance(message.content, str): - return ChatCompletionUserMessageParam( - content=message.content, - 
role="user", - name=message.source, - ) - else: - parts: List[ChatCompletionContentPartParam] = [] - for part in message.content: - if isinstance(part, str): - oai_part = ChatCompletionContentPartTextParam( - text=part, - type="text", - ) - parts.append(oai_part) - elif isinstance(part, Image): - # TODO: support url based images - # TODO: support specifying details - parts.append(part.to_openai_format()) - else: - raise ValueError(f"Unknown content type: {part}") - return ChatCompletionUserMessageParam( - content=parts, - role="user", - name=message.source, - ) - - -def system_message_to_oai(message: SystemMessage) -> ChatCompletionSystemMessageParam: - return ChatCompletionSystemMessageParam( - content=message.content, - role="system", - ) - - -def func_call_to_oai(message: FunctionCall) -> ChatCompletionMessageToolCallParam: - return ChatCompletionMessageToolCallParam( - id=message.id, - function={ - "arguments": message.arguments, - "name": message.name, - }, - type="function", - ) - - -def tool_message_to_oai( - message: FunctionExecutionResultMessage, -) -> Sequence[ChatCompletionToolMessageParam]: - return [ - ChatCompletionToolMessageParam(content=x.content, role="tool", tool_call_id=x.call_id) for x in message.content - ] - - -def assistant_message_to_oai( - message: AssistantMessage, -) -> ChatCompletionAssistantMessageParam: - assert_valid_name(message.source) - if isinstance(message.content, list): - return ChatCompletionAssistantMessageParam( - tool_calls=[func_call_to_oai(x) for x in message.content], - role="assistant", - name=message.source, - ) - else: - return ChatCompletionAssistantMessageParam( - content=message.content, - role="assistant", - name=message.source, - ) - - -def to_oai_type(message: LLMMessage) -> Sequence[ChatCompletionMessageParam]: - if isinstance(message, SystemMessage): - return [system_message_to_oai(message)] - elif isinstance(message, UserMessage): - return [user_message_to_oai(message)] - elif isinstance(message, 
AssistantMessage): - return [assistant_message_to_oai(message)] - else: - return tool_message_to_oai(message) - - -def calculate_vision_tokens(image: Image, detail: str = "auto") -> int: - MAX_LONG_EDGE = 2048 - BASE_TOKEN_COUNT = 85 - TOKENS_PER_TILE = 170 - MAX_SHORT_EDGE = 768 - TILE_SIZE = 512 - - if detail == "low": - return BASE_TOKEN_COUNT - - width, height = image.image.size - - # Scale down to fit within a MAX_LONG_EDGE x MAX_LONG_EDGE square if necessary - - if width > MAX_LONG_EDGE or height > MAX_LONG_EDGE: - aspect_ratio = width / height - if aspect_ratio > 1: - # Width is greater than height - width = MAX_LONG_EDGE - height = int(MAX_LONG_EDGE / aspect_ratio) - else: - # Height is greater than or equal to width - height = MAX_LONG_EDGE - width = int(MAX_LONG_EDGE * aspect_ratio) - - # Resize such that the shortest side is MAX_SHORT_EDGE if both dimensions exceed MAX_SHORT_EDGE - aspect_ratio = width / height - if width > MAX_SHORT_EDGE and height > MAX_SHORT_EDGE: - if aspect_ratio > 1: - # Width is greater than height - height = MAX_SHORT_EDGE - width = int(MAX_SHORT_EDGE * aspect_ratio) - else: - # Height is greater than or equal to width - width = MAX_SHORT_EDGE - height = int(MAX_SHORT_EDGE / aspect_ratio) - - # Calculate the number of tiles based on TILE_SIZE - - tiles_width = math.ceil(width / TILE_SIZE) - tiles_height = math.ceil(height / TILE_SIZE) - total_tiles = tiles_width * tiles_height - # Calculate the total tokens based on the number of tiles and the base token count - - total_tokens = BASE_TOKEN_COUNT + TOKENS_PER_TILE * total_tiles - - return total_tokens - - -def _add_usage(usage1: RequestUsage, usage2: RequestUsage) -> RequestUsage: - return RequestUsage( - prompt_tokens=usage1.prompt_tokens + usage2.prompt_tokens, - completion_tokens=usage1.completion_tokens + usage2.completion_tokens, - ) - - -def convert_tools( - tools: Sequence[Tool | ToolSchema], -) -> List[ChatCompletionToolParam]: - result: List[ChatCompletionToolParam] = [] 
- for tool in tools: - if isinstance(tool, Tool): - tool_schema = tool.schema - else: - assert isinstance(tool, dict) - tool_schema = tool - - result.append( - ChatCompletionToolParam( - type="function", - function=FunctionDefinition( - name=tool_schema["name"], - description=(tool_schema["description"] if "description" in tool_schema else ""), - parameters=( - cast(FunctionParameters, tool_schema["parameters"]) if "parameters" in tool_schema else {} - ), - ), - ) - ) - # Check if all tools have valid names. - for tool_param in result: - assert_valid_name(tool_param["function"]["name"]) - return result - - -def normalize_name(name: str) -> str: - """ - LLMs sometimes ask functions while ignoring their own format requirements, this function should be used to replace invalid characters with "_". - - Prefer _assert_valid_name for validating user configuration or input - """ - return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:64] - - -def assert_valid_name(name: str) -> str: - """ - Ensure that configured names are valid, raises ValueError if not. - - For munging LLM responses use _normalize_name to ensure LLM specified names don't break the API. - """ - if not re.match(r"^[a-zA-Z0-9_-]+$", name): - raise ValueError(f"Invalid name: {name}. Only letters, numbers, '_' and '-' are allowed.") - if len(name) > 64: - raise ValueError(f"Invalid name: {name}. 
Name must be less than 64 characters.") - return name - - -class BaseOpenAIChatCompletionClient(ChatCompletionClient): - def __init__( - self, - client: Union[AsyncOpenAI, AsyncAzureOpenAI], - create_args: Dict[str, Any], - model_capabilities: Optional[ModelCapabilities] = None, - ): - self._client = client - if model_capabilities is None and isinstance(client, AsyncAzureOpenAI): - raise ValueError("AzureOpenAIChatCompletionClient requires explicit model capabilities") - elif model_capabilities is None: - self._model_capabilities = _model_info.get_capabilities(create_args["model"]) - else: - self._model_capabilities = model_capabilities - - self._resolved_model: Optional[str] = None - if "model" in create_args: - self._resolved_model = _model_info.resolve_model(create_args["model"]) - - if ( - "response_format" in create_args - and create_args["response_format"]["type"] == "json_object" - and not self._model_capabilities["json_output"] - ): - raise ValueError("Model does not support JSON output") - - self._create_args = create_args - self._total_usage = RequestUsage(prompt_tokens=0, completion_tokens=0) - self._actual_usage = RequestUsage(prompt_tokens=0, completion_tokens=0) - - @classmethod - def create_from_config(cls, config: Dict[str, Any]) -> ChatCompletionClient: - return OpenAIChatCompletionClient(**config) - - async def create( - self, - messages: Sequence[LLMMessage], - tools: Sequence[Tool | ToolSchema] = [], - json_output: Optional[bool] = None, - extra_create_args: Mapping[str, Any] = {}, - cancellation_token: Optional[CancellationToken] = None, - ) -> CreateResult: - # Make sure all extra_create_args are valid - extra_create_args_keys = set(extra_create_args.keys()) - if not create_kwargs.issuperset(extra_create_args_keys): - raise ValueError(f"Extra create args are invalid: {extra_create_args_keys - create_kwargs}") - - # Copy the create args and overwrite anything in extra_create_args - create_args = self._create_args.copy() - 
create_args.update(extra_create_args) - - # Declare use_beta_client - use_beta_client: bool = False - response_format_value: Optional[Type[BaseModel]] = None - - if "response_format" in create_args: - value = create_args["response_format"] - # If value is a Pydantic model class, use the beta client - if isinstance(value, type) and issubclass(value, BaseModel): - response_format_value = value - use_beta_client = True - else: - # response_format_value is not a Pydantic model class - use_beta_client = False - response_format_value = None - - # Remove 'response_format' from create_args to prevent passing it twice - create_args_no_response_format = {k: v for k, v in create_args.items() if k != "response_format"} - - # TODO: allow custom handling. - # For now we raise an error if images are present and vision is not supported - if self.capabilities["vision"] is False: - for message in messages: - if isinstance(message, UserMessage): - if isinstance(message.content, list) and any(isinstance(x, Image) for x in message.content): - raise ValueError("Model does not support vision and image was provided") - - if json_output is not None: - if self.capabilities["json_output"] is False and json_output is True: - raise ValueError("Model does not support JSON output") - - if json_output is True: - create_args["response_format"] = {"type": "json_object"} - else: - create_args["response_format"] = {"type": "text"} - - if self.capabilities["json_output"] is False and json_output is True: - raise ValueError("Model does not support JSON output") - - oai_messages_nested = [to_oai_type(m) for m in messages] - oai_messages = [item for sublist in oai_messages_nested for item in sublist] - - if self.capabilities["function_calling"] is False and len(tools) > 0: - raise ValueError("Model does not support function calling") - future: Union[Task[ParsedChatCompletion[BaseModel]], Task[ChatCompletion]] - if len(tools) > 0: - converted_tools = convert_tools(tools) - if use_beta_client: - # Pass 
response_format_value if it's not None - if response_format_value is not None: - future = asyncio.ensure_future( - self._client.beta.chat.completions.parse( - messages=oai_messages, - tools=converted_tools, - response_format=response_format_value, - **create_args_no_response_format, - ) - ) - else: - future = asyncio.ensure_future( - self._client.beta.chat.completions.parse( - messages=oai_messages, - tools=converted_tools, - **create_args_no_response_format, - ) - ) - else: - future = asyncio.ensure_future( - self._client.chat.completions.create( - messages=oai_messages, - stream=False, - tools=converted_tools, - **create_args, - ) - ) - else: - if use_beta_client: - if response_format_value is not None: - future = asyncio.ensure_future( - self._client.beta.chat.completions.parse( - messages=oai_messages, - response_format=response_format_value, - **create_args_no_response_format, - ) - ) - else: - future = asyncio.ensure_future( - self._client.beta.chat.completions.parse( - messages=oai_messages, - **create_args_no_response_format, - ) - ) - else: - future = asyncio.ensure_future( - self._client.chat.completions.create( - messages=oai_messages, - stream=False, - **create_args, - ) - ) - - if cancellation_token is not None: - cancellation_token.link_future(future) - result: Union[ParsedChatCompletion[BaseModel], ChatCompletion] = await future - if use_beta_client: - result = cast(ParsedChatCompletion[Any], result) - - if result.usage is not None: - logger.info( - LLMCallEvent( - prompt_tokens=result.usage.prompt_tokens, - completion_tokens=result.usage.completion_tokens, - ) - ) - - usage = RequestUsage( - # TODO backup token counting - prompt_tokens=result.usage.prompt_tokens if result.usage is not None else 0, - completion_tokens=(result.usage.completion_tokens if result.usage is not None else 0), - ) - - if self._resolved_model is not None: - if self._resolved_model != result.model: - warnings.warn( - f"Resolved model mismatch: {self._resolved_model} != 
{result.model}. Model mapping may be incorrect.", - stacklevel=2, - ) - - # Limited to a single choice currently. - choice: Union[ParsedChoice[Any], ParsedChoice[BaseModel], Choice] = result.choices[0] - if choice.finish_reason == "function_call": - raise ValueError("Function calls are not supported in this context") - - content: Union[str, List[FunctionCall]] - if choice.finish_reason == "tool_calls": - assert choice.message.tool_calls is not None - assert choice.message.function_call is None - - # NOTE: If OAI response type changes, this will need to be updated - content = [ - FunctionCall( - id=x.id, - arguments=x.function.arguments, - name=normalize_name(x.function.name), - ) - for x in choice.message.tool_calls - ] - finish_reason = "function_calls" - else: - finish_reason = choice.finish_reason - content = choice.message.content or "" - logprobs: Optional[List[ChatCompletionTokenLogprob]] = None - if choice.logprobs and choice.logprobs.content: - logprobs = [ - ChatCompletionTokenLogprob( - token=x.token, - logprob=x.logprob, - top_logprobs=[TopLogprob(logprob=y.logprob, bytes=y.bytes) for y in x.top_logprobs], - bytes=x.bytes, - ) - for x in choice.logprobs.content - ] - response = CreateResult( - finish_reason=finish_reason, # type: ignore - content=content, - usage=usage, - cached=False, - logprobs=logprobs, - ) - - _add_usage(self._actual_usage, usage) - _add_usage(self._total_usage, usage) - - # TODO - why is this cast needed? - return response - - async def create_stream( - self, - messages: Sequence[LLMMessage], - tools: Sequence[Tool | ToolSchema] = [], - json_output: Optional[bool] = None, - extra_create_args: Mapping[str, Any] = {}, - cancellation_token: Optional[CancellationToken] = None, - ) -> AsyncGenerator[Union[str, CreateResult], None]: - """ - Creates an AsyncGenerator that will yield a stream of chat completions based on the provided messages and tools. - - Args: - messages (Sequence[LLMMessage]): A sequence of messages to be processed. 
- tools (Sequence[Tool | ToolSchema], optional): A sequence of tools to be used in the completion. Defaults to `[]`. - json_output (Optional[bool], optional): If True, the output will be in JSON format. Defaults to None. - extra_create_args (Mapping[str, Any], optional): Additional arguments for the creation process. Default to `{}`. - cancellation_token (Optional[CancellationToken], optional): A token to cancel the operation. Defaults to None. - - Yields: - AsyncGenerator[Union[str, CreateResult], None]: A generator yielding the completion results as they are produced. - - In streaming, the default behaviour is not return token usage counts. See: [OpenAI API reference for possible args](https://platform.openai.com/docs/api-reference/chat/create). - However `extra_create_args={"stream_options": {"include_usage": True}}` will (if supported by the accessed API) - return a final chunk with usage set to a RequestUsage object having prompt and completion token counts, - all preceding chunks will have usage as None. See: [stream_options](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options). - - Other examples of OPENAI supported arguments that can be included in `extra_create_args`: - - `temperature` (float): Controls the randomness of the output. Higher values (e.g., 0.8) make the output more random, while lower values (e.g., 0.2) make it more focused and deterministic. - - `max_tokens` (int): The maximum number of tokens to generate in the completion. - - `top_p` (float): An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - `frequency_penalty` (float): A value between -2.0 and 2.0 that penalizes new tokens based on their existing frequency in the text so far, decreasing the likelihood of repeated phrases. 
- - `presence_penalty` (float): A value between -2.0 and 2.0 that penalizes new tokens based on whether they appear in the text so far, encouraging the model to talk about new topics. - """ - # Make sure all extra_create_args are valid - extra_create_args_keys = set(extra_create_args.keys()) - if not create_kwargs.issuperset(extra_create_args_keys): - raise ValueError(f"Extra create args are invalid: {extra_create_args_keys - create_kwargs}") - - # Copy the create args and overwrite anything in extra_create_args - create_args = self._create_args.copy() - create_args.update(extra_create_args) - - oai_messages_nested = [to_oai_type(m) for m in messages] - oai_messages = [item for sublist in oai_messages_nested for item in sublist] - - # TODO: allow custom handling. - # For now we raise an error if images are present and vision is not supported - if self.capabilities["vision"] is False: - for message in messages: - if isinstance(message, UserMessage): - if isinstance(message.content, list) and any(isinstance(x, Image) for x in message.content): - raise ValueError("Model does not support vision and image was provided") - - if json_output is not None: - if self.capabilities["json_output"] is False and json_output is True: - raise ValueError("Model does not support JSON output") - - if json_output is True: - create_args["response_format"] = {"type": "json_object"} - else: - create_args["response_format"] = {"type": "text"} - - if len(tools) > 0: - converted_tools = convert_tools(tools) - stream_future = asyncio.ensure_future( - self._client.chat.completions.create( - messages=oai_messages, - stream=True, - tools=converted_tools, - **create_args, - ) - ) - else: - stream_future = asyncio.ensure_future( - self._client.chat.completions.create(messages=oai_messages, stream=True, **create_args) - ) - if cancellation_token is not None: - cancellation_token.link_future(stream_future) - stream = await stream_future - choice: Union[ParsedChoice[Any], ParsedChoice[BaseModel], 
ChunkChoice] = cast(ChunkChoice, None) - chunk = None - stop_reason = None - maybe_model = None - content_deltas: List[str] = [] - full_tool_calls: Dict[int, FunctionCall] = {} - completion_tokens = 0 - logprobs: Optional[List[ChatCompletionTokenLogprob]] = None - while True: - try: - chunk_future = asyncio.ensure_future(anext(stream)) - if cancellation_token is not None: - cancellation_token.link_future(chunk_future) - chunk = await chunk_future - - # to process usage chunk in streaming situations - # add stream_options={"include_usage": True} in the initialization of OpenAIChatCompletionClient(...) - # However the different api's - # OPENAI api usage chunk produces no choices so need to check if there is a choice - # liteLLM api usage chunk does produce choices - choice = ( - chunk.choices[0] - if len(chunk.choices) > 0 - else choice - if chunk.usage is not None and stop_reason is not None - else cast(ChunkChoice, None) - ) - - # for liteLLM chunk usage, do the following hack keeping the pervious chunk.stop_reason (if set). 
- # set the stop_reason for the usage chunk to the prior stop_reason - stop_reason = choice.finish_reason if chunk.usage is None and stop_reason is None else stop_reason - maybe_model = chunk.model - # First try get content - if choice.delta.content is not None: - content_deltas.append(choice.delta.content) - if len(choice.delta.content) > 0: - yield choice.delta.content - continue - - # Otherwise, get tool calls - if choice.delta.tool_calls is not None: - for tool_call_chunk in choice.delta.tool_calls: - idx = tool_call_chunk.index - if idx not in full_tool_calls: - # We ignore the type hint here because we want to fill in type when the delta provides it - full_tool_calls[idx] = FunctionCall(id="", arguments="", name="") - - if tool_call_chunk.id is not None: - full_tool_calls[idx].id += tool_call_chunk.id - - if tool_call_chunk.function is not None: - if tool_call_chunk.function.name is not None: - full_tool_calls[idx].name += tool_call_chunk.function.name - if tool_call_chunk.function.arguments is not None: - full_tool_calls[idx].arguments += tool_call_chunk.function.arguments - if choice.logprobs and choice.logprobs.content: - logprobs = [ - ChatCompletionTokenLogprob( - token=x.token, - logprob=x.logprob, - top_logprobs=[TopLogprob(logprob=y.logprob, bytes=y.bytes) for y in x.top_logprobs], - bytes=x.bytes, - ) - for x in choice.logprobs.content - ] - - except StopAsyncIteration: - break - - model = maybe_model or create_args["model"] - model = model.replace("gpt-35", "gpt-3.5") # hack for Azure API - - if chunk and chunk.usage: - prompt_tokens = chunk.usage.prompt_tokens - else: - prompt_tokens = 0 - - if stop_reason is None: - raise ValueError("No stop reason found") - - content: Union[str, List[FunctionCall]] - if len(content_deltas) > 1: - content = "".join(content_deltas) - if chunk and chunk.usage: - completion_tokens = chunk.usage.completion_tokens - else: - completion_tokens = 0 - else: - completion_tokens = 0 - # TODO: fix assumption that dict values 
were added in order and actually order by int index - # for tool_call in full_tool_calls.values(): - # # value = json.dumps(tool_call) - # # completion_tokens += count_token(value, model=model) - # completion_tokens += 0 - content = list(full_tool_calls.values()) - - usage = RequestUsage( - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - if stop_reason == "function_call": - raise ValueError("Function calls are not supported in this context") - if stop_reason == "tool_calls": - stop_reason = "function_calls" - - result = CreateResult( - finish_reason=stop_reason, # type: ignore - content=content, - usage=usage, - cached=False, - logprobs=logprobs, - ) - - _add_usage(self._actual_usage, usage) - _add_usage(self._total_usage, usage) - - yield result - - def actual_usage(self) -> RequestUsage: - return self._actual_usage - - def total_usage(self) -> RequestUsage: - return self._total_usage - - def count_tokens(self, messages: Sequence[LLMMessage], tools: Sequence[Tool | ToolSchema] = []) -> int: - model = self._create_args["model"] - try: - encoding = tiktoken.encoding_for_model(model) - except KeyError: - trace_logger.warning(f"Model {model} not found. Using cl100k_base encoding.") - encoding = tiktoken.get_encoding("cl100k_base") - tokens_per_message = 3 - tokens_per_name = 1 - num_tokens = 0 - - # Message tokens. 
- for message in messages: - num_tokens += tokens_per_message - oai_message = to_oai_type(message) - for oai_message_part in oai_message: - for key, value in oai_message_part.items(): - if value is None: - continue - - if isinstance(message, UserMessage) and isinstance(value, list): - typed_message_value = cast(List[ChatCompletionContentPartParam], value) - - assert len(typed_message_value) == len( - message.content - ), "Mismatch in message content and typed message value" - - # We need image properties that are only in the original message - for part, content_part in zip(typed_message_value, message.content, strict=False): - if isinstance(content_part, Image): - # TODO: add detail parameter - num_tokens += calculate_vision_tokens(content_part) - elif isinstance(part, str): - num_tokens += len(encoding.encode(part)) - else: - try: - serialized_part = json.dumps(part) - num_tokens += len(encoding.encode(serialized_part)) - except TypeError: - trace_logger.warning(f"Could not convert {part} to string, skipping.") - else: - if not isinstance(value, str): - try: - value = json.dumps(value) - except TypeError: - trace_logger.warning(f"Could not convert {value} to string, skipping.") - continue - num_tokens += len(encoding.encode(value)) - if key == "name": - num_tokens += tokens_per_name - num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> - - # Tool tokens. 
- oai_tools = convert_tools(tools) - for tool in oai_tools: - function = tool["function"] - tool_tokens = len(encoding.encode(function["name"])) - if "description" in function: - tool_tokens += len(encoding.encode(function["description"])) - tool_tokens -= 2 - if "parameters" in function: - parameters = function["parameters"] - if "properties" in parameters: - assert isinstance(parameters["properties"], dict) - for propertiesKey in parameters["properties"]: # pyright: ignore - assert isinstance(propertiesKey, str) - tool_tokens += len(encoding.encode(propertiesKey)) - v = parameters["properties"][propertiesKey] # pyright: ignore - for field in v: # pyright: ignore - if field == "type": - tool_tokens += 2 - tool_tokens += len(encoding.encode(v["type"])) # pyright: ignore - elif field == "description": - tool_tokens += 2 - tool_tokens += len(encoding.encode(v["description"])) # pyright: ignore - elif field == "enum": - tool_tokens -= 3 - for o in v["enum"]: # pyright: ignore - tool_tokens += 3 - tool_tokens += len(encoding.encode(o)) # pyright: ignore - else: - trace_logger.warning(f"Not supported field {field}") - tool_tokens += 11 - if len(parameters["properties"]) == 0: # pyright: ignore - tool_tokens -= 2 - num_tokens += tool_tokens - num_tokens += 12 - return num_tokens - - def remaining_tokens(self, messages: Sequence[LLMMessage], tools: Sequence[Tool | ToolSchema] = []) -> int: - token_limit = _model_info.get_token_limit(self._create_args["model"]) - return token_limit - self.count_tokens(messages, tools) - - @property - def capabilities(self) -> ModelCapabilities: - return self._model_capabilities - - -class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): - def __init__(self, **kwargs: Unpack[OpenAIClientConfiguration]): - if "model" not in kwargs: - raise ValueError("model is required for OpenAIChatCompletionClient") - - model_capabilities: Optional[ModelCapabilities] = None - copied_args = dict(kwargs).copy() - if "model_capabilities" in kwargs: 
- model_capabilities = kwargs["model_capabilities"] - del copied_args["model_capabilities"] - - client = _openai_client_from_config(copied_args) - create_args = _create_args_from_config(copied_args) - self._raw_config = copied_args - super().__init__(client, create_args, model_capabilities) - - def __getstate__(self) -> Dict[str, Any]: - state = self.__dict__.copy() - state["_client"] = None - return state - - def __setstate__(self, state: Dict[str, Any]) -> None: - self.__dict__.update(state) - self._client = _openai_client_from_config(state["_raw_config"]) - - -class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): - def __init__(self, **kwargs: Unpack[AzureOpenAIClientConfiguration]): - if "model" not in kwargs: - raise ValueError("model is required for OpenAIChatCompletionClient") - - model_capabilities: Optional[ModelCapabilities] = None - copied_args = dict(kwargs).copy() - if "model_capabilities" in kwargs: - model_capabilities = kwargs["model_capabilities"] - del copied_args["model_capabilities"] - - client = _azure_openai_client_from_config(copied_args) - create_args = _create_args_from_config(copied_args) - self._raw_config = copied_args - super().__init__(client, create_args, model_capabilities) - - def __getstate__(self) -> Dict[str, Any]: - state = self.__dict__.copy() - state["_client"] = None - return state - - def __setstate__(self, state: Dict[str, Any]) -> None: - self.__dict__.update(state) - self._client = _azure_openai_client_from_config(state["_raw_config"]) diff --git a/python/packages/autogen-core/src/autogen_core/components/models/config/__init__.py b/python/packages/autogen-core/src/autogen_core/components/models/config/__init__.py deleted file mode 100644 index d1edcf8c62f9..000000000000 --- a/python/packages/autogen-core/src/autogen_core/components/models/config/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Awaitable, Callable, Dict, List, Literal, Optional, Union - -from typing_extensions import 
Required, TypedDict - -from .._model_client import ModelCapabilities - - -class ResponseFormat(TypedDict): - type: Literal["text", "json_object"] - - -class CreateArguments(TypedDict, total=False): - frequency_penalty: Optional[float] - logit_bias: Optional[Dict[str, int]] - max_tokens: Optional[int] - n: Optional[int] - presence_penalty: Optional[float] - response_format: ResponseFormat - seed: Optional[int] - stop: Union[Optional[str], List[str]] - temperature: Optional[float] - top_p: Optional[float] - user: str - - -AsyncAzureADTokenProvider = Callable[[], Union[str, Awaitable[str]]] - - -class BaseOpenAIClientConfiguration(CreateArguments, total=False): - model: str - api_key: str - timeout: Union[float, None] - max_retries: int - - -# See OpenAI docs for explanation of these parameters -class OpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False): - organization: str - base_url: str - # Not required - model_capabilities: ModelCapabilities - - -class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False): - # Azure specific - azure_endpoint: Required[str] - azure_deployment: str - api_version: Required[str] - azure_ad_token: str - azure_ad_token_provider: AsyncAzureADTokenProvider - # Must be provided - model_capabilities: Required[ModelCapabilities] diff --git a/python/packages/autogen-core/tests/test_tool_agent.py b/python/packages/autogen-core/tests/test_tool_agent.py index 322fdf6b7941..6184e9c78c83 100644 --- a/python/packages/autogen-core/tests/test_tool_agent.py +++ b/python/packages/autogen-core/tests/test_tool_agent.py @@ -1,6 +1,6 @@ import asyncio import json -from typing import Any, AsyncGenerator, List +from typing import Any, AsyncGenerator, List, Mapping, Optional, Sequence, Union import pytest from autogen_core.application import SingleThreadedAgentRuntime @@ -8,9 +8,13 @@ from autogen_core.components import FunctionCall from autogen_core.components.models import ( AssistantMessage, + ChatCompletionClient, + 
CreateResult, FunctionExecutionResult, FunctionExecutionResultMessage, - OpenAIChatCompletionClient, + LLMMessage, + ModelCapabilities, + RequestUsage, UserMessage, ) from autogen_core.components.tool_agent import ( @@ -20,13 +24,7 @@ ToolNotFoundException, tool_agent_caller_loop, ) -from autogen_core.components.tools import FunctionTool, Tool -from openai.resources.chat.completions import AsyncCompletions -from openai.types.chat.chat_completion import ChatCompletion, Choice -from openai.types.chat.chat_completion_chunk import ChatCompletionChunk -from openai.types.chat.chat_completion_message import ChatCompletionMessage -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall, Function -from openai.types.completion_usage import CompletionUsage +from autogen_core.components.tools import FunctionTool, Tool, ToolSchema def _pass_function(input: str) -> str: @@ -42,60 +40,6 @@ async def _async_sleep_function(input: str) -> str: return "pass" -class _MockChatCompletion: - def __init__(self, model: str = "gpt-4o") -> None: - self._saved_chat_completions: List[ChatCompletion] = [ - ChatCompletion( - id="id1", - choices=[ - Choice( - finish_reason="tool_calls", - index=0, - message=ChatCompletionMessage( - content=None, - tool_calls=[ - ChatCompletionMessageToolCall( - id="1", - type="function", - function=Function( - name="pass", - arguments=json.dumps({"input": "pass"}), - ), - ) - ], - role="assistant", - ), - ) - ], - created=0, - model=model, - object="chat.completion", - usage=CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0), - ), - ChatCompletion( - id="id2", - choices=[ - Choice( - finish_reason="stop", index=0, message=ChatCompletionMessage(content="Hello", role="assistant") - ) - ], - created=0, - model=model, - object="chat.completion", - usage=CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0), - ), - ] - self._curr_index = 0 - - async def mock_create( - self, *args: Any, **kwargs: 
Any - ) -> ChatCompletion | AsyncGenerator[ChatCompletionChunk, None]: - await asyncio.sleep(0.1) - completion = self._saved_chat_completions[self._curr_index] - self._curr_index += 1 - return completion - - @pytest.mark.asyncio async def test_tool_agent() -> None: runtime = SingleThreadedAgentRuntime() @@ -144,10 +88,59 @@ async def test_tool_agent() -> None: @pytest.mark.asyncio -async def test_caller_loop(monkeypatch: pytest.MonkeyPatch) -> None: - mock = _MockChatCompletion(model="gpt-4o-2024-05-13") - monkeypatch.setattr(AsyncCompletions, "create", mock.mock_create) - client = OpenAIChatCompletionClient(model="gpt-4o-2024-05-13", api_key="api_key") +async def test_caller_loop() -> None: + class MockChatCompletionClient(ChatCompletionClient): + async def create( + self, + messages: Sequence[LLMMessage], + tools: Sequence[Tool | ToolSchema] = [], + json_output: Optional[bool] = None, + extra_create_args: Mapping[str, Any] = {}, + cancellation_token: Optional[CancellationToken] = None, + ) -> CreateResult: + if len(messages) == 1: + return CreateResult( + content=[FunctionCall(id="1", name="pass", arguments=json.dumps({"input": "test"}))], + finish_reason="stop", + usage=RequestUsage(prompt_tokens=0, completion_tokens=0), + cached=False, + logprobs=None, + ) + return CreateResult( + content="Done", + finish_reason="stop", + usage=RequestUsage(prompt_tokens=0, completion_tokens=0), + cached=False, + logprobs=None, + ) + + def create_stream( + self, + messages: Sequence[LLMMessage], + tools: Sequence[Tool | ToolSchema] = [], + json_output: Optional[bool] = None, + extra_create_args: Mapping[str, Any] = {}, + cancellation_token: Optional[CancellationToken] = None, + ) -> AsyncGenerator[Union[str, CreateResult], None]: + raise NotImplementedError() + + def actual_usage(self) -> RequestUsage: + return RequestUsage(prompt_tokens=0, completion_tokens=0) + + def total_usage(self) -> RequestUsage: + return RequestUsage(prompt_tokens=0, completion_tokens=0) + + def 
count_tokens(self, messages: Sequence[LLMMessage], tools: Sequence[Tool | ToolSchema] = []) -> int: + return 0 + + def remaining_tokens(self, messages: Sequence[LLMMessage], tools: Sequence[Tool | ToolSchema] = []) -> int: + return 0 + + @property + def capabilities(self) -> ModelCapabilities: + return ModelCapabilities(vision=False, function_calling=True, json_output=False) + + client = MockChatCompletionClient() tools: List[Tool] = [FunctionTool(_pass_function, name="pass", description="Pass function")] runtime = SingleThreadedAgentRuntime() await runtime.register( diff --git a/python/packages/autogen-core/tests/test_tools.py b/python/packages/autogen-core/tests/test_tools.py index 70ec08469706..27a89748c659 100644 --- a/python/packages/autogen-core/tests/test_tools.py +++ b/python/packages/autogen-core/tests/test_tools.py @@ -4,7 +4,6 @@ import pytest from autogen_core.base import CancellationToken from autogen_core.components._function_utils import get_typed_signature -from autogen_core.components.models._openai_client import convert_tools from autogen_core.components.tools import BaseTool, FunctionTool from autogen_core.components.tools._base import ToolSchema from pydantic import BaseModel, Field, model_serializer @@ -323,29 +322,6 @@ def my_function(arg: int) -> int: assert tool.return_value_as_string(result) == "5" -def test_convert_tools_accepts_both_func_tool_and_schema() -> None: - def my_function(arg: str, other: Annotated[int, "int arg"], nonrequired: int = 5) -> MyResult: - return MyResult(result="test") - - tool = FunctionTool(my_function, description="Function tool.") - schema = tool.schema - - converted_tool_schema = convert_tools([tool, schema]) - - assert len(converted_tool_schema) == 2 - assert converted_tool_schema[0] == converted_tool_schema[1] - - -def test_convert_tools_accepts_both_tool_and_schema() -> None: - tool = MyTool() - schema = tool.schema - - converted_tool_schema = convert_tools([tool, schema]) - - assert 
len(converted_tool_schema) == 2 - assert converted_tool_schema[0] == converted_tool_schema[1] - - @pytest.mark.asyncio async def test_func_tool_return_list() -> None: def my_function() -> List[int]: diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executor/aca_dynamic_sessions/__init__.py b/python/packages/autogen-ext/src/autogen_ext/code_executor/aca_dynamic_sessions/__init__.py deleted file mode 100644 index 009997c41abc..000000000000 --- a/python/packages/autogen-ext/src/autogen_ext/code_executor/aca_dynamic_sessions/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -import warnings -from typing import Any - -from ...code_executors import ACADynamicSessionsCodeExecutor - - -class AzureContainerCodeExecutor(ACADynamicSessionsCodeExecutor): - """AzureContainerCodeExecutor has been renamed and moved to autogen_ext.code_executors.ACADynamicSessionsCodeExecutor""" - - def __init__(self, *args: Any, **kwargs: Any) -> None: - warnings.warn( - "AzureContainerCodeExecutor has been renamed and moved to autogen_ext.code_executors.ACADynamicSessionsCodeExecutor", - DeprecationWarning, - stacklevel=2, - ) - super().__init__(*args, **kwargs) - - -__all__ = [ - "AzureContainerCodeExecutor", -] diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executor/docker_executor/__init__.py b/python/packages/autogen-ext/src/autogen_ext/code_executor/docker_executor/__init__.py deleted file mode 100644 index 66719114300d..000000000000 --- a/python/packages/autogen-ext/src/autogen_ext/code_executor/docker_executor/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -import warnings - -from ...code_executors import DockerCommandLineCodeExecutor - -warnings.warn( - "DockerCommandLineCodeExecutor moved to autogen_ext.code_executors.DockerCommandLineCodeExecutor", - DeprecationWarning, - stacklevel=2, -) - -__all__ = ["DockerCommandLineCodeExecutor"] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/__init__.py 
b/python/packages/autogen-ext/src/autogen_ext/models/__init__.py index d39c1d9bf247..80533f80575e 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/__init__.py @@ -2,6 +2,13 @@ AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient, ) +from ._openai.config import AzureOpenAIClientConfiguration, OpenAIClientConfiguration from ._reply_chat_completion_client import ReplayChatCompletionClient -__all__ = ["AzureOpenAIChatCompletionClient", "OpenAIChatCompletionClient", "ReplayChatCompletionClient"] +__all__ = [ + "AzureOpenAIClientConfiguration", + "AzureOpenAIChatCompletionClient", + "OpenAIClientConfiguration", + "OpenAIChatCompletionClient", + "ReplayChatCompletionClient", +] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py index b6729a70d11e..53abfcc58796 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py @@ -49,3 +49,6 @@ class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False) azure_ad_token_provider: AsyncAzureADTokenProvider # Must be provided model_capabilities: Required[ModelCapabilities] + + +__all__ = ["AzureOpenAIClientConfiguration", "OpenAIClientConfiguration"] diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py deleted file mode 100644 index 4d401fc7ef1f..000000000000 --- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -import warnings - -from ...tools import LangChainToolAdapter - -warnings.warn("LangChainToolAdapter moved to autogen_ext.tools.LangChainToolAdapter", DeprecationWarning, stacklevel=2) - -__all__ = ["LangChainToolAdapter"] diff 
--git a/python/packages/autogen-ext/tests/models/test_openai_model_client.py b/python/packages/autogen-ext/tests/models/test_openai_model_client.py index a51e33c0234a..cee3be5835b7 100644 --- a/python/packages/autogen-ext/tests/models/test_openai_model_client.py +++ b/python/packages/autogen-ext/tests/models/test_openai_model_client.py @@ -1,5 +1,5 @@ import asyncio -from typing import Any, AsyncGenerator, List, Tuple +from typing import Annotated, Any, AsyncGenerator, List, Tuple from unittest.mock import MagicMock import pytest @@ -15,17 +15,25 @@ SystemMessage, UserMessage, ) -from autogen_core.components.tools import FunctionTool +from autogen_core.components.tools import BaseTool, FunctionTool from autogen_ext.models import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient from autogen_ext.models._openai._model_info import resolve_model -from autogen_ext.models._openai._openai_client import calculate_vision_tokens +from autogen_ext.models._openai._openai_client import calculate_vision_tokens, convert_tools from openai.resources.chat.completions import AsyncCompletions from openai.types.chat.chat_completion import ChatCompletion, Choice from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice from openai.types.chat.chat_completion_message import ChatCompletionMessage from openai.types.completion_usage import CompletionUsage -from pydantic import BaseModel +from pydantic import BaseModel, Field + + +class MyResult(BaseModel): + result: str = Field(description="The other description.") + + +class MyArgs(BaseModel): + query: str = Field(description="The description.") class MockChunkDefinition(BaseModel): @@ -302,3 +310,38 @@ def test_openai_count_image_tokens(mock_size: Tuple[int, int], expected_num_toke # Directly call calculate_vision_tokens and check the result calculated_tokens = calculate_vision_tokens(mock_image, detail="auto") assert 
calculated_tokens == expected_num_tokens + + +def test_convert_tools_accepts_both_func_tool_and_schema() -> None: + def my_function(arg: str, other: Annotated[int, "int arg"], nonrequired: int = 5) -> MyResult: + return MyResult(result="test") + + tool = FunctionTool(my_function, description="Function tool.") + schema = tool.schema + + converted_tool_schema = convert_tools([tool, schema]) + + assert len(converted_tool_schema) == 2 + assert converted_tool_schema[0] == converted_tool_schema[1] + + +def test_convert_tools_accepts_both_tool_and_schema() -> None: + class MyTool(BaseTool[MyArgs, MyResult]): + def __init__(self) -> None: + super().__init__( + args_type=MyArgs, + return_type=MyResult, + name="TestTool", + description="Description of test tool.", + ) + + async def run(self, args: MyArgs, cancellation_token: CancellationToken) -> MyResult: + return MyResult(result="value") + + tool = MyTool() + schema = tool.schema + + converted_tool_schema = convert_tools([tool, schema]) + + assert len(converted_tool_schema) == 2 + assert converted_tool_schema[0] == converted_tool_schema[1] From 1e0b254d0aa011712328ac7f40b7788e37e63510 Mon Sep 17 00:00:00 2001 From: gagb Date: Fri, 22 Nov 2024 14:06:04 -0500 Subject: [PATCH 09/33] Update README.md with link to clarifications statement (#4318) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 71509427170e..b5bda7de8e0e 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ # AutoGen > [!IMPORTANT] -> +> - (11/14/24) ⚠️ In response to a number of asks to clarify and distinguish between official AutoGen and its forks that created confusion, we issued a [clarification statement](https://github.com/microsoft/autogen/discussions/4217). > - (10/13/24) Interested in the standard AutoGen as a prior user? Find it at the actively-maintained *AutoGen* [0.2 branch](https://github.com/microsoft/autogen/tree/0.2) and `autogen-agentchat~=0.2` PyPi package. 
> - (10/02/24) [AutoGen 0.4](https://microsoft.github.io/autogen/dev) is a from-the-ground-up rewrite of AutoGen. Learn more about the history, goals and future at [this blog post](https://microsoft.github.io/autogen/blog). We’re excited to work with the community to gather feedback, refine, and improve the project before we officially release 0.4. This is a big change, so AutoGen 0.2 is still available, maintained, and developed in the [0.2 branch](https://github.com/microsoft/autogen/tree/0.2). From 8f4d8c89c3cceaf18540f1b4904ada13bc120393 Mon Sep 17 00:00:00 2001 From: Xiaoyun Zhang Date: Fri, 22 Nov 2024 13:51:08 -0800 Subject: [PATCH 10/33] .NET add roleplay tool call orchestrator in AutoGen.OpenAI (#4323) * add roleplay tool call orchestrator * add chinese business workflow test * update --- .../src/AutoGen.OpenAI/AutoGen.OpenAI.csproj | 1 + .../RolePlayToolCallOrchestrator.cs | 133 +++++++++ .../RolePlayToolCallOrchestratorTests.cs | 269 ++++++++++++++++++ 3 files changed, 403 insertions(+) create mode 100644 dotnet/src/AutoGen.OpenAI/Orchestrator/RolePlayToolCallOrchestrator.cs create mode 100644 dotnet/test/AutoGen.OpenAI.Tests/RolePlayToolCallOrchestratorTests.cs diff --git a/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj b/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj index 7f00b63be86c..70c0f2b0d1ce 100644 --- a/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj +++ b/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj @@ -18,6 +18,7 @@ + diff --git a/dotnet/src/AutoGen.OpenAI/Orchestrator/RolePlayToolCallOrchestrator.cs b/dotnet/src/AutoGen.OpenAI/Orchestrator/RolePlayToolCallOrchestrator.cs new file mode 100644 index 000000000000..f088e1748e66 --- /dev/null +++ b/dotnet/src/AutoGen.OpenAI/Orchestrator/RolePlayToolCallOrchestrator.cs @@ -0,0 +1,133 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// RolePlayToolCallOrchestrator.cs + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using AutoGen.OpenAI.Extension; +using OpenAI.Chat; + +namespace AutoGen.OpenAI.Orchestrator; + +/// +/// Orchestrating group chat using role play tool call +/// +public partial class RolePlayToolCallOrchestrator : IOrchestrator +{ + public readonly ChatClient chatClient; + private readonly Graph? workflow; + + public RolePlayToolCallOrchestrator(ChatClient chatClient, Graph? workflow = null) + { + this.chatClient = chatClient; + this.workflow = workflow; + } + + public async Task GetNextSpeakerAsync( + OrchestrationContext context, + CancellationToken cancellationToken = default) + { + var candidates = context.Candidates.ToList(); + + if (candidates.Count == 0) + { + return null; + } + + if (candidates.Count == 1) + { + return candidates.First(); + } + + // if there's a workflow + // and the next available agent from the workflow is in the group chat + // then return the next agent from the workflow + if (this.workflow != null) + { + var lastMessage = context.ChatHistory.LastOrDefault(); + if (lastMessage == null) + { + return null; + } + var currentSpeaker = candidates.First(candidates => candidates.Name == lastMessage.From); + var nextAgents = await this.workflow.TransitToNextAvailableAgentsAsync(currentSpeaker, context.ChatHistory, cancellationToken); + nextAgents = nextAgents.Where(nextAgent => candidates.Any(candidate => candidate.Name == nextAgent.Name)); + candidates = nextAgents.ToList(); + if (!candidates.Any()) + { + return null; + } + + if (candidates is { Count: 1 }) + { + return candidates.First(); + } + } + + // In this case, since there are more than one available agents from the workflow for the next speaker + // We need to invoke LLM to select the next speaker via select next speaker function + + var chatHistoryStringBuilder = new StringBuilder(); + foreach (var 
message in context.ChatHistory) + { + var chatHistoryPrompt = $"{message.From}: {message.GetContent()}"; + + chatHistoryStringBuilder.AppendLine(chatHistoryPrompt); + } + + var chatHistory = chatHistoryStringBuilder.ToString(); + + var prompt = $""" + # Task: Select the next speaker + + You are in a role-play game. Carefully read the conversation history and select the next speaker from the available roles. + + # Conversation + {chatHistory} + + # Available roles + - {string.Join(",", candidates.Select(candidate => candidate.Name))} + + Select the next speaker from the available roles and provide a reason for your selection. + """; + + // enforce the next speaker to be selected by the LLM + var option = new ChatCompletionOptions + { + ToolChoice = ChatToolChoice.CreateFunctionChoice(this.SelectNextSpeakerFunctionContract.Name), + }; + + option.Tools.Add(this.SelectNextSpeakerFunctionContract.ToChatTool()); + var toolCallMiddleware = new FunctionCallMiddleware( + functions: [this.SelectNextSpeakerFunctionContract], + functionMap: new Dictionary>> + { + [this.SelectNextSpeakerFunctionContract.Name] = this.SelectNextSpeakerWrapper, + }); + + var selectAgent = new OpenAIChatAgent( + chatClient, + "admin", + option) + .RegisterMessageConnector() + .RegisterMiddleware(toolCallMiddleware); + + var reply = await selectAgent.SendAsync(prompt); + + var nextSpeaker = candidates.FirstOrDefault(candidate => candidate.Name == reply.GetContent()); + + return nextSpeaker; + } + + /// + /// Select the next speaker by name and reason + /// + [Function] + public async Task SelectNextSpeaker(string name, string reason) + { + return name; + } +} diff --git a/dotnet/test/AutoGen.OpenAI.Tests/RolePlayToolCallOrchestratorTests.cs b/dotnet/test/AutoGen.OpenAI.Tests/RolePlayToolCallOrchestratorTests.cs new file mode 100644 index 000000000000..807bf41e9479 --- /dev/null +++ b/dotnet/test/AutoGen.OpenAI.Tests/RolePlayToolCallOrchestratorTests.cs @@ -0,0 +1,269 @@ +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// RolePlayToolCallOrchestratorTests.cs + +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using AutoGen.OpenAI.Orchestrator; +using AutoGen.Tests; +using Azure.AI.OpenAI; +using FluentAssertions; +using Moq; +using OpenAI; +using OpenAI.Chat; +using Xunit; + +namespace AutoGen.OpenAI.Tests; + +public class RolePlayToolCallOrchestratorTests +{ + [Fact] + public async Task ItReturnNullWhenNoCandidateIsAvailableAsync() + { + var chatClient = Mock.Of(); + var orchestrator = new RolePlayToolCallOrchestrator(chatClient); + var context = new OrchestrationContext + { + Candidates = [], + ChatHistory = [], + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker.Should().BeNull(); + } + + [Fact] + public async Task ItReturnCandidateWhenOnlyOneCandidateIsAvailableAsync() + { + var chatClient = Mock.Of(); + var alice = new EchoAgent("Alice"); + var orchestrator = new RolePlayToolCallOrchestrator(chatClient); + var context = new OrchestrationContext + { + Candidates = [alice], + ChatHistory = [], + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker.Should().Be(alice); + } + + [Fact] + public async Task ItSelectNextSpeakerFromWorkflowIfProvided() + { + var workflow = new Graph(); + var alice = new EchoAgent("Alice"); + var bob = new EchoAgent("Bob"); + var charlie = new EchoAgent("Charlie"); + workflow.AddTransition(Transition.Create(alice, bob)); + workflow.AddTransition(Transition.Create(bob, charlie)); + workflow.AddTransition(Transition.Create(charlie, alice)); + + var client = Mock.Of(); + var orchestrator = new RolePlayToolCallOrchestrator(client, workflow); + var context = new OrchestrationContext + { + Candidates = [alice, bob, charlie], + ChatHistory = + [ + new TextMessage(Role.User, "Hello, Bob", from: "Alice"), + ], + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker.Should().Be(bob); + } + + [Fact] + public async 
Task ItReturnNullIfNoAvailableAgentFromWorkflowAsync() + { + var workflow = new Graph(); + var alice = new EchoAgent("Alice"); + var bob = new EchoAgent("Bob"); + workflow.AddTransition(Transition.Create(alice, bob)); + + var client = Mock.Of(); + var orchestrator = new RolePlayToolCallOrchestrator(client, workflow); + var context = new OrchestrationContext + { + Candidates = [alice, bob], + ChatHistory = + [ + new TextMessage(Role.User, "Hello, Alice", from: "Bob"), + ], + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker.Should().BeNull(); + } + + [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")] + public async Task GPT_3_5_CoderReviewerRunnerTestAsync() + { + var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new Exception("Please set AZURE_OPENAI_ENDPOINT environment variable."); + var key = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? throw new Exception("Please set AZURE_OPENAI_API_KEY environment variable."); + var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable."); + var openaiClient = new AzureOpenAIClient(new Uri(endpoint), new System.ClientModel.ApiKeyCredential(key)); + var chatClient = openaiClient.GetChatClient(deployName); + + await BusinessWorkflowTest(chatClient); + await CoderReviewerRunnerTestAsync(chatClient); + } + + [ApiKeyFact("OPENAI_API_KEY")] + public async Task GPT_4o_CoderReviewerRunnerTestAsync() + { + var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? 
throw new InvalidOperationException("OPENAI_API_KEY is not set"); + var model = "gpt-4o"; + var openaiClient = new OpenAIClient(apiKey); + var chatClient = openaiClient.GetChatClient(model); + + await BusinessWorkflowTest(chatClient); + await CoderReviewerRunnerTestAsync(chatClient); + } + + [ApiKeyFact("OPENAI_API_KEY")] + public async Task GPT_4o_mini_CoderReviewerRunnerTestAsync() + { + var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new InvalidOperationException("OPENAI_API_KEY is not set"); + var model = "gpt-4o-mini"; + var openaiClient = new OpenAIClient(apiKey); + var chatClient = openaiClient.GetChatClient(model); + + await BusinessWorkflowTest(chatClient); + await CoderReviewerRunnerTestAsync(chatClient); + } + + /// + /// This test is to mimic the conversation among coder, reviewer and runner. + /// The coder will write the code, the reviewer will review the code, and the runner will run the code. + /// + /// + /// + private async Task CoderReviewerRunnerTestAsync(ChatClient client) + { + var coder = new EchoAgent("Coder"); + var reviewer = new EchoAgent("Reviewer"); + var runner = new EchoAgent("Runner"); + var user = new EchoAgent("User"); + var initializeMessage = new List + { + new TextMessage(Role.User, "Hello, I am user, I will provide the coding task, please write the code first, then review and run it", from: "User"), + new TextMessage(Role.User, "Hello, I am coder, I will write the code", from: "Coder"), + new TextMessage(Role.User, "Hello, I am reviewer, I will review the code", from: "Reviewer"), + new TextMessage(Role.User, "Hello, I am runner, I will run the code", from: "Runner"), + new TextMessage(Role.User, "how to print 'hello world' using C#", from: user.Name), + }; + + var chatHistory = new List() + { + new TextMessage(Role.User, """ + ```csharp + Console.WriteLine("Hello World"); + ``` + """, from: coder.Name), + new TextMessage(Role.User, "The code looks good", from: reviewer.Name), + new 
TextMessage(Role.User, "The code runs successfully, the output is 'Hello World'", from: runner.Name), + }; + + var orchestrator = new RolePlayToolCallOrchestrator(client); + foreach (var message in chatHistory) + { + var context = new OrchestrationContext + { + Candidates = [coder, reviewer, runner, user], + ChatHistory = initializeMessage, + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker!.Name.Should().Be(message.From); + initializeMessage.Add(message); + } + + // the last next speaker should be the user + var lastSpeaker = await orchestrator.GetNextSpeakerAsync(new OrchestrationContext + { + Candidates = [coder, reviewer, runner, user], + ChatHistory = initializeMessage, + }); + + lastSpeaker!.Name.Should().Be(user.Name); + } + + // test if the tool call orchestrator still run business workflow when the conversation is not in English + private async Task BusinessWorkflowTest(ChatClient client) + { + var ceo = new EchoAgent("乙方首席执行官"); + var pm = new EchoAgent("乙方项目经理"); + var dev = new EchoAgent("乙方开发人员"); + var user = new EchoAgent("甲方"); + var initializeMessage = new List + { + new TextMessage(Role.User, "你好,我是你们的甲方", from: user.Name), + new TextMessage(Role.User, "你好,我是乙方首席执行官,我将负责对接甲方和给项目经理及开发人员分配任务", from: ceo.Name), + new TextMessage(Role.User, "你好,我是乙方项目经理,我将负责项目的进度和质量", from: pm.Name), + new TextMessage(Role.User, "你好,我是乙方开发人员 我将负责项目的具体开发", from: dev.Name), + new TextMessage(Role.User, "开发一个淘宝,预算1W", from: user.Name), + }; + + var workflow = new Graph(); + workflow.AddTransition(Transition.Create(ceo, pm)); + workflow.AddTransition(Transition.Create(ceo, dev)); + workflow.AddTransition(Transition.Create(pm, ceo)); + workflow.AddTransition(Transition.Create(dev, ceo)); + workflow.AddTransition(Transition.Create(user, ceo)); + workflow.AddTransition(Transition.Create(ceo, user)); + + var chatHistory = new List() + { + new TextMessage(Role.User, """ + 项目经理,如何使用1W预算开发一个淘宝 + """, from: ceo.Name), + new TextMessage(Role.User, 
""" + 对于1万预算开发淘宝类网站,以下是关键建议: + 技术选择: + - 使用开源电商系统节省成本, 选择便宜但稳定的云服务器和域名,预算2000元/年 + - 核心功能优先 + - 人员安排: + - 找1位全栈开发,负责系统搭建(6000元) + - 1位兼职UI设计(2000元) + - 进度规划: + - 基础功能1个月完成,后续根据运营情况逐步优化。 + """, from: pm.Name), + new TextMessage(Role.User, "好的,开发人员,请根据项目经理的规划开始开发", from: ceo.Name), + new TextMessage(Role.User, """ + 好的,已开发完毕 + ```html + + ``` + """, from: dev.Name), + new TextMessage(Role.User, "好的,项目已完成,甲方请付款", from: ceo.Name), + }; + + var orchestrator = new RolePlayToolCallOrchestrator(client, workflow); + + foreach (var message in chatHistory) + { + var context = new OrchestrationContext + { + Candidates = [ceo, pm, dev, user], + ChatHistory = initializeMessage, + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker!.Name.Should().Be(message.From); + initializeMessage.Add(message); + } + + // the last next speaker should be the user + var lastSpeaker = await orchestrator.GetNextSpeakerAsync(new OrchestrationContext + { + Candidates = [ceo, pm, dev, user], + ChatHistory = initializeMessage, + }); + + lastSpeaker!.Name.Should().Be(user.Name); + } +} From 0b5eaf1240e05f0ef05595c92c0d33a9850d0dc4 Mon Sep 17 00:00:00 2001 From: Thai Nguyen Date: Sat, 23 Nov 2024 22:07:21 +0700 Subject: [PATCH 11/33] Agent name termination (#4123) --- .../src/autogen_agentchat/task/__init__.py | 2 ++ .../autogen_agentchat/task/_terminations.py | 35 ++++++++++++++++++- .../tests/test_termination_condition.py | 25 +++++++++++++ 3 files changed, 61 insertions(+), 1 deletion(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py index dd7b6265ad44..e1e6766338d3 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py @@ -7,6 +7,7 @@ TextMentionTermination, TimeoutTermination, TokenUsageTermination, + SourceMatchTermination, ) __all__ = [ @@ -17,5 
+18,6 @@ "HandoffTermination", "TimeoutTermination", "ExternalTermination", + "SourceMatchTermination", "Console", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py index f8d79cef2850..81cb5cca7d6c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py @@ -1,5 +1,5 @@ import time -from typing import Sequence +from typing import Sequence, List from ..base import TerminatedException, TerminationCondition from ..messages import AgentMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage @@ -251,3 +251,36 @@ async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None async def reset(self) -> None: self._terminated = False self._setted = False + + +class SourceMatchTermination(TerminationCondition): + """Terminate the conversation after a specific source responds. + + Args: + sources (List[str]): List of source names to terminate the conversation. + + Raises: + TerminatedException: If the termination condition has already been reached. 
+ """ + + def __init__(self, sources: List[str]) -> None: + self._sources = sources + self._terminated = False + + @property + def terminated(self) -> bool: + return self._terminated + + async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + if self._terminated: + raise TerminatedException("Termination condition has already been reached") + if not messages: + return None + for message in messages: + if message.source in self._sources: + self._terminated = True + return StopMessage(content=f"'{message.source}' answered", source="SourceMatchTermination") + return None + + async def reset(self) -> None: + self._terminated = False diff --git a/python/packages/autogen-agentchat/tests/test_termination_condition.py b/python/packages/autogen-agentchat/tests/test_termination_condition.py index c09e0e1c14ac..f4aa5d2a7203 100644 --- a/python/packages/autogen-agentchat/tests/test_termination_condition.py +++ b/python/packages/autogen-agentchat/tests/test_termination_condition.py @@ -1,6 +1,7 @@ import asyncio import pytest +from autogen_agentchat.base import TerminatedException from autogen_agentchat.messages import HandoffMessage, StopMessage, TextMessage from autogen_agentchat.task import ( ExternalTermination, @@ -10,6 +11,7 @@ TextMentionTermination, TimeoutTermination, TokenUsageTermination, + SourceMatchTermination, ) from autogen_core.components.models import RequestUsage @@ -242,3 +244,26 @@ async def test_external_termination() -> None: await termination.reset() assert await termination([]) is None + + +@pytest.mark.asyncio +async def test_source_match_termination() -> None: + termination = SourceMatchTermination(sources=["Assistant"]) + assert await termination([]) is None + + continue_messages = [TextMessage(content="Hello", source="agent"), TextMessage(content="Hello", source="user")] + assert await termination(continue_messages) is None + + terminate_messages = [ + TextMessage(content="Hello", source="agent"), + 
TextMessage(content="Hello", source="Assistant"), + TextMessage(content="Hello", source="user"), + ] + result = await termination(terminate_messages) + assert isinstance(result, StopMessage) + assert termination.terminated + + with pytest.raises(TerminatedException): + await termination([]) + await termination.reset() + assert not termination.terminated From caeab68f4b577212529781f3bad7626f9d55d204 Mon Sep 17 00:00:00 2001 From: Pramod Goyal <81946962+goyalpramod@users.noreply.github.com> Date: Sun, 24 Nov 2024 04:56:37 +0530 Subject: [PATCH 12/33] task: added warning when none is called in intervention handler (#4149) * task: added warning when none is called in intervention handler * add leading underscore to indicate private to _warn_if_none method in intervention.py * address comment of returning Any for result in intervention.py * Update intervention.py to remove redundant name change * Format and lint --------- Co-authored-by: Jack Gerrits Co-authored-by: Jack Gerrits --- .../src/autogen_core/base/intervention.py | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/python/packages/autogen-core/src/autogen_core/base/intervention.py b/python/packages/autogen-core/src/autogen_core/base/intervention.py index c9600ac9e13c..4b06fa19f94f 100644 --- a/python/packages/autogen-core/src/autogen_core/base/intervention.py +++ b/python/packages/autogen-core/src/autogen_core/base/intervention.py @@ -1,3 +1,4 @@ +import warnings from typing import Any, Awaitable, Callable, Protocol, final from autogen_core.base import AgentId @@ -14,6 +15,23 @@ class DropMessage: ... +def _warn_if_none(value: Any, handler_name: str) -> None: + """ + Utility function to check if the intervention handler returned None and issue a warning. + + Args: + value: The return value to check + handler_name: Name of the intervention handler method for the warning message + """ + if value is None: + warnings.warn( + f"Intervention handler {handler_name} returned None. 
This might be unintentional. " + "Consider returning the original message or DropMessage explicitly.", + RuntimeWarning, + stacklevel=2, + ) + + InterventionFunction = Callable[[Any], Any | Awaitable[type[DropMessage]]] @@ -27,10 +45,13 @@ async def on_response( class DefaultInterventionHandler(InterventionHandler): async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]: + _warn_if_none(message, "on_send") return message async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any | type[DropMessage]: + _warn_if_none(message, "on_publish") return message async def on_response(self, message: Any, *, sender: AgentId, recipient: AgentId | None) -> Any | type[DropMessage]: + _warn_if_none(message, "on_response") return message From c9835f3b52d69e0b6995c90069c57f48a0cb0874 Mon Sep 17 00:00:00 2001 From: Thomas Lin Date: Sun, 24 Nov 2024 07:56:28 +0800 Subject: [PATCH 13/33] Update README.md - add more information about using Azure OpenAI services (#4253) * Update README.md Add more information about using Azure OpenAI services * Update README.md Adjust the formatting to improve readability of the steps, making them easier to follow. --------- Co-authored-by: Hussein Mozannar --- .../packages/autogen-magentic-one/README.md | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/python/packages/autogen-magentic-one/README.md b/python/packages/autogen-magentic-one/README.md index 123d3580e393..12c8498bf979 100644 --- a/python/packages/autogen-magentic-one/README.md +++ b/python/packages/autogen-magentic-one/README.md @@ -67,22 +67,24 @@ You can install the Magentic-One package and then run the example code to see ho uv sync --all-extras source .venv/bin/activate ``` - Install magentic-one from source: + For Windows, run `.venv\Scripts\activate` to activate the environment. + +2. Install magentic-one from source: ```bash cd packages/autogen-magentic-one pip install -e . 
``` + + The following instructions are for running the example code: -The following instructions are for running the example code: - -2. Configure the environment variables for the chat completion client. See instructions below [Environment Configuration for Chat Completion Client](#environment-configuration-for-chat-completion-client). -3. Magentic-One code uses code execution, you need to have [Docker installed](https://docs.docker.com/engine/install/) to run any examples. -4. Magentic-One uses playwright to interact with web pages. You need to install the playwright dependencies. Run the following command to install the playwright dependencies: +3. Configure the environment variables for the chat completion client. See instructions below [Environment Configuration for Chat Completion Client](#environment-configuration-for-chat-completion-client). +4. Magentic-One code uses code execution, you need to have [Docker installed](https://docs.docker.com/engine/install/) to run any examples. +5. Magentic-One uses playwright to interact with web pages. You need to install the playwright dependencies. Run the following command to install the playwright dependencies: ```bash playwright install --with-deps chromium ``` -5. Now you can run the example code to see how the agents work together to accomplish a task. +6. Now you can run the example code to see how the agents work together to accomplish a task. > [!CAUTION] > The example code may download files from the internet, execute code, and interact with web pages. Ensure you are in a safe environment before running the example code. @@ -108,7 +110,7 @@ playwright install --with-deps chromium - hil_mode: (Optional) Enable human-in-the-loop mode (default: disabled) - save_screenshots: (Optional) Save screenshots of browser (default: disabled) -6. [Preview] We have a preview API for Magentic-One. +7. [Preview] We have a preview API for Magentic-One. 
You can use the `MagenticOneHelper` class to interact with the system and stream logs. See the [interface README](interface/README.md) for more details. @@ -139,6 +141,12 @@ To configure for Azure OpenAI service, set the following environment variables: } ``` +This project uses Azure OpenAI service with [Entra ID authentication by default](https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity). If you run the examples on a local device, you can use the Azure CLI cached credentials for testing: + +Log in to Azure using `az login`, and then run the examples. The account used must have [RBAC permissions](https://learn.microsoft.com/azure/ai-services/openai/how-to/role-based-access-control) like `Azure Cognitive Services OpenAI User` for the OpenAI service; otherwise, you will receive the error: Principal does not have access to API/Operation. + +Note that even if you are the owner of the subscription, you still need to grant the necessary Azure Cognitive Services OpenAI permissions to call the API. 
+ ### With OpenAI To configure for OpenAI, set the following environment variables: From 0ff1687485009c41f852f2347d3b806edb8dcca8 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Sat, 23 Nov 2024 19:24:24 -0800 Subject: [PATCH 14/33] Add UserProxyAgent in AgentChat API (#4255) * initial addition of a user proxy agent in agentchat, related to #3614 * fix typing/mypy errors * format fixes * format and pyright checks * update, add support for returning handoff message, add tests --------- Co-authored-by: Ryan Sweet Co-authored-by: Hussein Mozannar --- .../src/autogen_agentchat/agents/__init__.py | 2 + .../agents/_user_proxy_agent.py | 89 +++++++++++++++ .../tests/test_userproxy_agent.py | 103 ++++++++++++++++++ 3 files changed, 194 insertions(+) create mode 100644 python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py create mode 100644 python/packages/autogen-agentchat/tests/test_userproxy_agent.py diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py index cd435bf0228a..4cff9f45822b 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py @@ -4,6 +4,7 @@ from ._coding_assistant_agent import CodingAssistantAgent from ._society_of_mind_agent import SocietyOfMindAgent from ._tool_use_assistant_agent import ToolUseAssistantAgent +from ._user_proxy_agent import UserProxyAgent __all__ = [ "BaseChatAgent", @@ -13,4 +14,5 @@ "CodingAssistantAgent", "ToolUseAssistantAgent", "SocietyOfMindAgent", + "UserProxyAgent", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py new file mode 100644 index 000000000000..bdaca53ddc6c --- /dev/null +++ 
b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -0,0 +1,89 @@ +import asyncio +from inspect import iscoroutinefunction +from typing import Awaitable, Callable, List, Optional, Sequence, Union, cast + +from autogen_core.base import CancellationToken + +from ..base import Response +from ..messages import ChatMessage, HandoffMessage, TextMessage +from ._base_chat_agent import BaseChatAgent + +# Define input function types more precisely +SyncInputFunc = Callable[[str], str] +AsyncInputFunc = Callable[[str, Optional[CancellationToken]], Awaitable[str]] +InputFuncType = Union[SyncInputFunc, AsyncInputFunc] + + +class UserProxyAgent(BaseChatAgent): + """An agent that can represent a human user in a chat.""" + + def __init__( + self, + name: str, + description: str = "a human user", + input_func: Optional[InputFuncType] = None, + ) -> None: + """Initialize the UserProxyAgent.""" + super().__init__(name=name, description=description) + self.input_func = input_func or input + self._is_async = iscoroutinefunction(self.input_func) + + @property + def produced_message_types(self) -> List[type[ChatMessage]]: + """Message types this agent can produce.""" + return [TextMessage, HandoffMessage] + + def _get_latest_handoff(self, messages: Sequence[ChatMessage]) -> Optional[HandoffMessage]: + """Find the most recent HandoffMessage in the message sequence.""" + for message in reversed(messages): + if isinstance(message, HandoffMessage): + return message + return None + + async def _get_input(self, prompt: str, cancellation_token: Optional[CancellationToken]) -> str: + """Handle input based on function signature.""" + try: + if self._is_async: + # Cast to AsyncInputFunc for proper typing + async_func = cast(AsyncInputFunc, self.input_func) + return await async_func(prompt, cancellation_token) + else: + # Cast to SyncInputFunc for proper typing + sync_func = cast(SyncInputFunc, self.input_func) + loop = asyncio.get_event_loop() + return await 
loop.run_in_executor(None, sync_func, prompt) + + except asyncio.CancelledError: + raise + except Exception as e: + raise RuntimeError(f"Failed to get user input: {str(e)}") from e + + async def on_messages( + self, messages: Sequence[ChatMessage], cancellation_token: Optional[CancellationToken] = None + ) -> Response: + """Handle incoming messages by requesting user input.""" + try: + # Check for handoff first + handoff = self._get_latest_handoff(messages) + prompt = ( + f"Handoff received from {handoff.source}. Enter your response: " if handoff else "Enter your response: " + ) + + user_input = await self._get_input(prompt, cancellation_token) + + # Return appropriate message type based on handoff presence + if handoff: + return Response( + chat_message=HandoffMessage(content=user_input, target=handoff.source, source=self.name) + ) + else: + return Response(chat_message=TextMessage(content=user_input, source=self.name)) + + except asyncio.CancelledError: + raise + except Exception as e: + raise RuntimeError(f"Failed to get user input: {str(e)}") from e + + async def on_reset(self, cancellation_token: Optional[CancellationToken] = None) -> None: + """Reset agent state.""" + pass diff --git a/python/packages/autogen-agentchat/tests/test_userproxy_agent.py b/python/packages/autogen-agentchat/tests/test_userproxy_agent.py new file mode 100644 index 000000000000..2ef3053f09bf --- /dev/null +++ b/python/packages/autogen-agentchat/tests/test_userproxy_agent.py @@ -0,0 +1,103 @@ +import asyncio +from typing import Optional, Sequence + +import pytest +from autogen_agentchat.agents import UserProxyAgent +from autogen_agentchat.base import Response +from autogen_agentchat.messages import ChatMessage, HandoffMessage, TextMessage +from autogen_core.base import CancellationToken + + +@pytest.mark.asyncio +async def test_basic_input() -> None: + """Test basic message handling with custom input""" + + def custom_input(prompt: str) -> str: + return "The height of the eiffel tower 
is 324 meters. Aloha!" + + agent = UserProxyAgent(name="test_user", input_func=custom_input) + messages = [TextMessage(content="What is the height of the eiffel tower?", source="assistant")] + + response = await agent.on_messages(messages, CancellationToken()) + + assert isinstance(response, Response) + assert isinstance(response.chat_message, TextMessage) + assert response.chat_message.content == "The height of the eiffel tower is 324 meters. Aloha!" + assert response.chat_message.source == "test_user" + + +@pytest.mark.asyncio +async def test_async_input() -> None: + """Test handling of async input function""" + + async def async_input(prompt: str, token: Optional[CancellationToken] = None) -> str: + await asyncio.sleep(0.1) + return "async response" + + agent = UserProxyAgent(name="test_user", input_func=async_input) + messages = [TextMessage(content="test prompt", source="assistant")] + + response = await agent.on_messages(messages, CancellationToken()) + + assert isinstance(response.chat_message, TextMessage) + assert response.chat_message.content == "async response" + assert response.chat_message.source == "test_user" + + +@pytest.mark.asyncio +async def test_handoff_handling() -> None: + """Test handling of handoff messages""" + + def custom_input(prompt: str) -> str: + return "handoff response" + + agent = UserProxyAgent(name="test_user", input_func=custom_input) + + messages: Sequence[ChatMessage] = [ + TextMessage(content="Initial message", source="assistant"), + HandoffMessage(content="Handing off to user for confirmation", source="assistant", target="test_user"), + ] + + response = await agent.on_messages(messages, CancellationToken()) + + assert isinstance(response.chat_message, HandoffMessage) + assert response.chat_message.content == "handoff response" + assert response.chat_message.source == "test_user" + assert response.chat_message.target == "assistant" + + +@pytest.mark.asyncio +async def test_cancellation() -> None: + """Test cancellation during 
message handling""" + + async def cancellable_input(prompt: str, token: Optional[CancellationToken] = None) -> str: + await asyncio.sleep(0.1) + if token and token.is_cancelled(): + raise asyncio.CancelledError() + return "cancellable response" + + agent = UserProxyAgent(name="test_user", input_func=cancellable_input) + messages = [TextMessage(content="test prompt", source="assistant")] + token = CancellationToken() + + async def cancel_after_delay() -> None: + await asyncio.sleep(0.05) + token.cancel() + + with pytest.raises(asyncio.CancelledError): + await asyncio.gather(agent.on_messages(messages, token), cancel_after_delay()) + + +@pytest.mark.asyncio +async def test_error_handling() -> None: + """Test error handling with problematic input function""" + + def failing_input(_: str) -> str: + raise ValueError("Input function failed") + + agent = UserProxyAgent(name="test_user", input_func=failing_input) + messages = [TextMessage(content="test prompt", source="assistant")] + + with pytest.raises(RuntimeError) as exc_info: + await agent.on_messages(messages, CancellationToken()) + assert "Failed to get user input" in str(exc_info.value) From 02ef110e10e464535c873f911171deafe280c9b4 Mon Sep 17 00:00:00 2001 From: Gerardo Moreno Date: Sat, 23 Nov 2024 21:36:32 -0800 Subject: [PATCH 15/33] Selector Group Chat Tutorial (#4112) (#4326) * Selector Group Chat Tutorial (#4112) * update doc * update * Add custom selector function * Update doc --------- Co-authored-by: Eric Zhu --- .../docs/drawio/selector-group-chat.drawio | 52 ++ .../tutorial/selector-group-chat.ipynb | 503 ++++++++++++------ .../tutorial/selector-group-chat.svg | 3 + .../agentchat-user-guide/tutorial/swarm.ipynb | 12 +- .../agentchat-user-guide/tutorial/teams.ipynb | 9 +- 5 files changed, 407 insertions(+), 172 deletions(-) create mode 100644 python/packages/autogen-core/docs/drawio/selector-group-chat.drawio create mode 100644 
python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg diff --git a/python/packages/autogen-core/docs/drawio/selector-group-chat.drawio b/python/packages/autogen-core/docs/drawio/selector-group-chat.drawio new file mode 100644 index 000000000000..ad363bfd7fa1 --- /dev/null +++ b/python/packages/autogen-core/docs/drawio/selector-group-chat.drawio @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb index 0a3bb0fe3135..3377d3d47c34 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb @@ -11,46 +11,61 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The `SelectorGroupChat` implements a team coordination pattern where participants take turns publishing messages, with the next speaker selected by a generative model (LLM) based on the conversation context. This enables dynamic and context-aware multi-agent conversations.\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other participants,\n", + "with the next speaker selected by a generative model (e.g., an LLM) based on the shared context. 
\n", + "This enables dynamic and context-aware multi-agent collaboration.\n", "\n", - "\n", - "`SelectorGroupChat` provides several key features:\n", - "- Dynamic speaker selection using an LLM to analyze conversation context\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` provides several key features:\n", + "- Model-based speaker selection\n", "- Configurable participant roles and descriptions\n", "- Optional prevention of consecutive turns by the same speaker\n", "- Customizable selection prompting\n", + "- Customizable selection function to override the default model-based selection\n", + "\n", + "```{note}\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API.\n", + "If you need more control and customization that is not supported by this API,\n", + "you can take a look at the [Group Chat Pattern](../../core-user-guide/design-patterns/group-chat.ipynb)\n", + "in the Core API documentation and implement your own group chat logic.\n", + "```\n", + "\n", + "## How does it work?\n", "\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "but with a model-based next speaker selection mechanism.\n", + "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", + "the following steps are executed:\n", "\n", - "### Speaker Selection Process\n", + "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. You can override the model by providing a custom selection function.\n", + "2. The team prompts the selected speaker agent to provide a response, which is then **broadcasted** to all other participants.\n", + "3. 
The termination condition is checked to determine if the conversation should end, if not, the process repeats from step 1.\n", + "4. When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", "\n", - "The chat uses an LLM to select the next speaker by:\n", - "1. Analyzing the conversation history\n", - "2. Evaluating participant roles and descriptions\n", - "3. Using a configurable prompt template to make the selection\n", - "4. Validating that exactly one participant is selected\n", - "\n" + "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n", + "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n", + "\n", + "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Web Search and Analysis Example" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ - "import asyncio\n", - "from typing import List, Sequence\n", + "from typing import Sequence\n", "\n", - "from autogen_agentchat.agents import (\n", - " BaseChatAgent,\n", - " CodingAssistantAgent,\n", - " ToolUseAssistantAgent,\n", - ")\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import ChatMessage, StopMessage, TextMessage\n", - "from autogen_agentchat.task import TextMentionTermination\n", + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import AgentMessage\n", + "from autogen_agentchat.task import Console, MaxMessageTermination, TextMentionTermination\n", "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_core.base import CancellationToken\n", - "from autogen_core.components.tools import FunctionTool\n", "from autogen_ext.models import OpenAIChatCompletionClient" ] }, @@ -58,61 +73,167 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Defining Agents\n", - "The `UserProxyAgent` allows the user to input messages directly. This agent waits for user input and returns a text message or a stop message if the user decides to terminate the conversation." + "### Agents\n", + "\n", + "![Selector Group Chat](selector-group-chat.svg)\n", + "\n", + "This system uses three specialized agents:\n", + "\n", + "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n", + "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n", + "- **Data Analyst Agent**: An agent specialist in performing calculations equipped with `percentage_change_tool`. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "class UserProxyAgent(BaseChatAgent):\n", - " def __init__(self, name: str) -> None:\n", - " super().__init__(name, \"A human user.\")\n", - "\n", - " @property\n", - " def produced_message_types(self) -> List[type[ChatMessage]]:\n", - " return [TextMessage, StopMessage]\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " user_input = await asyncio.get_event_loop().run_in_executor(None, input, \"Enter your response: \")\n", - " if \"TERMINATE\" in user_input:\n", - " return Response(chat_message=StopMessage(content=\"User has terminated the conversation.\", source=self.name))\n", - " return Response(chat_message=TextMessage(content=user_input, source=self.name))\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass" + "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n", + "def search_web_tool(query: str) -> str:\n", + " if \"2006-2007\" in query:\n", + " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \"\"\"\n", + " elif \"2007-2008\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", + " elif \"2008-2009\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", + " return \"No data found.\"\n", + "\n", + "\n", + "def percentage_change_tool(start: float, end: float) -> float:\n", + " 
return ((end - start) / start) * 100" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n", + "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n", + "so it is recommended to provide meaningful names and descriptions." ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ - "async def flight_search(start: str, destination: str, date: str) -> str:\n", - " return \"\\n\".join(\n", - " [\n", - " f\"AC24 from {start} to {destination} on {date} is $500\",\n", - " f\"UA23 from {start} to {destination} on {date} is $450\",\n", - " f\"AL21 from {start} to {destination} on {date} is $400\",\n", - " ]\n", - " )\n", - "\n", - "\n", - "async def flight_booking(flight: str, date: str) -> str:\n", - " return f\"Booked flight {flight} on {date}\"" + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + "planning_agent = AssistantAgent(\n", + " \"PlanningAgent\",\n", + " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a planning agent.\n", + " Your job is to break down complex tasks into smaller, manageable subtasks.\n", + " Your team members are:\n", + " Web search agent: Searches for information\n", + " Data analyst: Performs calculations\n", + "\n", + " You only plan and delegate tasks - you do not execute them yourself.\n", + "\n", + " When assigning tasks, use this format:\n", + " 1. 
: \n", + "\n", + " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", + " \"\"\",\n", + ")\n", + "\n", + "web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"A web search agent.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a web search agent.\n", + " Your only tool is search_tool - use it to find information.\n", + " You make only one search call at a time.\n", + " Once you have the results, you never do calculations based on them.\n", + " \"\"\",\n", + ")\n", + "\n", + "data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"A data analyst agent. Useful for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"\n", + " You are a data analyst.\n", + " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", + " \"\"\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Workflow\n", + "\n", + "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", + "\n", + "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", + " ` : `\n", + "\n", + "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agent.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", + "\n", + "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", + "\n", + "5. 
The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", + "\n", + "6. The workflow continues with agents being dynamically selected until either:\n", + " - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", + " - An alternative termination condition is met (e.g., a maximum number of messages)\n", + "\n", + "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `ToolUseAssistantAgent` is responsible for calling external tools. In this example, two tools are defined: `flight_search` and `flight_booking`.\n", + "Let's create the team with two termination conditions:\n", + "{py:class}`~autogen_agentchat.task.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", + "and {py:class}`~autogen_agentchat.task.MaxMessageTermination` to limit the conversation to 25 messages to avoid infinite loop." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", + "max_messages_termination = MaxMessageTermination(max_messages=25)\n", + "termination = text_mention_termination | max_messages_termination\n", "\n", - "Additionally, the `CodingAssistantAgent` serves as a general travel assistant with predefined behavior specified in the `system_message`." + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " termination_condition=termination,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we run the team with a task to find information about an NBA player." 
] }, { @@ -124,144 +245,200 @@ "name": "stdout", "output_type": "stream", "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To answer your question, we need to separate this task into several subtasks:\n", "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:35:30.283450]:\u001b[0m\n", + "1. Web search agent: Find out who was the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. Web search agent: Find the total rebounds for that player in the 2007-2008 NBA season.\n", + "3. Web search agent: Find the total rebounds for that player in the 2008-2009 NBA season.\n", + "4. Data analyst: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n", "\n", - "Help user plan a trip and book a flight." 
+ "Let's start with these tasks.\n", + "[Prompt tokens: 159, Completion tokens: 130]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_js7ogBp0UDmHfvLo6BmWFpM1', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 279, Completion tokens: 26]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_js7ogBp0UDmHfvLo6BmWFpM1')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_G7ATvIq0rSjc8fqLdKQ5uWI4', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Xzw9bAvgfo40EjILophG5pnl', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 371, Completion tokens: 70]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_G7ATvIq0rSjc8fqLdKQ5uWI4'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Xzw9bAvgfo40EjILophG5pnl')]\n", + "---------- WebSearchAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\n", + "\n", + "Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased from 214 to 398. 
To calculate the percentage change:\n", + "\n", + "Percentage Change = \\(\\frac{(398 - 214)}{214} \\times 100\\)\n", + "\n", + "Now, a data analyst would calculate the actual percentage change based on these numbers.\n", + "[Prompt tokens: 506, Completion tokens: 107]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_76VkQ2nnKrwtuI1dmjLQ7G5P', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "[Prompt tokens: 383, Completion tokens: 20]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_76VkQ2nnKrwtuI1dmjLQ7G5P')]\n", + "---------- DataAnalystAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade. His total rebounds increased by approximately 85.98% between the 2007-2008 and 2008-2009 seasons.\n", + "[Prompt tokens: 424, Completion tokens: 52]\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, scoring 1,397 points. Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased by approximately 85.98%. 
\n", + "\n", + "TERMINATE\n", + "[Prompt tokens: 470, Completion tokens: 66]\n", + "---------- Summary ----------\n", + "Number of messages: 11\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 2592\n", + "Total completion tokens: 471\n", + "Duration: 11.95 seconds\n" ] }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=159, completion_tokens=130), content=\"To answer your question, we need to separate this task into several subtasks:\\n\\n1. Web search agent: Find out who was the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. Web search agent: Find the total rebounds for that player in the 2007-2008 NBA season.\\n3. Web search agent: Find the total rebounds for that player in the 2008-2009 NBA season.\\n4. 
Data analyst: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's start with these tasks.\"), ToolCallMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=26), content=[FunctionCall(id='call_js7ogBp0UDmHfvLo6BmWFpM1', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]), ToolCallResultMessage(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_js7ogBp0UDmHfvLo6BmWFpM1')]), ToolCallMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=371, completion_tokens=70), content=[FunctionCall(id='call_G7ATvIq0rSjc8fqLdKQ5uWI4', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Xzw9bAvgfo40EjILophG5pnl', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]), ToolCallResultMessage(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_G7ATvIq0rSjc8fqLdKQ5uWI4'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Xzw9bAvgfo40EjILophG5pnl')]), TextMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=506, completion_tokens=107), content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\\n\\nBetween the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased from 214 to 398. 
To calculate the percentage change:\\n\\nPercentage Change = \\\\(\\\\frac{(398 - 214)}{214} \\\\times 100\\\\)\\n\\nNow, a data analyst would calculate the actual percentage change based on these numbers.\"), ToolCallMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=383, completion_tokens=20), content=[FunctionCall(id='call_76VkQ2nnKrwtuI1dmjLQ7G5P', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]), ToolCallResultMessage(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_76VkQ2nnKrwtuI1dmjLQ7G5P')]), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=424, completion_tokens=52), content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade. His total rebounds increased by approximately 85.98% between the 2007-2008 and 2008-2009 seasons.'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=470, completion_tokens=66), content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, scoring 1,397 points. Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased by approximately 85.98%. \\n\\nTERMINATE\")], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"\n", + "\n", + "# Use asyncio.run(...) 
if you are running this in a script.\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwayne Wade was the Miami Heat player with the highest points in the 2006-2007 season, and the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Custom Selector Function" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Often times we want better control over the selection process. \n", + "To this end, we can set the `selector_func` argument with a custom selector function to override the default model-based selection.\n", + "For instance, we want the Planning Agent to speak immediately after any specialized agent to check the progress.\n", + "\n", + "```{note}\n", + "Returning `None` from the custom selector function will use the default model-based selection.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To solve this inquiry, let's break it down into smaller tasks again:\n", "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:35:48.275743], User:\u001b[0m\n", - "\n", - "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:35:50.795496], TravelAssistant:\u001b[0m\n", - "\n", - "I'd be happy to help you 
plan your trip! To get started, could you please provide me with the following details:\n", - "\n", - "1. Your departure city and the destination city.\n", - "2. Your travel dates (departure and return).\n", - "3. The number of travelers and their ages (if any children are involved).\n", - "4. Your budget for flights and accommodations, if you have one in mind.\n", - "5. Any specific activities or attractions you're interested in at the destination.\n", - "\n", - "Once I have this information, I can help you find the best options!\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:35:59.701486], User:\u001b[0m\n", - "\n", - "Traveling to toronto from new york\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:02.325330], TravelAssistant:\u001b[0m\n", - "\n", - "Great choice! Toronto is a vibrant city with a lot to offer. Now, could you please provide the following additional details to help me assist you better?\n", - "\n", - "1. What are your travel dates (departure and return)?\n", - "2. How many travelers will be going, and what are their ages?\n", - "3. Do you have a budget for the flight and accommodations?\n", - "4. Are there any specific activities or attractions you’re interested in while in Toronto?\n", - "\n", - "Once I have this information, I can help you find the best flights and suggestions for your trip!\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:20.633004], User:\u001b[0m\n", - "\n", - "leaving on december 7 and returning on 12\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:23.202871], TravelAssistant:\u001b[0m\n", - "\n", - "Thank you for the details! 
Here's what I have so far:\n", - "\n", - "- **Departure City:** New York\n", - "- **Destination City:** Toronto\n", - "- **Departure Date:** December 7\n", - "- **Return Date:** December 12\n", + "1. Find out who was the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. Find that player's total rebounds for the 2007-2008 NBA season.\n", + "3. Find that player's total rebounds for the 2008-2009 NBA season.\n", + "4. Calculate the percentage change in the player's total rebounds from the 2007-2008 to the 2008-2009 season.\n", "\n", - "Now, could you please provide:\n", + "Let's proceed with these tasks and find the necessary information.\n", + "[Prompt tokens: 595, Completion tokens: 115]\n", + "---------- WebSearchAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, who scored a total of 1,397 points.\n", "\n", - "1. The number of travelers and their ages.\n", - "2. Your budget for flights and accommodations (if applicable).\n", - "3. Any specific activities or attractions you're interested in while in Toronto.\n", + "In terms of his rebound statistics:\n", + "- In the 2007-2008 season, Dwyane Wade recorded 214 total rebounds.\n", + "- In the 2008-2009 season, he recorded 398 total rebounds.\n", "\n", - "This will help me provide more tailored options for your trip!\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:38.096554], User:\u001b[0m\n", + "To find the percentage change in his total rebounds, a data analyst would perform the following calculation:\n", "\n", - "just myself one adult\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:40.307824], FlightBroker:\u001b[0m\n", + "\\[\n", + "\\text{Percentage Change} = \\left( \\frac{398 - 214}{214} \\right) \\times 100\n", + "\\]\n", "\n", - "Thanks for the information! 
Here's what I have:\n", + "A data analyst would use the above numbers to determine the percentage change in his total rebounds between these two seasons.\n", + "[Prompt tokens: 794, Completion tokens: 154]\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with a total of 1,397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\n", "\n", - "- **Departure City:** New York\n", - "- **Destination City:** Toronto\n", - "- **Departure Date:** December 7\n", - "- **Return Date:** December 12\n", - "- **Number of Travelers:** 1 Adult\n", + "Let's have a data analyst calculate the percentage change: \n", "\n", - "Could you let me know if you have a budget for flights and accommodations? Additionally, are there any specific activities or attractions you're interested in while in Toronto? This will help me provide the best options for your trip!\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:45.875280], User:\u001b[0m\n", + "1. Data analyst: Calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons using the formula provided by the Web search agent.\n", + "[Prompt tokens: 878, Completion tokens: 116]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_Fh84DXp5MxFzutmKVvclw5Cz', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "[Prompt tokens: 942, Completion tokens: 20]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_Fh84DXp5MxFzutmKVvclw5Cz')]\n", + "---------- DataAnalystAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade. 
The percentage change in his total rebounds between the 2007-2008 season and the 2008-2009 season was approximately 85.98%.\n", + "[Prompt tokens: 983, Completion tokens: 56]\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with a total of 1,397 points. Between the 2007-2008 and 2008-2009 seasons, his total rebounds increased by approximately 85.98%. \n", "\n", - "that's it\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:50.925624], FlightBroker:\u001b[0m\n", - "\n", - "Your flights have been successfully booked! Here are the details:\n", - "\n", - "- **Departure:** New York to Toronto\n", - " - **Flight:** AL21\n", - " - **Date:** December 7, 2023\n", - "\n", - "- **Return:** Toronto to New York\n", - " - **Flight:** AL21\n", - " - **Date:** December 12, 2023\n", - "\n", - "If you need help with accommodations, activities, or anything else for your trip, feel free to let me know! \n", - "\n", - "TERMINATE" + "TERMINATE\n", + "[Prompt tokens: 1065, Completion tokens: 65]\n", + "---------- Summary ----------\n", + "Number of messages: 8\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 5257\n", + "Total completion tokens: 526\n", + "Duration: 11.98 seconds\n" ] }, { "data": { "text/plain": [ - "TeamRunResult(messages=[TextMessage(source='user', content='Help user plan a trip and book a flight.'), TextMessage(source='User', content=''), TextMessage(source='TravelAssistant', content=\"I'd be happy to help you plan your trip! To get started, could you please provide me with the following details:\\n\\n1. Your departure city and the destination city.\\n2. Your travel dates (departure and return).\\n3. The number of travelers and their ages (if any children are involved).\\n4. Your budget for flights and accommodations, if you have one in mind.\\n5. 
Any specific activities or attractions you're interested in at the destination.\\n\\nOnce I have this information, I can help you find the best options!\"), TextMessage(source='User', content='Traveling to toronto from new york'), TextMessage(source='TravelAssistant', content='Great choice! Toronto is a vibrant city with a lot to offer. Now, could you please provide the following additional details to help me assist you better?\\n\\n1. What are your travel dates (departure and return)?\\n2. How many travelers will be going, and what are their ages?\\n3. Do you have a budget for the flight and accommodations?\\n4. Are there any specific activities or attractions you’re interested in while in Toronto?\\n\\nOnce I have this information, I can help you find the best flights and suggestions for your trip!'), TextMessage(source='User', content='leaving on december 7 and returning on 12'), TextMessage(source='TravelAssistant', content=\"Thank you for the details! Here's what I have so far:\\n\\n- **Departure City:** New York\\n- **Destination City:** Toronto\\n- **Departure Date:** December 7\\n- **Return Date:** December 12\\n\\nNow, could you please provide:\\n\\n1. The number of travelers and their ages.\\n2. Your budget for flights and accommodations (if applicable).\\n3. Any specific activities or attractions you're interested in while in Toronto.\\n\\nThis will help me provide more tailored options for your trip!\"), TextMessage(source='User', content='just myself one adult'), TextMessage(source='FlightBroker', content=\"Thanks for the information! Here's what I have:\\n\\n- **Departure City:** New York\\n- **Destination City:** Toronto\\n- **Departure Date:** December 7\\n- **Return Date:** December 12\\n- **Number of Travelers:** 1 Adult\\n\\nCould you let me know if you have a budget for flights and accommodations? Additionally, are there any specific activities or attractions you're interested in while in Toronto? 
This will help me provide the best options for your trip!\"), TextMessage(source='User', content=\"that's it\"), StopMessage(source='FlightBroker', content='Your flights have been successfully booked! Here are the details:\\n\\n- **Departure:** New York to Toronto\\n - **Flight:** AL21\\n - **Date:** December 7, 2023\\n\\n- **Return:** Toronto to New York\\n - **Flight:** AL21\\n - **Date:** December 12, 2023\\n\\nIf you need help with accommodations, activities, or anything else for your trip, feel free to let me know! \\n\\nTERMINATE'), StopMessage(source='StopMessageTermination', content='Stop message received')])" + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=595, completion_tokens=115), content=\"To solve this inquiry, let's break it down into smaller tasks again:\\n\\n1. Find out who was the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. Find that player's total rebounds for the 2007-2008 NBA season.\\n3. Find that player's total rebounds for the 2008-2009 NBA season.\\n4. 
Calculate the percentage change in the player's total rebounds from the 2007-2008 to the 2008-2009 season.\\n\\nLet's proceed with these tasks and find the necessary information.\"), TextMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=794, completion_tokens=154), content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, who scored a total of 1,397 points.\\n\\nIn terms of his rebound statistics:\\n- In the 2007-2008 season, Dwyane Wade recorded 214 total rebounds.\\n- In the 2008-2009 season, he recorded 398 total rebounds.\\n\\nTo find the percentage change in his total rebounds, a data analyst would perform the following calculation:\\n\\n\\\\[\\n\\\\text{Percentage Change} = \\\\left( \\\\frac{398 - 214}{214} \\\\right) \\\\times 100\\n\\\\]\\n\\nA data analyst would use the above numbers to determine the percentage change in his total rebounds between these two seasons.'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=878, completion_tokens=116), content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with a total of 1,397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\\n\\nLet's have a data analyst calculate the percentage change: \\n\\n1. 
Data analyst: Calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons using the formula provided by the Web search agent.\"), ToolCallMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=942, completion_tokens=20), content=[FunctionCall(id='call_Fh84DXp5MxFzutmKVvclw5Cz', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]), ToolCallResultMessage(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_Fh84DXp5MxFzutmKVvclw5Cz')]), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=983, completion_tokens=56), content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade. The percentage change in his total rebounds between the 2007-2008 season and the 2008-2009 season was approximately 85.98%.'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=1065, completion_tokens=65), content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with a total of 1,397 points. Between the 2007-2008 and 2008-2009 seasons, his total rebounds increased by approximately 85.98%. 
\\n\\nTERMINATE')], stop_reason=\"Text 'TERMINATE' mentioned\")" ] }, - "execution_count": 4, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "user_proxy = UserProxyAgent(\"User\")\n", - "flight_broker = ToolUseAssistantAgent(\n", - " \"FlightBroker\",\n", - " description=\"An assistant for booking flights\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " registered_tools=[\n", - " FunctionTool(flight_search, description=\"Search for flights\"),\n", - " FunctionTool(flight_booking, description=\"Book a flight\"),\n", - " ],\n", - ")\n", - "travel_assistant = CodingAssistantAgent(\n", - " \"TravelAssistant\",\n", - " description=\"A travel assistant\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " system_message=\"You are a travel assistant.\",\n", - ")\n", + "def selector_func(messages: Sequence[AgentMessage]) -> str | None:\n", + " if messages[-1].source != planning_agent.name:\n", + " return planning_agent.name\n", + " return None\n", + "\n", "\n", - "termination = TextMentionTermination(\"TERMINATE\")\n", "team = SelectorGroupChat(\n", - " [user_proxy, flight_broker, travel_assistant],\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", " termination_condition=termination,\n", + " selector_func=selector_func,\n", ")\n", - "await team.run(task=\"Help user plan a trip and book a flight.\")" + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents." 
] } ], @@ -281,7 +458,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.6" + "version": "3.12.7" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg new file mode 100644 index 000000000000..4a4009992c4f --- /dev/null +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg @@ -0,0 +1,3 @@ + + +
Selector
Selector
Web Search Agent
Web Search Agent
Planning Agent
Planning Agent
Data Analyst
Agent
Data Analyst...
SelectorGroupChat
SelectorGroupChat
Application/User
Application/User
Task
Task
TaskResult
TaskResult
\ No newline at end of file diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb index 6ff950b10ef0..68e77690d45b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb @@ -26,11 +26,11 @@ "\n", "At its core, the {py:class}`~autogen_agentchat.teams.Swarm` team is a group chat\n", "where agents take turn to generate a response. \n", - "Similar to the {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", - "and {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`: participant agents\n", - "all share the same mesasge context.\n", + "Similar to {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", + "and {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, participant agents\n", + "broadcast their responses so all agents share the same mesasge context.\n", "\n", - "But different from the other two group chat teams, at each turn,\n", + "Different from the other two group chat teams, at each turn,\n", "**the speaker agent is selected based on the most recent\n", "{py:class}`~autogen_agentchat.messages.HandoffMessage` message in the context.**\n", "This naturally requires each agent in the team to be able to generate\n", @@ -76,7 +76,7 @@ "\n", "Additionally, we let the user interact with the agents, when agents handoff to `\"user\"`.\n", "\n", - "#### Workflow:\n", + "#### Workflow\n", "1. The **Travel Agent** initiates the conversation and evaluates the user's request.\n", "2. 
Based on the request:\n", " - For refund-related tasks, the Travel Agent hands off to the **Flights Refunder**.\n", @@ -282,7 +282,7 @@ "- **News Analyst**: An agent focused on gathering and summarizing recent news articles relevant to the stock, using tools such as `get_news`.\n", "- **Writer**: An agent tasked with compiling the findings from the stock and news analysis into a cohesive final report.\n", "\n", - "#### Workflow:\n", + "#### Workflow\n", "1. The **Planner** initiates the research process by delegating tasks to the appropriate agents in a step-by-step manner.\n", "2. Each agent performs its task independently and appends their work to the shared **message thread/history**. Rather than directly returning results to the planner, all agents contribute to and read from this shared message history. When agents generate their work using the LLM, they have access to this shared message history, which provides context and helps track the overall progress of the task.\n", "3. Once an agent completes its task, it hands off control back to the planner.\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb index 5c0b257dfec2..51976a5370d1 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb @@ -22,9 +22,9 @@ "\n", "AgentChat provides several preset teams that implements one or more [multi-agent design patterns](../../core-user-guide/design-patterns/index.md) to simplify development. 
Here is a list of the preset teams:\n", "\n", - "- {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`: All participants share context and takes turn to respond in a round-robin fashion.\n", - "- {py:class}`~autogen_agentchat.teams.SelectorGroupChat`: All participants share context and use a model-based selector (with custom override) to select the next agent to respond.\n", - "- {py:class}`~autogen_agentchat.teams.Swarm`: All participants share context and use {py:class}`~autogen_agentchat.messages.HandoffMessage`to pass control to the next agent.\n", + "- {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`: All participants share context and takes turn to respond in a round-robin fashion. We will cover this team in this section.\n", + "- {py:class}`~autogen_agentchat.teams.SelectorGroupChat`: All participants share context and use a model-based selector (with custom override) to select the next agent to respond. See [Selector Group Chat](./selector-group-chat.ipynb) for more details.\n", + "- {py:class}`~autogen_agentchat.teams.Swarm`: All participants share context and use {py:class}`~autogen_agentchat.messages.HandoffMessage`to pass control to the next agent. See [Swarm](./swarm.ipynb) for more details.\n", "\n", "At a high-level, a team API consists of the following methods:\n", "\n", @@ -42,6 +42,9 @@ "source": [ "## Round-Robin Group Chat\n", "\n", + "{py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` is a simple team that allows all agents to share context and take turns to respond in a round-robin fashion.\n", + "On its turn, each agent broadcasts its response to all other agents in the team, so all agents have the same context.\n", + "\n", "We will start by creating a team with a single {py:class}`~autogen_agentchat.agents.AssistantAgent` agent\n", "and {py:class}`~autogen_agentchat.task.TextMentionTermination`\n", "termination condition that stops the team when a word is detected." 
From 01dc56b244ab03a2846bc89451e5d1a76c6523d5 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Sun, 24 Nov 2024 01:36:30 -0500 Subject: [PATCH 16/33] Make grpc an optional dependency (#4315) * Make grpc an optional dependency * add note to the runtime docs * update version --------- Co-authored-by: Eric Zhu --- .../framework/distributed-agent-runtime.ipynb | 7 +++++++ python/packages/autogen-core/pyproject.toml | 6 +++++- .../src/autogen_core/application/_utils.py | 3 +++ .../autogen_core/application/_worker_runtime.py | 17 +++++++++++------ .../application/_worker_runtime_host.py | 11 +++++++---- .../_worker_runtime_host_servicer.py | 9 +++++++-- python/uv.lock | 8 ++++++-- 7 files changed, 46 insertions(+), 15 deletions(-) create mode 100644 python/packages/autogen-core/src/autogen_core/application/_utils.py diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index 833799c2096a..b0b9c5e3f1fc 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -21,6 +21,13 @@ "It also advertises the agents which they support to the host service,\n", "so the host service can deliver messages to the correct worker.\n", "\n", + "````{note}\n", + "The distributed agent runtime requires extra dependencies, install them using:\n", + "```bash\n", + "pip install autogen-core[grpc]==0.4.0.dev6\n", + "```\n", + "````\n", + "\n", "We can start a host service using {py:class}`~autogen_core.application.WorkerAgentRuntimeHost`." 
] }, diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index 9d564ded4b88..d0dd291e6369 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -20,7 +20,6 @@ dependencies = [ "aiohttp", "typing-extensions", "pydantic<3.0.0,>=2.0.0", - "grpcio~=1.62.0", "protobuf~=4.25.1", "tiktoken", "opentelemetry-api~=1.27.0", @@ -28,6 +27,11 @@ dependencies = [ "jsonref~=1.1.0", ] +[project.optional-dependencies] +grpc = [ + "grpcio~=1.62.0", +] + [tool.uv] dev-dependencies = [ "aiofiles", diff --git a/python/packages/autogen-core/src/autogen_core/application/_utils.py b/python/packages/autogen-core/src/autogen_core/application/_utils.py new file mode 100644 index 000000000000..10fbfd1b8c8a --- /dev/null +++ b/python/packages/autogen-core/src/autogen_core/application/_utils.py @@ -0,0 +1,3 @@ +GRPC_IMPORT_ERROR_STR = ( + "Distributed runtime features require additional dependencies. Install them with: pip install autogen-core[grpc]" +) diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py index ac3e00e4a4a9..2c405710876a 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py @@ -27,16 +27,11 @@ cast, ) -import grpc -from grpc.aio import StreamStreamCall from opentelemetry.trace import TracerProvider from typing_extensions import Self, deprecated -from autogen_core.base import JSON_DATA_CONTENT_TYPE -from autogen_core.base._serialization import MessageSerializer, SerializationRegistry -from autogen_core.base._type_helpers import ChannelArgumentType - from ..base import ( + JSON_DATA_CONTENT_TYPE, Agent, AgentId, AgentInstantiationContext, @@ -50,11 +45,19 @@ SubscriptionInstantiationContext, TopicId, ) +from ..base._serialization import 
MessageSerializer, SerializationRegistry +from ..base._type_helpers import ChannelArgumentType from ..components import TypeSubscription from ._helpers import SubscriptionManager, get_impl +from ._utils import GRPC_IMPORT_ERROR_STR from .protos import agent_worker_pb2, agent_worker_pb2_grpc from .telemetry import MessageRuntimeTracingConfig, TraceHelper, get_telemetry_grpc_metadata +try: + import grpc.aio +except ImportError as e: + raise ImportError(GRPC_IMPORT_ERROR_STR) from e + if TYPE_CHECKING: from .protos.agent_worker_pb2_grpc import AgentRpcAsyncStub @@ -140,6 +143,8 @@ async def _connect( # type: ignore ) -> None: stub: AgentRpcAsyncStub = agent_worker_pb2_grpc.AgentRpcStub(channel) # type: ignore + from grpc.aio import StreamStreamCall + # TODO: where do exceptions from reading the iterable go? How do we recover from those? recv_stream: StreamStreamCall[agent_worker_pb2.Message, agent_worker_pb2.Message] = stub.OpenChannel( # type: ignore QueueAsyncIterable(send_queue) diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py index e6585098bdbd..d7fee07ff1f8 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py +++ b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py @@ -3,11 +3,14 @@ import signal from typing import Optional, Sequence -import grpc - -from autogen_core.base._type_helpers import ChannelArgumentType - +from ..base._type_helpers import ChannelArgumentType +from ._utils import GRPC_IMPORT_ERROR_STR from ._worker_runtime_host_servicer import WorkerAgentRuntimeHostServicer + +try: + import grpc +except ImportError as e: + raise ImportError(GRPC_IMPORT_ERROR_STR) from e from .protos import agent_worker_pb2_grpc logger = logging.getLogger("autogen_core") diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py 
b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py index 1ed794c35f29..3da50c56f048 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py +++ b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py @@ -4,11 +4,16 @@ from asyncio import Future, Task from typing import Any, Dict, Set -import grpc - from ..base import TopicId from ..components import TypeSubscription from ._helpers import SubscriptionManager +from ._utils import GRPC_IMPORT_ERROR_STR + +try: + import grpc +except ImportError as e: + raise ImportError(GRPC_IMPORT_ERROR_STR) from e + from .protos import agent_worker_pb2, agent_worker_pb2_grpc logger = logging.getLogger("autogen_core") diff --git a/python/uv.lock b/python/uv.lock index db9fd753a92a..2a73eb34fb26 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -334,7 +334,6 @@ source = { editable = "packages/autogen-core" } dependencies = [ { name = "aiohttp" }, { name = "asyncio-atexit" }, - { name = "grpcio" }, { name = "jsonref" }, { name = "openai" }, { name = "opentelemetry-api" }, @@ -345,6 +344,11 @@ dependencies = [ { name = "typing-extensions" }, ] +[package.optional-dependencies] +grpc = [ + { name = "grpcio" }, +] + [package.dev-dependencies] dev = [ { name = "aiofiles" }, @@ -390,7 +394,7 @@ dev = [ requires-dist = [ { name = "aiohttp" }, { name = "asyncio-atexit" }, - { name = "grpcio", specifier = "~=1.62.0" }, + { name = "grpcio", marker = "extra == 'grpc'", specifier = "~=1.62.0" }, { name = "jsonref", specifier = "~=1.1.0" }, { name = "openai", specifier = ">=1.3" }, { name = "opentelemetry-api", specifier = "~=1.27.0" }, From d186a41ed1330d5abe6a262fd93bda770f9e594c Mon Sep 17 00:00:00 2001 From: Diego Colombo Date: Mon, 25 Nov 2024 00:32:56 +0000 Subject: [PATCH 17/33] ensure that cancellation token is passed in InvokeWithActivityAsync (#4329) * ensure that cancellation token is passed in 
InvokeWithActivityAsync * add comments and baggange is not nullable * store ncrunch settings * shange signature to have nullable activity at the end of Update * correct spelling case * primary contructor * add docs and make async interface accept cancellation tokens * address code ql error --- dotnet/AutoGen.v3.ncrunchsolution | 8 ++++ .../Abstractions/IAgentRuntime.cs | 6 +-- .../Abstractions/IAgentState.cs | 20 +++++++++- .../src/Microsoft.AutoGen/Agents/AgentBase.cs | 20 +++++----- .../Agents/AgentBaseExtensions.cs | 37 ++++++++++++++----- .../Microsoft.AutoGen/Agents/AgentRuntime.cs | 6 +-- .../Agents/Agents/AIAgent/InferenceAgent.cs | 16 ++++---- .../Services/Orleans/AgentStateGrain.cs | 6 ++- 8 files changed, 81 insertions(+), 38 deletions(-) create mode 100644 dotnet/AutoGen.v3.ncrunchsolution diff --git a/dotnet/AutoGen.v3.ncrunchsolution b/dotnet/AutoGen.v3.ncrunchsolution new file mode 100644 index 000000000000..13107d39442c --- /dev/null +++ b/dotnet/AutoGen.v3.ncrunchsolution @@ -0,0 +1,8 @@ + + + True + True + True + True + + \ No newline at end of file diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentRuntime.cs b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentRuntime.cs index aa5b5a13a6dc..6b3d4f98cdb2 100644 --- a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentRuntime.cs +++ b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentRuntime.cs @@ -15,8 +15,8 @@ public interface IAgentRuntime ValueTask SendRequestAsync(IAgentBase agent, RpcRequest request, CancellationToken cancellationToken = default); ValueTask SendMessageAsync(Message message, CancellationToken cancellationToken = default); ValueTask PublishEventAsync(CloudEvent @event, CancellationToken cancellationToken = default); - void Update(Activity? activity, RpcRequest request); - void Update(Activity? activity, CloudEvent cloudEvent); - (string?, string?) GetTraceIDandState(IDictionary metadata); + void Update(RpcRequest request, Activity? 
activity); + void Update(CloudEvent cloudEvent, Activity? activity); + (string?, string?) GetTraceIdAndState(IDictionary metadata); IDictionary ExtractMetadata(IDictionary metadata); } diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentState.cs b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentState.cs index 0a6784b54fd3..1b816b4ef3ad 100644 --- a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentState.cs +++ b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentState.cs @@ -3,8 +3,24 @@ namespace Microsoft.AutoGen.Abstractions; +/// +/// Interface for managing the state of an agent. +/// public interface IAgentState { - ValueTask ReadStateAsync(); - ValueTask WriteStateAsync(AgentState state, string eTag); + /// + /// Reads the current state of the agent asynchronously. + /// + /// A token to cancel the operation. + /// A task that represents the asynchronous read operation. The task result contains the current state of the agent. + ValueTask ReadStateAsync(CancellationToken cancellationToken = default); + + /// + /// Writes the specified state of the agent asynchronously. + /// + /// The state to write. + /// The ETag for concurrency control. + /// A token to cancel the operation. + /// A task that represents the asynchronous write operation. The task result contains the ETag of the written state. 
+ ValueTask WriteStateAsync(AgentState state, string eTag, CancellationToken cancellationToken = default); } diff --git a/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs b/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs index 13b2e851969e..345e6d34c826 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs @@ -93,7 +93,7 @@ protected internal async Task HandleRpcMessage(Message msg, CancellationToken ca { var activity = this.ExtractActivity(msg.CloudEvent.Type, msg.CloudEvent.Metadata); await this.InvokeWithActivityAsync( - static ((AgentBase Agent, CloudEvent Item) state) => state.Agent.CallHandler(state.Item), + static ((AgentBase Agent, CloudEvent Item) state, CancellationToken _) => state.Agent.CallHandler(state.Item), (this, msg.CloudEvent), activity, msg.CloudEvent.Type, cancellationToken).ConfigureAwait(false); @@ -103,7 +103,7 @@ await this.InvokeWithActivityAsync( { var activity = this.ExtractActivity(msg.Request.Method, msg.Request.Metadata); await this.InvokeWithActivityAsync( - static ((AgentBase Agent, RpcRequest Request) state) => state.Agent.OnRequestCoreAsync(state.Request), + static ((AgentBase Agent, RpcRequest Request) state, CancellationToken ct) => state.Agent.OnRequestCoreAsync(state.Request, ct), (this, msg.Request), activity, msg.Request.Method, cancellationToken).ConfigureAwait(false); @@ -142,8 +142,8 @@ public async Task StoreAsync(AgentState state, CancellationToken cancellationTok } public async Task ReadAsync(AgentId agentId, CancellationToken cancellationToken = default) where T : IMessage, new() { - var agentstate = await _context.ReadAsync(agentId, cancellationToken).ConfigureAwait(false); - return agentstate.FromAgentState(); + var agentState = await _context.ReadAsync(agentId, cancellationToken).ConfigureAwait(false); + return agentState.FromAgentState(); } private void OnResponseCore(RpcResponse response) { @@ -195,9 +195,9 @@ protected async Task 
RequestAsync(AgentId target, string method, Di activity?.SetTag("peer.service", target.ToString()); var completion = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); - _context.Update(activity, request); + _context.Update(request, activity); await this.InvokeWithActivityAsync( - static async ((AgentBase Agent, RpcRequest Request, TaskCompletionSource) state) => + static async ((AgentBase Agent, RpcRequest Request, TaskCompletionSource) state, CancellationToken ct) => { var (self, request, completion) = state; @@ -206,7 +206,7 @@ static async ((AgentBase Agent, RpcRequest Request, TaskCompletionSource + static async ((AgentBase Agent, CloudEvent Event) state, CancellationToken ct) => { - await state.Agent._context.PublishEventAsync(state.Event).ConfigureAwait(false); + await state.Agent._context.PublishEventAsync(state.Event, ct).ConfigureAwait(false); }, (this, item), activity, diff --git a/dotnet/src/Microsoft.AutoGen/Agents/AgentBaseExtensions.cs b/dotnet/src/Microsoft.AutoGen/Agents/AgentBaseExtensions.cs index ce1318a0d332..5d738e5fc383 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/AgentBaseExtensions.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/AgentBaseExtensions.cs @@ -5,15 +5,25 @@ namespace Microsoft.AutoGen.Agents; +/// +/// Provides extension methods for the class. +/// public static class AgentBaseExtensions { + /// + /// Extracts an from the given agent and metadata. + /// + /// The agent from which to extract the activity. + /// The name of the activity. + /// The metadata containing trace information. + /// The extracted or null if extraction fails. public static Activity? ExtractActivity(this AgentBase agent, string activityName, IDictionary metadata) { Activity? 
activity; - (var traceParent, var traceState) = agent.Context.GetTraceIDandState(metadata); + var (traceParent, traceState) = agent.Context.GetTraceIdAndState(metadata); if (!string.IsNullOrEmpty(traceParent)) { - if (ActivityContext.TryParse(traceParent, traceState, isRemote: true, out ActivityContext parentContext)) + if (ActivityContext.TryParse(traceParent, traceState, isRemote: true, out var parentContext)) { // traceParent is a W3CId activity = AgentBase.s_source.CreateActivity(activityName, ActivityKind.Server, parentContext); @@ -33,12 +43,9 @@ public static class AgentBaseExtensions var baggage = agent.Context.ExtractMetadata(metadata); - if (baggage is not null) + foreach (var baggageItem in baggage) { - foreach (var baggageItem in baggage) - { - activity.AddBaggage(baggageItem.Key, baggageItem.Value); - } + activity.AddBaggage(baggageItem.Key, baggageItem.Value); } } } @@ -49,7 +56,19 @@ public static class AgentBaseExtensions return activity; } - public static async Task InvokeWithActivityAsync(this AgentBase agent, Func func, TState state, Activity? activity, string methodName, CancellationToken cancellationToken = default) + + /// + /// Invokes a function asynchronously within the context of an . + /// + /// The type of the state parameter. + /// The agent invoking the function. + /// The function to invoke. + /// The state parameter to pass to the function. + /// The activity within which to invoke the function. + /// The name of the method being invoked. + /// A token to monitor for cancellation requests. + /// A task representing the asynchronous operation. + public static async Task InvokeWithActivityAsync(this AgentBase agent, Func func, TState state, Activity? 
activity, string methodName, CancellationToken cancellationToken = default) { if (activity is not null && activity.StartTimeUtc == default) { @@ -63,7 +82,7 @@ public static async Task InvokeWithActivityAsync(this AgentBase agent, F try { - await func(state).ConfigureAwait(false); + await func(state, cancellationToken).ConfigureAwait(false); if (activity is not null && activity.IsAllDataRequested) { activity.SetStatus(ActivityStatusCode.Ok); diff --git a/dotnet/src/Microsoft.AutoGen/Agents/AgentRuntime.cs b/dotnet/src/Microsoft.AutoGen/Agents/AgentRuntime.cs index fad372ce2f93..c36d456af32e 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/AgentRuntime.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/AgentRuntime.cs @@ -15,7 +15,7 @@ internal sealed class AgentRuntime(AgentId agentId, IAgentWorker worker, ILogger public ILogger Logger { get; } = logger; public IAgentBase? AgentInstance { get; set; } private DistributedContextPropagator DistributedContextPropagator { get; } = distributedContextPropagator; - public (string?, string?) GetTraceIDandState(IDictionary metadata) + public (string?, string?) GetTraceIdAndState(IDictionary metadata) { DistributedContextPropagator.ExtractTraceIdAndState(metadata, static (object? carrier, string fieldName, out string? fieldValue, out IEnumerable? fieldValues) => @@ -28,11 +28,11 @@ internal sealed class AgentRuntime(AgentId agentId, IAgentWorker worker, ILogger out var traceState); return (traceParent, traceState); } - public void Update(Activity? activity, RpcRequest request) + public void Update(RpcRequest request, Activity? activity = null) { DistributedContextPropagator.Inject(activity, request.Metadata, static (carrier, key, value) => ((IDictionary)carrier!)[key] = value); } - public void Update(Activity? activity, CloudEvent cloudEvent) + public void Update(CloudEvent cloudEvent, Activity? 
activity = null) { DistributedContextPropagator.Inject(activity, cloudEvent.Metadata, static (carrier, key, value) => ((IDictionary)carrier!)[key] = value); } diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Agents/AIAgent/InferenceAgent.cs b/dotnet/src/Microsoft.AutoGen/Agents/Agents/AIAgent/InferenceAgent.cs index a0383a3c219d..bf68467e3fa7 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Agents/AIAgent/InferenceAgent.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Agents/AIAgent/InferenceAgent.cs @@ -5,16 +5,14 @@ using Microsoft.AutoGen.Abstractions; using Microsoft.Extensions.AI; namespace Microsoft.AutoGen.Agents; -public abstract class InferenceAgent : AgentBase where T : IMessage, new() +public abstract class InferenceAgent( + IAgentRuntime context, + EventTypes typeRegistry, + IChatClient client) + : AgentBase(context, typeRegistry) + where T : IMessage, new() { - protected IChatClient ChatClient { get; } - public InferenceAgent( - IAgentRuntime context, - EventTypes typeRegistry, IChatClient client - ) : base(context, typeRegistry) - { - ChatClient = client; - } + protected IChatClient ChatClient { get; } = client; private Task CompleteAsync( IList chatMessages, diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/AgentStateGrain.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/AgentStateGrain.cs index 50d8c3ad4542..9905f6aebac6 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/AgentStateGrain.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/AgentStateGrain.cs @@ -7,7 +7,8 @@ namespace Microsoft.AutoGen.Agents; internal sealed class AgentStateGrain([PersistentState("state", "AgentStateStore")] IPersistentState state) : Grain, IAgentState { - public async ValueTask WriteStateAsync(AgentState newState, string eTag) + /// + public async ValueTask WriteStateAsync(AgentState newState, string eTag, CancellationToken cancellationToken = default) { // etags for optimistic concurrency control // if the Etag is null, 
its a new state @@ -27,7 +28,8 @@ public async ValueTask WriteStateAsync(AgentState newState, string eTag) return state.Etag; } - public ValueTask ReadStateAsync() + /// + public ValueTask ReadStateAsync(CancellationToken cancellationToken = default) { return ValueTask.FromResult(state.State); } From 08383445fd5eea30293c284d87192b6ea7f1fbbf Mon Sep 17 00:00:00 2001 From: Kartik Ramesh Date: Sun, 24 Nov 2024 19:52:51 -0600 Subject: [PATCH 18/33] Fix typo in Agent Runtime Environments doc (#4336) Co-authored-by: Jack Gerrits --- .../user-guide/core-user-guide/core-concepts/architecture.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/core-concepts/architecture.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/core-concepts/architecture.md index 3346cb68cb56..d3e38802ae3b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/core-concepts/architecture.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/core-concepts/architecture.md @@ -1,6 +1,6 @@ # Agent Runtime Environments -At the foundation level, the framework provides a _runtime envionment_, which facilitates +At the foundation level, the framework provides a _runtime environment_, which facilitates communication between agents, manages their identities and lifecycles, and enforce security and privacy boundaries. 
From 1e60b67f2e240740f076551a7f39afc3544f172f Mon Sep 17 00:00:00 2001 From: Will Dembinski Date: Sun, 24 Nov 2024 17:55:29 -0800 Subject: [PATCH 19/33] Readme Edits | Just cleanup edits (#4102) * Readme edits to support the team --------- Co-authored-by: Jack Gerrits --- docs/design/01 - Programming Model.md | 16 ++++--- docs/design/02 - Topics.md | 8 ++-- docs/design/03 - Agent Worker Protocol.md | 2 +- docs/design/04 - Agent and Topic ID Specs.md | 48 ++++++++++---------- 4 files changed, 39 insertions(+), 35 deletions(-) diff --git a/docs/design/01 - Programming Model.md b/docs/design/01 - Programming Model.md index 732a46c6aebe..6bfa9f9766ad 100644 --- a/docs/design/01 - Programming Model.md +++ b/docs/design/01 - Programming Model.md @@ -6,23 +6,27 @@ The programming model is basically publish-subscribe. Agents subscribe to events ## Events Delivered as CloudEvents -Each event in the system is defined using the [CloudEvents Specification](https://cloudevents.io/). This allows for a common event format that can be used across different systems and languages. In CloudEvents, each event has a Context Attributes that must unique *id* (eg a UUID) a *source* (a unique urn or path), a *type* (the namespace of the event - prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type: e.g *com.github.pull_request.opened* or -*com.example.object.deleted.v2*), and optionally fields describing the data schema/content-type or extensions. +Each event in the system is defined using the [CloudEvents Specification](https://cloudevents.io/). This allows for a common event format that can be used across different systems and languages. In CloudEvents, each event has "Context Attributes" that must include: + +1. *id* - A unique id (eg. a UUID). +2. *source* - A URI or URN indicating the event's origin. +3. *type* - The namespace of the event - prefixed with a reverse-DNS name. 
+ - The prefixed domain dictates the organization which defines the semantics of this event type: e.g `com.github.pull_request.opened` or `com.example.object.deleted.v2`), and optionally fields describing the data schema/content-type or extensions. ## Event Handlers -Each agent has a set of event handlers, that are bound to a specific match against a CloudEvents *type*. Event Handlers could match against an exact type or match for a pattern of events of a particular level in the type heirarchy (eg: *com.Microsoft.AutoGen.Agents.System.\** for all Events in the *System* namespace) Each event handler is a function that can change state, call models, access memory, call external tools, emit other events, and flow data to/from other systems. Each event handler can be a simple function or a more complex function that uses a state machine or other control logic. +Each agent has a set of event handlers, that are bound to a specific match against a CloudEvents *type*. Event Handlers could match against an exact type or match for a pattern of events of a particular level in the type heirarchy (eg: `com.Microsoft.AutoGen.Agents.System.*` for all Events in the `System` namespace) Each event handler is a function that can change state, call models, access memory, call external tools, emit other events, and flow data to/from other systems. Each event handler can be a simple function or a more complex function that uses a state machine or other control logic. ## Orchestrating Agents -If is possible to build a functional and scalable agent system that only reacts to external events. In many cases, however, you will want to orchestrate the agents to achieve a specific goal or follow a pre-determined workflow. In this case, you will need to build an orchestrator agent that manages the flow of events between agents. +It is possible to build a functional and scalable agent system that only reacts to external events. 
In many cases, however, you will want to orchestrate the agents to achieve a specific goal or follow a pre-determined workflow. In this case, you will need to build an orchestrator agent that manages the flow of events between agents. ## Built-in Event Types The AutoGen system comes with a set of built-in event types that are used to manage the system. These include: -* System Events - Events that are used to manage the system itself. These include events for starting and stopping the Agents, sending messages to all agents, and other system-level events. -* ? insert other types here ? +- *System Events* - Events that are used to manage the system itself. These include events for starting and stopping the Agents, sending messages to all agents, and other system-level events. +- *Insert other types here* ## Agent Contracts diff --git a/docs/design/02 - Topics.md b/docs/design/02 - Topics.md index aca577e748fa..7d7149c37c91 100644 --- a/docs/design/02 - Topics.md +++ b/docs/design/02 - Topics.md @@ -17,16 +17,16 @@ This document does not specify RPC/direct messaging A topic is identified by two components (called a `TopicId`): - [`type`](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - represents the type of event that occurs, this is static and defined in code - - SHOULD use reverse domain name notation to avoid naming conflicts. For example: `com.example.my-topic`. + - SHOULD use reverse domain name notation to avoid naming conflicts. For example: `com.example.my-topic`. 
- [`source`](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - represents where the event originated from, this is dynamic and based on the message itself - - SHOULD be a URI + - SHOULD be a URI Agent instances are identified by two components (called an `AgentId`): - `type` - represents the type of agent, this is static and defined in code - - MUST be a valid identifier as defined [here](https://docs.python.org/3/reference/lexical_analysis.html#identifiers) except that only the ASCII range is allowed + - MUST be a valid identifier as defined [here](https://docs.python.org/3/reference/lexical_analysis.html#identifiers) except that only the ASCII range is allowed - `key` - represents the instance of the agent type for the key - - SHOULD be a URI + - SHOULD be a URI For example: `GraphicDesigner:1234` diff --git a/docs/design/03 - Agent Worker Protocol.md b/docs/design/03 - Agent Worker Protocol.md index 49d9e867191b..81a9b9b7e97a 100644 --- a/docs/design/03 - Agent Worker Protocol.md +++ b/docs/design/03 - Agent Worker Protocol.md @@ -22,7 +22,7 @@ Agents are never explicitly created or destroyed. When a request is received for ## Worker protocol flow -The worker protocol has three phases, following the lifetime of the worker: initiation, operation, and termination. +The worker protocol has three phases, following the lifetime of the worker: initialization, operation, and termination. ### Initialization diff --git a/docs/design/04 - Agent and Topic ID Specs.md b/docs/design/04 - Agent and Topic ID Specs.md index 22a8a08894fb..ee872ab2ac1e 100644 --- a/docs/design/04 - Agent and Topic ID Specs.md +++ b/docs/design/04 - Agent and Topic ID Specs.md @@ -8,23 +8,23 @@ This document describes the structure, constraints, and behavior of Agent IDs an #### type -* Type: `string` -* Description: The agent type is not an agent class. 
It associates an agent with a specific factory function, which produces instances of agents of the same agent `type`. For example, different factory functions can produce the same agent class but with different constructor perameters. -* Constraints: UTF8 and only contain alphanumeric letters (a-z) and (0-9), or underscores (_). A valid identifier cannot start with a number, or contain any spaces. -* Examples: - * `code_reviewer` - * `WebSurfer` - * `UserProxy` +- Type: `string` +- Description: The agent type is not an agent class. It associates an agent with a specific factory function, which produces instances of agents of the same agent `type`. For example, different factory functions can produce the same agent class but with different constructor perameters. +- Constraints: UTF8 and only contain alphanumeric letters (a-z) and (0-9), or underscores (\_). A valid identifier cannot start with a number, or contain any spaces. +- Examples: + - `code_reviewer` + - `WebSurfer` + - `UserProxy` #### key -* Type: `string` -* Description: The agent key is an instance identifier for the given agent `type` -* Constraints: UTF8 and only contain characters between (inclusive) ascii 32 (space) and 126 (~). -* Examples: - * `default` - * A memory address - * a UUID string +- Type: `string` +- Description: The agent key is an instance identifier for the given agent `type` +- Constraints: UTF8 and only contain characters between (inclusive) ascii 32 (space) and 126 (~). +- Examples: + - `default` + - A memory address + - a UUID string ## Topic ID @@ -32,16 +32,16 @@ This document describes the structure, constraints, and behavior of Agent IDs an #### type -* Type: `string` -* Description: topic type is usually defined by application code to mark the type of messages the topic is for. -* Constraints: UTF8 and only contain alphanumeric letters (a-z) and (0-9), or underscores (_). A valid identifier cannot start with a number, or contain any spaces. 
-* Examples: - * `GitHub_Issues` +- Type: `string` +- Description: Topic type is usually defined by application code to mark the type of messages the topic is for. +- Constraints: UTF8 and only contain alphanumeric letters (a-z) and (0-9), or underscores (\_). A valid identifier cannot start with a number, or contain any spaces. +- Examples: + - `GitHub_Issues` #### source -* Type: `string` -* Description: Topic source is the unique identifier for a topic within a topic type. It is typically defined by application data. -* Constraints: UTF8 and only contain characters between (inclusive) ascii 32 (space) and 126 (~). -* Examples: - * `github.com/{repo_name}/issues/{issue_number}` \ No newline at end of file +- Type: `string` +- Description: Topic source is the unique identifier for a topic within a topic type. It is typically defined by application data. +- Constraints: UTF8 and only contain characters between (inclusive) ascii 32 (space) and 126 (~). +- Examples: + - `github.com/{repo_name}/issues/{issue_number}` From 341417e6367fca9d49a26b2011eea1c7f891fd6c Mon Sep 17 00:00:00 2001 From: Mohammad Mazraeh Date: Mon, 25 Nov 2024 14:06:35 +0000 Subject: [PATCH 20/33] add tolerance for empty choices (#4311) * add tolerance for empty choices Signed-off-by: Mohammad Mazraeh * address pr comments Signed-off-by: Mohammad Mazraeh * Update python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py Co-authored-by: Jack Gerrits * Update python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py Co-authored-by: Jack Gerrits * address pr comments Signed-off-by: Mohammad Mazraeh * Update python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py Co-authored-by: Jack Gerrits * Update _openai_client.py --------- Signed-off-by: Mohammad Mazraeh Co-authored-by: Ryan Sweet Co-authored-by: Jack Gerrits --- .../models/_openai/_openai_client.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git 
a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py index 7faefa44c4a8..999e5083f665 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py @@ -556,6 +556,8 @@ async def create_stream( json_output: Optional[bool] = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: Optional[CancellationToken] = None, + *, + max_consecutive_empty_chunk_tolerance: int = 0, ) -> AsyncGenerator[Union[str, CreateResult], None]: """ Creates an AsyncGenerator that will yield a stream of chat completions based on the provided messages and tools. @@ -566,6 +568,7 @@ async def create_stream( json_output (Optional[bool], optional): If True, the output will be in JSON format. Defaults to None. extra_create_args (Mapping[str, Any], optional): Additional arguments for the creation process. Default to `{}`. cancellation_token (Optional[CancellationToken], optional): A token to cancel the operation. Defaults to None. + max_consecutive_empty_chunk_tolerance (int): The maximum number of consecutive empty chunks to tolerate before raising a ValueError. This seems to only be needed to set when using `AzureOpenAIChatCompletionClient`. Defaults to 0. Yields: AsyncGenerator[Union[str, CreateResult], None]: A generator yielding the completion results as they are produced. @@ -636,6 +639,8 @@ async def create_stream( full_tool_calls: Dict[int, FunctionCall] = {} completion_tokens = 0 logprobs: Optional[List[ChatCompletionTokenLogprob]] = None + empty_chunk_count = 0 + while True: try: chunk_future = asyncio.ensure_future(anext(stream)) @@ -643,6 +648,20 @@ async def create_stream( cancellation_token.link_future(chunk_future) chunk = await chunk_future + # This is to address a bug in AzureOpenAIChatCompletionClient. OpenAIChatCompletionClient works fine. 
+ # https://github.com/microsoft/autogen/issues/4213 + if len(chunk.choices) == 0: + empty_chunk_count += 1 + if max_consecutive_empty_chunk_tolerance == 0: + raise ValueError( + "Consecutive empty chunks found. Change max_empty_consecutive_chunk_tolerance to increase empty chunk tolerance" + ) + elif empty_chunk_count >= max_consecutive_empty_chunk_tolerance: + raise ValueError("Exceeded the threshold of receiving consecutive empty chunks") + continue + else: + empty_chunk_count = 0 + # to process usage chunk in streaming situations # add stream_options={"include_usage": True} in the initialization of OpenAIChatCompletionClient(...) # However the different api's From b2ae4d1203b4dc095ab65df79b56b51b3dd8fd55 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Mon, 25 Nov 2024 12:34:52 -0500 Subject: [PATCH 21/33] Add warnings for deprecated azure oai config changes (#4317) * Add warnings for deprecated azure oai config changes * Update docs and usages, simplify capabilities --- .../tutorial/models.ipynb | 8 +--- .../cookbook/azure-openai-with-aad-auth.md | 8 +--- .../cookbook/structured-output-agent.ipynb | 10 ++--- .../framework/model-clients.ipynb | 10 ++--- .../models/_openai/_openai_client.py | 38 ++++++++++++------- .../models/_openai/config/__init__.py | 6 +-- .../tests/models/test_openai_model_client.py | 1 + 7 files changed, 37 insertions(+), 44 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb index 2adc862e541b..5251e17c098f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb @@ -137,16 +137,12 @@ "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n", "\n", "az_model_client = 
AzureOpenAIChatCompletionClient(\n", - " model=\"{your-azure-deployment}\",\n", + " azure_deployment=\"{your-azure-deployment}\",\n", + " model=\"{model-name, such as gpt-4o}\",\n", " api_version=\"2024-06-01\",\n", " azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n", " azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n", " # api_key=\"sk-...\", # For key-based authentication.\n", - " model_capabilities={\n", - " \"vision\": True,\n", - " \"function_calling\": True,\n", - " \"json_output\": True,\n", - " },\n", ")" ] }, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md index c8e4b632bd03..b347ed7de251 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md @@ -24,15 +24,11 @@ token_provider = get_bearer_token_provider( ) client = AzureOpenAIChatCompletionClient( - model="{your-azure-deployment}", + azure_deployment="{your-azure-deployment}", + model="{model-name, such as gpt-4o}", api_version="2024-02-01", azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/", azure_ad_token_provider=token_provider, - model_capabilities={ - "vision":True, - "function_calling":True, - "json_output":True, - } ) ``` diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb index fa50d6da2797..95edbfa0c257 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb +++ 
b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb @@ -57,7 +57,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -79,15 +79,11 @@ "\n", "# Create the client with type-checked environment variables\n", "client = AzureOpenAIChatCompletionClient(\n", - " model=get_env_variable(\"AZURE_OPENAI_DEPLOYMENT_NAME\"),\n", + " azure_deployment=get_env_variable(\"AZURE_OPENAI_DEPLOYMENT_NAME\"),\n", + " model=get_env_variable(\"AZURE_OPENAI_MODEL\"),\n", " api_version=get_env_variable(\"AZURE_OPENAI_API_VERSION\"),\n", " azure_endpoint=get_env_variable(\"AZURE_OPENAI_ENDPOINT\"),\n", " api_key=get_env_variable(\"AZURE_OPENAI_API_KEY\"),\n", - " model_capabilities={\n", - " \"vision\": False,\n", - " \"function_calling\": True,\n", - " \"json_output\": True,\n", - " },\n", ")" ] }, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb index cb17886b964a..e074eb970e0e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb @@ -283,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -294,16 +294,12 @@ "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n", "\n", "az_model_client = AzureOpenAIChatCompletionClient(\n", - " model=\"{your-azure-deployment}\",\n", + " azure_deployment=\"{your-azure-deployment}\",\n", + " model=\"{model-name, such as gpt-4o}\",\n", " api_version=\"2024-06-01\",\n", " azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n", " 
azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n", " # api_key=\"sk-...\", # For key-based authentication.\n", - " model_capabilities={\n", - " \"vision\": True,\n", - " \"function_calling\": True,\n", - " \"json_output\": True,\n", - " },\n", ")" ] }, diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py index 999e5083f665..28dc81a338ee 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py @@ -86,10 +86,29 @@ def _azure_openai_client_from_config(config: Mapping[str, Any]) -> AsyncAzureOpe # Take a copy copied_config = dict(config).copy() + import warnings + + if "azure_deployment" not in copied_config and "model" in copied_config: + warnings.warn( + "Previous behavior of using the model name as the deployment name is deprecated and will be removed in 0.4. Please specify azure_deployment", + stacklevel=2, + ) + + if "azure_endpoint" not in copied_config and "base_url" in copied_config: + warnings.warn( + "Previous behavior of using the base_url as the endpoint is deprecated and will be removed in 0.4. Please specify azure_endpoint", + stacklevel=2, + ) + # Do some fixups copied_config["azure_deployment"] = copied_config.get("azure_deployment", config.get("model")) if copied_config["azure_deployment"] is not None: - copied_config["azure_deployment"] = copied_config["azure_deployment"].replace(".", "") + if "." in copied_config["azure_deployment"]: + warnings.warn( + "Previous behavior stripping '.' 
from the deployment name is deprecated and will be removed in 0.4", + stacklevel=2, + ) + copied_config["azure_deployment"] = copied_config["azure_deployment"].replace(".", "") copied_config["azure_endpoint"] = copied_config.get("azure_endpoint", copied_config.pop("base_url", None)) # Shave down the config to just the AzureOpenAIChatCompletionClient kwargs @@ -331,9 +350,7 @@ def __init__( model_capabilities: Optional[ModelCapabilities] = None, ): self._client = client - if model_capabilities is None and isinstance(client, AsyncAzureOpenAI): - raise ValueError("AzureOpenAIChatCompletionClient requires explicit model capabilities") - elif model_capabilities is None: + if model_capabilities is None: self._model_capabilities = _model_info.get_capabilities(create_args["model"]) else: self._model_capabilities = model_capabilities @@ -963,7 +980,7 @@ class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): api_version (str): The API version to use. **Required for Azure models.** azure_ad_token (str): The Azure AD token to use. Provide this or `azure_ad_token_provider` for token-based authentication. azure_ad_token_provider (Callable[[], Awaitable[str]]): The Azure AD token provider to use. Provide this or `azure_ad_token` for token-based authentication. - model_capabilities (ModelCapabilities): The capabilities of the model. **Required for Azure models.** + model_capabilities (ModelCapabilities): The capabilities of the model if default resolved values are not correct. api_key (optional, str): The API key to use, use this if you are using key based authentication. It is optional if you are using Azure AD token based authentication or `AZURE_OPENAI_API_KEY` environment variable. timeout (optional, int): The timeout for the request in seconds. max_retries (optional, int): The maximum number of retries to attempt. 
@@ -990,16 +1007,12 @@ class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") az_model_client = AzureOpenAIChatCompletionClient( - model="{your-azure-deployment}", + azure_deployment="{your-azure-deployment}", + model="{deployed-model, such as 'gpt-4o'}", api_version="2024-06-01", azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/", azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication. # api_key="sk-...", # For key-based authentication. `AZURE_OPENAI_API_KEY` environment variable can also be used instead. - model_capabilities={ - "vision": True, - "function_calling": True, - "json_output": True, - }, ) See `here `_ for how to use the Azure client directly or for more info. @@ -1007,9 +1020,6 @@ class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): """ def __init__(self, **kwargs: Unpack[AzureOpenAIClientConfiguration]): - if "model" not in kwargs: - raise ValueError("model is required for OpenAIChatCompletionClient") - model_capabilities: Optional[ModelCapabilities] = None copied_args = dict(kwargs).copy() if "model_capabilities" in kwargs: diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py index 53abfcc58796..8afff868293e 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py @@ -30,14 +30,14 @@ class BaseOpenAIClientConfiguration(CreateArguments, total=False): api_key: str timeout: Union[float, None] max_retries: int + model_capabilities: ModelCapabilities + """What functionality the model supports, determined by default from model name but is overriden if value passed.""" # See OpenAI docs for explanation of these parameters 
class OpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False): organization: str base_url: str - # Not required - model_capabilities: ModelCapabilities class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False): @@ -47,8 +47,6 @@ class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False) api_version: Required[str] azure_ad_token: str azure_ad_token_provider: AsyncAzureADTokenProvider - # Must be provided - model_capabilities: Required[ModelCapabilities] __all__ = ["AzureOpenAIClientConfiguration", "OpenAIClientConfiguration"] diff --git a/python/packages/autogen-ext/tests/models/test_openai_model_client.py b/python/packages/autogen-ext/tests/models/test_openai_model_client.py index cee3be5835b7..b2dc504abd0e 100644 --- a/python/packages/autogen-ext/tests/models/test_openai_model_client.py +++ b/python/packages/autogen-ext/tests/models/test_openai_model_client.py @@ -141,6 +141,7 @@ async def test_openai_chat_completion_client() -> None: @pytest.mark.asyncio async def test_azure_openai_chat_completion_client() -> None: client = AzureOpenAIChatCompletionClient( + azure_deployment="gpt-4o-1", model="gpt-4o", api_key="api_key", api_version="2020-08-04", From 1b2d42d4205a08ab9eba0ea46a7e996f14aa3000 Mon Sep 17 00:00:00 2001 From: Gerardo Moreno Date: Mon, 25 Nov 2024 10:09:06 -0800 Subject: [PATCH 22/33] Termination Conditions Tutorial (#4334) (#4339) * Termination Conditions Tutorial (#4334) * Fix format * Update python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb * Update python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb * Update python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb * update docs * update doc --------- Co-authored-by: Eric Zhu --- .../src/autogen_agentchat/task/__init__.py | 2 +- .../autogen_agentchat/task/_terminations.py | 2 +- 
.../tests/test_termination_condition.py | 2 +- .../tutorial/termination.ipynb | 271 ++++++++++++------ 4 files changed, 189 insertions(+), 88 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py index e1e6766338d3..d863c87cd13a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py @@ -3,11 +3,11 @@ ExternalTermination, HandoffTermination, MaxMessageTermination, + SourceMatchTermination, StopMessageTermination, TextMentionTermination, TimeoutTermination, TokenUsageTermination, - SourceMatchTermination, ) __all__ = [ diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py index 81cb5cca7d6c..0053ff9821bd 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py @@ -1,5 +1,5 @@ import time -from typing import Sequence, List +from typing import List, Sequence from ..base import TerminatedException, TerminationCondition from ..messages import AgentMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage diff --git a/python/packages/autogen-agentchat/tests/test_termination_condition.py b/python/packages/autogen-agentchat/tests/test_termination_condition.py index f4aa5d2a7203..ec6ff43e00ce 100644 --- a/python/packages/autogen-agentchat/tests/test_termination_condition.py +++ b/python/packages/autogen-agentchat/tests/test_termination_condition.py @@ -7,11 +7,11 @@ ExternalTermination, HandoffTermination, MaxMessageTermination, + SourceMatchTermination, StopMessageTermination, TextMentionTermination, TimeoutTermination, TokenUsageTermination, - SourceMatchTermination, ) from autogen_core.components.models 
import RequestUsage diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb index 4a1cfe42cf6c..67c7582a311e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb @@ -6,56 +6,77 @@ "source": [ "# Termination \n", "\n", - "\n", - "In the previous section, we explored how to define agents, and organize them into teams that can solve tasks by communicating (a conversation). However, conversations can go on forever, and in many cases, we need to know _when_ to stop them. This is the role of the termination condition.\n", + "In the previous section, we explored how to define agents, and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop them. This is the role of the termination condition.\n", "\n", "AgentChat supports several termination condition by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n", "\n", - "A termination condition is a callable that takes a sequence of ChatMessage objects since the last time the condition was called, and returns a StopMessage if the conversation should be terminated, or None otherwise. 
Once a termination condition has been reached, it must be reset before it can be used again.\n", + "A termination condition is a callable that takes a sequece of {py:class}`~autogen_agentchat.messages.AgentMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n", + "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n", "\n", "Some important things to note about termination conditions: \n", - "- They are stateful, and must be reset before they can be used again. \n", - "- They can be combined using the AND and OR operators. \n", - "- They are implemented/enforced by the team, and not by the agents. An agent may signal or request termination e.g., by sending a StopMessage, but the team is responsible for enforcing it.\n" + "- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n", + "- They can be combined using the AND and OR operators.\n", + "\n", + "```{note}\n", + "For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n", + "the termination condition is called after each agent responds.\n", + "While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n", + "So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# AutoGen provides several built-in termination conditions: \n", + "1. 
{py:class}`~autogen_agentchat.task.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n", + "2. {py:class}`~autogen_agentchat.task.TextMentionTermination`: Stops when specific text or string is mentioned in a message (e.g., \"TERMINATE\").\n", + "3. {py:class}`~autogen_agentchat.task.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n", + "4. {py:class}`~autogen_agentchat.task.TimeoutTermination`: Stops after a specified duration in seconds.\n", + "5. {py:class}`~autogen_agentchat.task.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow application or user to provide input when an agent hands off to them.\n", + "6. {py:class}`~autogen_agentchat.task.SourceMatchTermination`: Stops after a specific agent responds.\n", + "7. {py:class}`~autogen_agentchat.task.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n", + "8. {py:class}`~autogen_agentchat.task.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To begin, let us define a simple team with only one agent and then explore how multiple termination conditions can be applied to guide the resulting behavior." + "To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "import logging\n", - "\n", - "from autogen_agentchat import EVENT_LOGGER_NAME\n", - "from autogen_agentchat.agents import CodingAssistantAgent\n", - "from autogen_agentchat.logging import ConsoleLogHandler\n", - "from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.task import Console, MaxMessageTermination, TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_ext.models import OpenAIChatCompletionClient\n", "\n", - "logger = logging.getLogger(EVENT_LOGGER_NAME)\n", - "logger.addHandler(ConsoleLogHandler())\n", - "logger.setLevel(logging.INFO)\n", - "\n", - "\n", "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o-2024-08-06\",\n", + " model=\"gpt-4o\",\n", " temperature=1,\n", " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", ")\n", "\n", - "writing_assistant_agent = CodingAssistantAgent(\n", - " name=\"writing_assistant_agent\",\n", - " system_message=\"You are a helpful assistant that solve tasks by generating text responses and code.\",\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + " \"primary\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create the critic agent.\n", + "critic_agent = AssistantAgent(\n", + " \"critic\",\n", " model_client=model_client,\n", + " system_message=\"Provide constructive feedback for every message. 
Respond with 'APPROVE' to when your feedbacks are addressed.\",\n", ")" ] }, @@ -63,9 +84,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## MaxMessageTermination \n", - "\n", - "The simplest termination condition is the {py:class}`~autogen_agentchat.teams.MaxMessageTermination` condition, which terminates the conversation after a fixed number of messages. \n" + "Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off." ] }, { @@ -77,62 +96,117 @@ "name": "stdout", "output_type": "stream", "text": [ - "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:28.807176]:\u001b[0m\n", - "\n", + "---------- user ----------\n", "Write a unique, Haiku about the weather in Paris\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:29.604935], writing_assistant_agent:\u001b[0m\n", - "\n", + "---------- primary ----------\n", "Gentle rain whispers, \n", - "Eiffel veiled in mist’s embrace, \n", - "Spring’s soft sigh in France.\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:30.168531], writing_assistant_agent:\u001b[0m\n", + "Cobblestones glisten softly— \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 30, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. 
Consider revising the first line to fit the structure.\n", "\n", - "Gentle rain whispers, \n", - "Eiffel veiled in mist’s embrace, \n", - "Spring’s soft sigh in France.\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.213291], writing_assistant_agent:\u001b[0m\n", - "\n", - "Gentle rain whispers, \n", - "Eiffel veiled in mist’s embrace, \n", - "Spring’s soft sigh in France.\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.213655], Termination:\u001b[0m\n", + "For example:\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", "\n", - "Maximal number of messages 3 reached, current message count: 3" + "This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n", + "[Prompt tokens: 70, Completion tokens: 120]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 100\n", + "Total completion tokens: 139\n", + "Duration: 3.34 seconds\n" ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. 
Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ "max_msg_termination = MaxMessageTermination(max_messages=3)\n", - "round_robin_team = RoundRobinGroupChat([writing_assistant_agent], termination_condition=max_msg_termination)\n", - "round_robin_team_result = await round_robin_team.run(task=\"Write a unique, Haiku about the weather in Paris\")" + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We see that the conversation is terminated after the specified number of messages have been sent by the agent." + "The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- primary ----------\n", + "Thank you for your feedback. 
Here is the revised Haiku:\n", + "\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 181, Completion tokens: 32]\n", + "---------- critic ----------\n", + "The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n", + "\n", + "APPROVE\n", + "[Prompt tokens: 234, Completion tokens: 54]\n", + "---------- primary ----------\n", + "Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\n", + "[Prompt tokens: 279, Completion tokens: 39]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 694\n", + "Total completion tokens: 125\n", + "Duration: 6.43 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. 
If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## StopMessageTermination\n", - "\n", - "In this scenario, the team terminates the conversation if any agent sends a `StopMessage`. So, when does an agent send a `StopMessage`? Typically, this is implemented in the `on_message` method of the agent, where the agent can check the incoming message and decide to send a `StopMessage` based on some condition. \n", - "\n", - "A common pattern here is prompt the agent (or some agent participating in the conversation) to emit a specific text string in it's response, which can be used to trigger the termination condition. \n", - "\n", - "In fact, if you review the code implementation for the default `CodingAssistantAgent` class provided by AgentChat, you will observe two things\n", - "- The default `system_message` instructs the agent to end their response with the word \"terminate\" if they deem the task to be completed\n", - "- in the `on_message` method, the agent checks if the incoming message contains the text \"terminate\" and returns a `StopMessage` if it does. " + "The team continued from where it left off, allowing the primary agent to respond to the feedback." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. 
For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n" ] }, { @@ -144,37 +218,64 @@ "name": "stdout", "output_type": "stream", "text": [ - "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.218855]:\u001b[0m\n", - "\n", + "---------- user ----------\n", "Write a unique, Haiku about the weather in Paris\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.752676], writing_assistant_agent:\u001b[0m\n", - "\n", - "Mist hugs the Eiffel, \n", - "Soft rain kisses cobblestones, \n", - "Autumn whispers past. \n", - "\n", - "TERMINATE\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.753265], Termination:\u001b[0m\n", + "---------- primary ----------\n", + "Spring breeze gently hums, \n", + "Cherry blossoms in full bloom— \n", + "Paris wakes to life.\n", + "[Prompt tokens: 467, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. 
Excellent work!\n", "\n", - "Stop message received" + "APPROVE\n", + "[Prompt tokens: 746, Completion tokens: 93]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Text 'APPROVE' mentioned\n", + "Total prompt tokens: 1213\n", + "Total completion tokens: 112\n", + "Duration: 2.75 seconds\n" ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "writing_assistant_agent = CodingAssistantAgent(\n", - " name=\"writing_assistant_agent\",\n", - " system_message=\"You are a helpful assistant that solve tasks by generating text responses and code. 
Respond with TERMINATE when the task is done.\",\n", - " model_client=model_client,\n", - ")\n", + "max_msg_termination = MaxMessageTermination(max_messages=10)\n", + "text_termination = TextMentionTermination(\"APPROVE\")\n", + "combined_termination = max_msg_termination | text_termination\n", "\n", - "text_termination = TextMentionTermination(\"TERMINATE\")\n", - "round_robin_team = RoundRobinGroupChat([writing_assistant_agent], termination_condition=text_termination)\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n", "\n", - "round_robin_team_result = await round_robin_team.run(task=\"Write a unique, Haiku about the weather in Paris\")" + "Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "combined_termination = max_msg_termination & text_termination" ] } ], @@ -194,7 +295,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.6" + "version": "3.11.5" } }, "nbformat": 4, From 9209b2ffc172d07aa6855994ba4ef20720c15919 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Mon, 25 Nov 2024 13:17:24 -0500 Subject: [PATCH 23/33] Remove duplicate model info (#4346) Co-authored-by: Eric Zhu --- .../components/models/_model_info.py | 122 ------------------ 1 file changed, 122 deletions(-) delete mode 100644 python/packages/autogen-core/src/autogen_core/components/models/_model_info.py diff --git a/python/packages/autogen-core/src/autogen_core/components/models/_model_info.py b/python/packages/autogen-core/src/autogen_core/components/models/_model_info.py deleted file mode 100644 index 2440d5b18682..000000000000 --- a/python/packages/autogen-core/src/autogen_core/components/models/_model_info.py +++ /dev/null @@ -1,122 +0,0 @@ -from typing import Dict - -from ._model_client import ModelCapabilities - -# Based on: https://platform.openai.com/docs/models/continuous-model-upgrades -# This is a moving target, so correctness is checked by the model value returned by openai against expected values at runtime`` -_MODEL_POINTERS = { - "gpt-4o": "gpt-4o-2024-08-06", - "gpt-4o-mini": "gpt-4o-mini-2024-07-18", - "gpt-4-turbo": "gpt-4-turbo-2024-04-09", - "gpt-4-turbo-preview": "gpt-4-0125-preview", - "gpt-4": "gpt-4-0613", - "gpt-4-32k": "gpt-4-32k-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613", -} - -_MODEL_CAPABILITIES: Dict[str, ModelCapabilities] = { - "gpt-4o-2024-08-06": { - "vision": True, - "function_calling": True, - "json_output": True, - }, - "gpt-4o-2024-05-13": { - "vision": True, - "function_calling": True, - "json_output": True, - }, - 
"gpt-4o-mini-2024-07-18": { - "vision": True, - "function_calling": True, - "json_output": True, - }, - "gpt-4-turbo-2024-04-09": { - "vision": True, - "function_calling": True, - "json_output": True, - }, - "gpt-4-0125-preview": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-4-1106-preview": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-4-1106-vision-preview": { - "vision": True, - "function_calling": False, - "json_output": False, - }, - "gpt-4-0613": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-4-32k-0613": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-0125": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-1106": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-instruct": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-0613": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-16k-0613": { - "vision": False, - "function_calling": True, - "json_output": True, - }, -} - -_MODEL_TOKEN_LIMITS: Dict[str, int] = { - "gpt-4o-2024-08-06": 128000, - "gpt-4o-2024-05-13": 128000, - "gpt-4o-mini-2024-07-18": 128000, - "gpt-4-turbo-2024-04-09": 128000, - "gpt-4-0125-preview": 128000, - "gpt-4-1106-preview": 128000, - "gpt-4-1106-vision-preview": 128000, - "gpt-4-0613": 8192, - "gpt-4-32k-0613": 32768, - "gpt-3.5-turbo-0125": 16385, - "gpt-3.5-turbo-1106": 16385, - "gpt-3.5-turbo-instruct": 4096, - "gpt-3.5-turbo-0613": 4096, - "gpt-3.5-turbo-16k-0613": 16385, -} - - -def resolve_model(model: str) -> str: - if model in _MODEL_POINTERS: - return _MODEL_POINTERS[model] - return model - - -def get_capabilities(model: str) -> ModelCapabilities: - resolved_model = resolve_model(model) - return _MODEL_CAPABILITIES[resolved_model] - - -def 
get_token_limit(model: str) -> int: - resolved_model = resolve_model(model) - return _MODEL_TOKEN_LIMITS[resolved_model] From 6bdd9c83b0adf5b48ac2de40b3f5a6d93d27fb78 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Mon, 25 Nov 2024 13:19:57 -0500 Subject: [PATCH 24/33] Add note for min python version (#4347) Co-authored-by: Eric Zhu --- .../docs/src/user-guide/agentchat-user-guide/installation.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md index ba3826fe2c37..b1dbca5192b7 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md @@ -64,6 +64,10 @@ Install the `autogen-agentchat` package using pip: pip install 'autogen-agentchat==0.4.0.dev6' ``` +```{note} +Python 3.10 or later is required. 
+``` + ## Install OpenAI for Model Client To use the OpenAI and Azure OpenAI models, you need to install the following From 9b967fc79a3abc262213dd950f7c9ed9ee10c0b7 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Mon, 25 Nov 2024 15:08:36 -0500 Subject: [PATCH 25/33] Update pyright and mypy (#4348) * Update pyright and mypy * fmt --- .../src/autogen_core/base/_serialization.py | 2 +- .../packages/autogen-core/tests/test_tools.py | 14 +++--- python/pyproject.toml | 4 +- python/uv.lock | 47 ++++++++++--------- 4 files changed, 35 insertions(+), 32 deletions(-) diff --git a/python/packages/autogen-core/src/autogen_core/base/_serialization.py b/python/packages/autogen-core/src/autogen_core/base/_serialization.py index 5c8e8cc772b0..51fd531feac5 100644 --- a/python/packages/autogen-core/src/autogen_core/base/_serialization.py +++ b/python/packages/autogen-core/src/autogen_core/base/_serialization.py @@ -195,7 +195,7 @@ def try_get_known_serializers_for_type(cls: type[Any]) -> list[MessageSerializer serializers: List[MessageSerializer[Any]] = [] if issubclass(cls, BaseModel): serializers.append(PydanticJsonMessageSerializer(cls)) - elif isinstance(cls, IsDataclass): + elif is_dataclass(cls): serializers.append(DataclassJsonMessageSerializer(cls)) elif issubclass(cls, Message): serializers.append(ProtobufMessageSerializer(cls)) diff --git a/python/packages/autogen-core/tests/test_tools.py b/python/packages/autogen-core/tests/test_tools.py index 27a89748c659..6632a7931e11 100644 --- a/python/packages/autogen-core/tests/test_tools.py +++ b/python/packages/autogen-core/tests/test_tools.py @@ -2,10 +2,12 @@ from typing import Annotated, List import pytest + from autogen_core.base import CancellationToken from autogen_core.components._function_utils import get_typed_signature from autogen_core.components.tools import BaseTool, FunctionTool from autogen_core.components.tools._base import ToolSchema + from pydantic import BaseModel, Field, model_serializer from pydantic_core 
import PydanticUndefined @@ -141,7 +143,7 @@ def my_function() -> str: sig = get_typed_signature(my_function) assert isinstance(sig, inspect.Signature) assert len(sig.parameters) == 0 - assert sig.return_annotation == str + assert sig.return_annotation is str def test_get_typed_signature_annotated() -> None: @@ -161,7 +163,7 @@ def my_function() -> "str": sig = get_typed_signature(my_function) assert isinstance(sig, inspect.Signature) assert len(sig.parameters) == 0 - assert sig.return_annotation == str + assert sig.return_annotation is str def test_func_tool() -> None: @@ -186,11 +188,11 @@ def my_function(my_arg: Annotated[str, "test description"]) -> str: assert issubclass(tool.args_type(), BaseModel) assert issubclass(tool.return_type(), str) assert tool.args_type().model_fields["my_arg"].description == "test description" - assert tool.args_type().model_fields["my_arg"].annotation == str + assert tool.args_type().model_fields["my_arg"].annotation is str assert tool.args_type().model_fields["my_arg"].is_required() is True assert tool.args_type().model_fields["my_arg"].default is PydanticUndefined assert len(tool.args_type().model_fields) == 1 - assert tool.return_type() == str + assert tool.return_type() is str assert tool.state_type() is None @@ -202,7 +204,7 @@ def my_function() -> Annotated[str, "test description"]: assert tool.name == "my_function" assert tool.description == "Function tool." assert issubclass(tool.args_type(), BaseModel) - assert tool.return_type() == str + assert tool.return_type() is str assert tool.state_type() is None @@ -215,7 +217,7 @@ def my_function() -> str: assert tool.description == "Function tool." 
assert issubclass(tool.args_type(), BaseModel) assert len(tool.args_type().model_fields) == 0 - assert tool.return_type() == str + assert tool.return_type() is str assert tool.state_type() is None diff --git a/python/pyproject.toml b/python/pyproject.toml index 83535c6f5402..3b099db535e8 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -3,8 +3,8 @@ members = ["packages/*"] [tool.uv] dev-dependencies = [ - "pyright==1.1.378", - "mypy==1.10.0", + "pyright==1.1.389", + "mypy==1.13.0", "ruff==0.4.8", "pytest", "pytest-asyncio", diff --git a/python/uv.lock b/python/uv.lock index 2a73eb34fb26..55af39091ee2 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -22,12 +22,12 @@ members = [ requirements = [ { name = "cookiecutter" }, { name = "grpcio-tools", specifier = "~=1.62.0" }, - { name = "mypy", specifier = "==1.10.0" }, + { name = "mypy", specifier = "==1.13.0" }, { name = "mypy-protobuf" }, { name = "packaging" }, { name = "poethepoet" }, { name = "polars" }, - { name = "pyright", specifier = "==1.1.378" }, + { name = "pyright", specifier = "==1.1.389" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-mock" }, @@ -2411,31 +2411,31 @@ wheels = [ [[package]] name = "mypy" -version = "1.10.0" +version = "1.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c3/b6/297734bb9f20ddf5e831cf4a83f422ddef5a29a33463999f0959d9cdc2df/mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131", size = 3022145 } +sdist = { url = "https://files.pythonhosted.org/packages/e8/21/7e9e523537991d145ab8a0a2fd98548d67646dc2aaaf6091c31ad883e7c1/mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e", size = 3152532 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/69/82/2081dbfbbf1071e1370e57f9e327adeda060113688ec0d6bf7bbf4d7a5ad/mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2", size = 10819193 }, - { url = "https://files.pythonhosted.org/packages/e8/1b/b7c9caa89955a7d9c89eac79f31550f48f2c8233b5e12fe48ef55cd2e953/mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99", size = 9970689 }, - { url = "https://files.pythonhosted.org/packages/15/ae/03d3f767f1ca5576970720ea551b43b79254d12998484d8f3e63fc07561e/mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2", size = 12728098 }, - { url = "https://files.pythonhosted.org/packages/96/ba/8f5db8bd94c18d86033d09bbe634d471c1e9d7014cc621585973183ad1d0/mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9", size = 12798838 }, - { url = "https://files.pythonhosted.org/packages/0e/ad/d476f1055deea6e63a91e065ba046a7ee494705574c4f9730de439172810/mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051", size = 9365995 }, - { url = "https://files.pythonhosted.org/packages/86/ec/64ffed9ea554845ff846bd1f6fc7b07ab507be1d2e1b0d58790d7ac2ca4c/mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1", size = 10739848 }, - { url = "https://files.pythonhosted.org/packages/03/ac/f4fcb9d7a349953be5f4e78157a48b5110343a0e5228f77b3f7d1a1b8479/mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee", size = 9902362 }, - { url = 
"https://files.pythonhosted.org/packages/7e/36/ca2b82d89828f484f1a068d9e25c08840c4cc6f6549e7ea755f4391e351f/mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de", size = 12603712 }, - { url = "https://files.pythonhosted.org/packages/b5/7a/54edb45a41de3bc66e5c3d2b7512a392b3f0f8b9c3d9465b9a2456b6a115/mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7", size = 12676904 }, - { url = "https://files.pythonhosted.org/packages/39/a5/e5aad5567ace09fcb179fbc3047cc2a6173743d84447b1ff71413e1a9881/mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53", size = 9355997 }, - { url = "https://files.pythonhosted.org/packages/30/30/6da95275426cfd21fc0c2e96d85a45d35fc4f7d37bd3286fa49f8f465447/mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b", size = 10867123 }, - { url = "https://files.pythonhosted.org/packages/8c/d3/61cf1fae3b79d264f9f27de97e6e8fab8a37c85fdada5a46b6de333319f8/mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30", size = 9859921 }, - { url = "https://files.pythonhosted.org/packages/08/5d/a46e5222bd69a873a896ab4f0b5948979e03dce46c7712ccaa5204ca8d02/mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e", size = 12647776 }, - { url = "https://files.pythonhosted.org/packages/1d/6a/d8df60f2e48291f1a790ded56fd96421ac6a992f33c2571c0bdf0552d83a/mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5", size = 12726191 }, - { url = 
"https://files.pythonhosted.org/packages/5a/93/9a015720bcf484d4202ea7fc5960c328c82d5eb1578950d586339ec15084/mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda", size = 9450377 }, - { url = "https://files.pythonhosted.org/packages/e9/39/0148f7ee1b7f3a86d378a23b88cb85c432f83914ceb60364efa1769c598f/mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee", size = 2580084 }, + { url = "https://files.pythonhosted.org/packages/5e/8c/206de95a27722b5b5a8c85ba3100467bd86299d92a4f71c6b9aa448bfa2f/mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a", size = 11020731 }, + { url = "https://files.pythonhosted.org/packages/ab/bb/b31695a29eea76b1569fd28b4ab141a1adc9842edde080d1e8e1776862c7/mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80", size = 10184276 }, + { url = "https://files.pythonhosted.org/packages/a5/2d/4a23849729bb27934a0e079c9c1aad912167d875c7b070382a408d459651/mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7", size = 12587706 }, + { url = "https://files.pythonhosted.org/packages/5c/c3/d318e38ada50255e22e23353a469c791379825240e71b0ad03e76ca07ae6/mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f", size = 13105586 }, + { url = "https://files.pythonhosted.org/packages/4a/25/3918bc64952370c3dbdbd8c82c363804678127815febd2925b7273d9482c/mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372", size = 9632318 }, + { url = 
"https://files.pythonhosted.org/packages/d0/19/de0822609e5b93d02579075248c7aa6ceaddcea92f00bf4ea8e4c22e3598/mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d", size = 10939027 }, + { url = "https://files.pythonhosted.org/packages/c8/71/6950fcc6ca84179137e4cbf7cf41e6b68b4a339a1f5d3e954f8c34e02d66/mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d", size = 10108699 }, + { url = "https://files.pythonhosted.org/packages/26/50/29d3e7dd166e74dc13d46050b23f7d6d7533acf48f5217663a3719db024e/mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b", size = 12506263 }, + { url = "https://files.pythonhosted.org/packages/3f/1d/676e76f07f7d5ddcd4227af3938a9c9640f293b7d8a44dd4ff41d4db25c1/mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73", size = 12984688 }, + { url = "https://files.pythonhosted.org/packages/9c/03/5a85a30ae5407b1d28fab51bd3e2103e52ad0918d1e68f02a7778669a307/mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca", size = 9626811 }, + { url = "https://files.pythonhosted.org/packages/fb/31/c526a7bd2e5c710ae47717c7a5f53f616db6d9097caf48ad650581e81748/mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5", size = 11077900 }, + { url = "https://files.pythonhosted.org/packages/83/67/b7419c6b503679d10bd26fc67529bc6a1f7a5f220bbb9f292dc10d33352f/mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e", size = 10074818 }, + { url = 
"https://files.pythonhosted.org/packages/ba/07/37d67048786ae84e6612575e173d713c9a05d0ae495dde1e68d972207d98/mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2", size = 12589275 }, + { url = "https://files.pythonhosted.org/packages/1f/17/b1018c6bb3e9f1ce3956722b3bf91bff86c1cefccca71cec05eae49d6d41/mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0", size = 13037783 }, + { url = "https://files.pythonhosted.org/packages/cb/32/cd540755579e54a88099aee0287086d996f5a24281a673f78a0e14dba150/mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2", size = 9726197 }, + { url = "https://files.pythonhosted.org/packages/3b/86/72ce7f57431d87a7ff17d442f521146a6585019eb8f4f31b7c02801f78ad/mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a", size = 2647043 }, ] [[package]] @@ -3314,14 +3314,15 @@ wheels = [ [[package]] name = "pyright" -version = "1.1.378" +version = "1.1.389" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodeenv" }, + { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/f0/e8aa5555410d88f898bef04da2102b0a9bf144658c98d34872e91621ced2/pyright-1.1.378.tar.gz", hash = "sha256:78a043be2876d12d0af101d667e92c7734f3ebb9db71dccc2c220e7e7eb89ca2", size = 17486 } +sdist = { url = "https://files.pythonhosted.org/packages/72/4e/9a5ab8745e7606b88c2c7ca223449ac9d82a71fd5e31df47b453f2cb39a1/pyright-1.1.389.tar.gz", hash = "sha256:716bf8cc174ab8b4dcf6828c3298cac05c5ed775dda9910106a5dcfe4c7fe220", size = 21940 } wheels = [ - { url = "https://files.pythonhosted.org/packages/38/c6/f0d4bc20c13b20cecfbf13c699477c825e45767f1dc5068137323f86e495/pyright-1.1.378-py3-none-any.whl", hash = 
"sha256:8853776138b01bc284da07ac481235be7cc89d3176b073d2dba73636cb95be79", size = 18222 }, + { url = "https://files.pythonhosted.org/packages/1b/26/c288cabf8cfc5a27e1aa9e5029b7682c0f920b8074f45d22bf844314d66a/pyright-1.1.389-py3-none-any.whl", hash = "sha256:41e9620bba9254406dc1f621a88ceab5a88af4c826feb4f614d95691ed243a60", size = 18581 }, ] [[package]] From 8347881776da61dfe55127ddb104ea5cd460bda5 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Mon, 25 Nov 2024 15:50:52 -0500 Subject: [PATCH 26/33] Fix intervention handler none check (#4351) * Fix none check * fix func * fmt, lint --------- Co-authored-by: Eric Zhu --- .../_single_threaded_agent_runtime.py | 20 +++++++++++++ .../src/autogen_core/base/intervention.py | 30 ++++++------------- .../packages/autogen-core/tests/test_tools.py | 2 -- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py index f511bd782d87..52d24c64d0cb 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py @@ -147,6 +147,23 @@ def _stop_when_idle(self) -> bool: return self._run_state == RunContext.RunState.UNTIL_IDLE and self._runtime.idle +def _warn_if_none(value: Any, handler_name: str) -> None: + """ + Utility function to check if the intervention handler returned None and issue a warning. + + Args: + value: The return value to check + handler_name: Name of the intervention handler method for the warning message + """ + if value is None: + warnings.warn( + f"Intervention handler {handler_name} returned None. This might be unintentional. 
" + "Consider returning the original message or DropMessage explicitly.", + RuntimeWarning, + stacklevel=2, + ) + + class SingleThreadedAgentRuntime(AgentRuntime): def __init__( self, @@ -433,6 +450,7 @@ async def process_next(self) -> None: ): try: temp_message = await handler.on_send(message, sender=sender, recipient=recipient) + _warn_if_none(temp_message, "on_send") except BaseException as e: future.set_exception(e) return @@ -456,6 +474,7 @@ async def process_next(self) -> None: ): try: temp_message = await handler.on_publish(message, sender=sender) + _warn_if_none(temp_message, "on_publish") except BaseException as e: # TODO: we should raise the intervention exception to the publisher. logger.error(f"Exception raised in in intervention handler: {e}", exc_info=True) @@ -474,6 +493,7 @@ async def process_next(self) -> None: for handler in self._intervention_handlers: try: temp_message = await handler.on_response(message, sender=sender, recipient=recipient) + _warn_if_none(temp_message, "on_response") except BaseException as e: # TODO: should we raise the exception to sender of the response instead? future.set_exception(e) diff --git a/python/packages/autogen-core/src/autogen_core/base/intervention.py b/python/packages/autogen-core/src/autogen_core/base/intervention.py index 4b06fa19f94f..3b771c931a33 100644 --- a/python/packages/autogen-core/src/autogen_core/base/intervention.py +++ b/python/packages/autogen-core/src/autogen_core/base/intervention.py @@ -1,4 +1,3 @@ -import warnings from typing import Any, Awaitable, Callable, Protocol, final from autogen_core.base import AgentId @@ -15,27 +14,15 @@ class DropMessage: ... -def _warn_if_none(value: Any, handler_name: str) -> None: - """ - Utility function to check if the intervention handler returned None and issue a warning. 
- - Args: - value: The return value to check - handler_name: Name of the intervention handler method for the warning message - """ - if value is None: - warnings.warn( - f"Intervention handler {handler_name} returned None. This might be unintentional. " - "Consider returning the original message or DropMessage explicitly.", - RuntimeWarning, - stacklevel=2, - ) - - InterventionFunction = Callable[[Any], Any | Awaitable[type[DropMessage]]] class InterventionHandler(Protocol): + """An intervention handler is a class that can be used to modify, log or drop messages that are being processed by the :class:`autogen_core.base.AgentRuntime`. + + Note: Returning None from any of the intervention handler methods will result in a warning being issued and treated as "no change". If you intend to drop a message, you should return :class:`DropMessage` explicitly. + """ + async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]: ... async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any | type[DropMessage]: ... async def on_response( @@ -44,14 +31,15 @@ async def on_response( class DefaultInterventionHandler(InterventionHandler): + """Simple class that provides a default implementation for all intervention + handler methods, that simply returns the message unchanged. 
Allows for easy + subclassing to override only the desired methods.""" + async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]: - _warn_if_none(message, "on_send") return message async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any | type[DropMessage]: - _warn_if_none(message, "on_publish") return message async def on_response(self, message: Any, *, sender: AgentId, recipient: AgentId | None) -> Any | type[DropMessage]: - _warn_if_none(message, "on_response") return message diff --git a/python/packages/autogen-core/tests/test_tools.py b/python/packages/autogen-core/tests/test_tools.py index 6632a7931e11..e7995969802f 100644 --- a/python/packages/autogen-core/tests/test_tools.py +++ b/python/packages/autogen-core/tests/test_tools.py @@ -2,12 +2,10 @@ from typing import Annotated, List import pytest - from autogen_core.base import CancellationToken from autogen_core.components._function_utils import get_typed_signature from autogen_core.components.tools import BaseTool, FunctionTool from autogen_core.components.tools._base import ToolSchema - from pydantic import BaseModel, Field, model_serializer from pydantic_core import PydanticUndefined From 6c8b656588cc3b99ff1e9d5fcf2d545068f02f2b Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Mon, 25 Nov 2024 16:07:45 -0500 Subject: [PATCH 27/33] Fix examples in docstrings (#4356) * Fix examples in docstrings * formatting * Update python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py Co-authored-by: Eric Zhu * Update python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py Co-authored-by: Eric Zhu * Update python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py Co-authored-by: Eric Zhu * Update python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py Co-authored-by: Eric Zhu * Update 
python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py Co-authored-by: Eric Zhu * Update python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py Co-authored-by: Eric Zhu * Formattinggp --------- Co-authored-by: Eric Zhu --- .../agents/_code_executor_agent.py | 7 +- .../autogen_agentchat/base/_termination.py | 6 +- .../autogen_agentchat/task/_terminations.py | 2 + .../teams/_group_chat/_selector_group_chat.py | 8 +- .../src/autogen_core/base/_agent_runtime.py | 27 ------ .../components/_default_subscription.py | 6 -- .../autogen_core/components/_routed_agent.py | 22 ++++- .../components/_type_subscription.py | 2 + .../_impl/local_commandline_code_executor.py | 32 ++++--- .../components/tools/_function_tool.py | 19 +++-- .../agents/_openai_assistant_agent.py | 50 ++++++----- .../models/_openai/_openai_client.py | 5 +- .../models/_reply_chat_completion_client.py | 83 ++++++++++++------- 13 files changed, 150 insertions(+), 119 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py index 7abe68132d9d..5dd4c9008ef5 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py @@ -24,9 +24,11 @@ class CodeExecutorAgent(BaseChatAgent): .. code-block:: python - + import asyncio from autogen_agentchat.agents import CodeExecutorAgent + from autogen_agentchat.messages import TextMessage from autogen_ext.code_executors import DockerCommandLineCodeExecutor + from autogen_core.base import CancellationToken async def run_code_executor_agent() -> None: @@ -51,8 +53,7 @@ async def run_code_executor_agent() -> None: await code_executor.stop() - # Use asyncio.run(run_code_executor_agent()) when running in a script. 
- await run_code_executor_agent() + asyncio.run(run_code_executor_agent()) """ diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py index 859740fa093e..c923e8ced51c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py @@ -23,15 +23,15 @@ class TerminationCondition(ABC): .. code-block:: python import asyncio - from autogen_agentchat.teams import MaxTurnsTermination, TextMentionTermination + from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination async def main() -> None: # Terminate the conversation after 10 turns or if the text "TERMINATE" is mentioned. - cond1 = MaxTurnsTermination(10) | TextMentionTermination("TERMINATE") + cond1 = MaxMessageTermination(10) | TextMentionTermination("TERMINATE") # Terminate the conversation after 10 turns and if the text "TERMINATE" is mentioned. - cond2 = MaxTurnsTermination(10) & TextMentionTermination("TERMINATE") + cond2 = MaxMessageTermination(10) & TextMentionTermination("TERMINATE") # ... diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py index 0053ff9821bd..9db5b584a31d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py @@ -218,6 +218,8 @@ class ExternalTermination(TerminationCondition): .. code-block:: python + from autogen_agentchat.task import ExternalTermination + termination = ExternalTermination() # Run the team in an asyncio task. 
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index e21e99e0f934..cfcebd3783ce 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -257,16 +257,18 @@ async def book_trip() -> str: .. code-block:: python import asyncio + from typing import Sequence from autogen_ext.models import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import SelectorGroupChat from autogen_agentchat.task import TextMentionTermination, Console + from autogen_agentchat.messages import AgentMessage async def main() -> None: model_client = OpenAIChatCompletionClient(model="gpt-4o") - def check_caculation(x: int, y: int, answer: int) -> str: + def check_calculation(x: int, y: int, answer: int) -> str: if x + y == answer: return "Correct!" else: @@ -281,12 +283,12 @@ def check_caculation(x: int, y: int, answer: int) -> str: agent2 = AssistantAgent( "Agent2", model_client, - tools=[check_caculation], + tools=[check_calculation], description="For checking calculation", system_message="Check the answer and respond with 'Correct!' 
or 'Incorrect!'", ) - def selector_func(messages): + def selector_func(messages: Sequence[AgentMessage]) -> str | None: if len(messages) == 1 or messages[-1].content == "Incorrect!": return "Agent1" if messages[-1].source == "Agent1": diff --git a/python/packages/autogen-core/src/autogen_core/base/_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/base/_agent_runtime.py index defb1de72921..a8e7f0096324 100644 --- a/python/packages/autogen-core/src/autogen_core/base/_agent_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/base/_agent_runtime.py @@ -89,19 +89,6 @@ async def register( agent_factory (Callable[[], T]): The factory that creates the agent, where T is a concrete Agent type. Inside the factory, use `autogen_core.base.AgentInstantiationContext` to access variables like the current runtime and agent ID. subscriptions (Callable[[], list[Subscription]] | list[Subscription] | None, optional): The subscriptions that the agent should be subscribed to. Defaults to None. - Example: - .. code-block:: python - - runtime.register( - "chat_agent", - lambda: ChatCompletionAgent( - description="A generic chat agent.", - system_messages=[SystemMessage("You are a helpful assistant")], - model_client=OpenAIChatCompletionClient(model="gpt-4o"), - memory=BufferedChatMemory(buffer_size=10), - ), - ) - """ ... @@ -117,20 +104,6 @@ async def register_factory( Args: type (str): The type of agent this factory creates. It is not the same as agent class name. The `type` parameter is used to differentiate between different factory functions rather than agent classes. agent_factory (Callable[[], T]): The factory that creates the agent, where T is a concrete Agent type. Inside the factory, use `autogen_core.base.AgentInstantiationContext` to access variables like the current runtime and agent ID. - - Example: - .. 
code-block:: python - - runtime.register( - "chat_agent", - lambda: ChatCompletionAgent( - description="A generic chat agent.", - system_messages=[SystemMessage("You are a helpful assistant")], - model_client=OpenAIChatCompletionClient(model="gpt-4o"), - memory=BufferedChatMemory(buffer_size=10), - ), - ) - """ ... diff --git a/python/packages/autogen-core/src/autogen_core/components/_default_subscription.py b/python/packages/autogen-core/src/autogen_core/components/_default_subscription.py index aea5c381692d..3e1c0c4301cd 100644 --- a/python/packages/autogen-core/src/autogen_core/components/_default_subscription.py +++ b/python/packages/autogen-core/src/autogen_core/components/_default_subscription.py @@ -10,12 +10,6 @@ class DefaultSubscription(TypeSubscription): This topic by default uses the "default" topic type and attempts to detect the agent type to use based on the instantiation context. - Example: - - .. code-block:: python - - await runtime.register("MyAgent", agent_factory, lambda: [DefaultSubscription()]) - Args: topic_type (str, optional): The topic type to subscribe to. Defaults to "default". agent_type (str, optional): The agent type to use for the subscription. Defaults to None, in which case it will attempt to detect the agent type based on the instantiation context. diff --git a/python/packages/autogen-core/src/autogen_core/components/_routed_agent.py b/python/packages/autogen-core/src/autogen_core/components/_routed_agent.py index 9c21670d0605..e7f266bf49d6 100644 --- a/python/packages/autogen-core/src/autogen_core/components/_routed_agent.py +++ b/python/packages/autogen-core/src/autogen_core/components/_routed_agent.py @@ -422,9 +422,24 @@ class RoutedAgent(BaseAgent): .. code-block:: python + from dataclasses import dataclass from autogen_core.base import MessageContext from autogen_core.components import RoutedAgent, event, rpc - # Assume Message, MessageWithContent, and Response are defined elsewhere. 
+ + + @dataclass + class Message: + pass + + + @dataclass + class MessageWithContent: + content: str + + + @dataclass + class Response: + pass class MyAgent(RoutedAgent): @@ -433,9 +448,10 @@ def __init__(self): @event async def handle_event_message(self, message: Message, ctx: MessageContext) -> None: - self.publish_message(MessageWithContent("event handled"), ctx.topic_id) + assert ctx.topic_id is not None + await self.publish_message(MessageWithContent("event handled"), ctx.topic_id) - @rpc(match=lambda message, ctx: message.content == "special") + @rpc(match=lambda message, ctx: message.content == "special") # type: ignore async def handle_special_rpc_message(self, message: MessageWithContent, ctx: MessageContext) -> Response: return Response() """ diff --git a/python/packages/autogen-core/src/autogen_core/components/_type_subscription.py b/python/packages/autogen-core/src/autogen_core/components/_type_subscription.py index d212317566f5..92709a457aec 100644 --- a/python/packages/autogen-core/src/autogen_core/components/_type_subscription.py +++ b/python/packages/autogen-core/src/autogen_core/components/_type_subscription.py @@ -14,6 +14,8 @@ class TypeSubscription(Subscription): .. 
code-block:: python + from autogen_core.components import TypeSubscription + subscription = TypeSubscription(topic_type="t1", agent_type="a1") In this case: diff --git a/python/packages/autogen-core/src/autogen_core/components/code_executor/_impl/local_commandline_code_executor.py b/python/packages/autogen-core/src/autogen_core/components/code_executor/_impl/local_commandline_code_executor.py index deca8355fbb3..31779f65679f 100644 --- a/python/packages/autogen-core/src/autogen_core/components/code_executor/_impl/local_commandline_code_executor.py +++ b/python/packages/autogen-core/src/autogen_core/components/code_executor/_impl/local_commandline_code_executor.py @@ -67,25 +67,31 @@ class LocalCommandLineCodeExecutor(CodeExecutor): import venv from pathlib import Path + import asyncio from autogen_core.base import CancellationToken from autogen_core.components.code_executor import CodeBlock, LocalCommandLineCodeExecutor - work_dir = Path("coding") - work_dir.mkdir(exist_ok=True) - venv_dir = work_dir / ".venv" - venv_builder = venv.EnvBuilder(with_pip=True) - venv_builder.create(venv_dir) - venv_context = venv_builder.ensure_directories(venv_dir) + async def example(): + work_dir = Path("coding") + work_dir.mkdir(exist_ok=True) - local_executor = LocalCommandLineCodeExecutor(work_dir=work_dir, virtual_env_context=venv_context) - await local_executor.execute_code_blocks( - code_blocks=[ - CodeBlock(language="bash", code="pip install matplotlib"), - ], - cancellation_token=CancellationToken(), - ) + venv_dir = work_dir / ".venv" + venv_builder = venv.EnvBuilder(with_pip=True) + venv_builder.create(venv_dir) + venv_context = venv_builder.ensure_directories(venv_dir) + + local_executor = LocalCommandLineCodeExecutor(work_dir=work_dir, virtual_env_context=venv_context) + await local_executor.execute_code_blocks( + code_blocks=[ + CodeBlock(language="bash", code="pip install matplotlib"), + ], + cancellation_token=CancellationToken(), + ) + + + asyncio.run(example()) """ 
diff --git a/python/packages/autogen-core/src/autogen_core/components/tools/_function_tool.py b/python/packages/autogen-core/src/autogen_core/components/tools/_function_tool.py index 462374116d04..cc9145fab1e1 100644 --- a/python/packages/autogen-core/src/autogen_core/components/tools/_function_tool.py +++ b/python/packages/autogen-core/src/autogen_core/components/tools/_function_tool.py @@ -41,6 +41,7 @@ class FunctionTool(BaseTool[BaseModel, BaseModel]): from autogen_core.base import CancellationToken from autogen_core.components.tools import FunctionTool from typing_extensions import Annotated + import asyncio async def get_stock_price(ticker: str, date: Annotated[str, "Date in YYYY/MM/DD"]) -> float: @@ -48,15 +49,19 @@ async def get_stock_price(ticker: str, date: Annotated[str, "Date in YYYY/MM/DD" return random.uniform(10, 200) - # Initialize a FunctionTool instance for retrieving stock prices. - stock_price_tool = FunctionTool(get_stock_price, description="Fetch the stock price for a given ticker.") + async def example(): + # Initialize a FunctionTool instance for retrieving stock prices. + stock_price_tool = FunctionTool(get_stock_price, description="Fetch the stock price for a given ticker.") - # Execute the tool with cancellation support. - cancellation_token = CancellationToken() - result = await stock_price_tool.run_json({"ticker": "AAPL", "date": "2021/01/01"}, cancellation_token) + # Execute the tool with cancellation support. + cancellation_token = CancellationToken() + result = await stock_price_tool.run_json({"ticker": "AAPL", "date": "2021/01/01"}, cancellation_token) - # Output the result as a formatted string. - print(stock_price_tool.return_value_as_string(result)) + # Output the result as a formatted string. 
+ print(stock_price_tool.return_value_as_string(result)) + + + asyncio.run(example()) """ def __init__(self, func: Callable[..., Any], description: str, name: str | None = None) -> None: diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py index a6dd85b119be..7e1124728fbf 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py @@ -105,33 +105,41 @@ class OpenAIAssistantAgent(BaseChatAgent): from openai import AsyncClient from autogen_core.base import CancellationToken + import asyncio from autogen_ext.agents import OpenAIAssistantAgent from autogen_agentchat.messages import TextMessage - # Create an OpenAI client - client = AsyncClient(api_key="your-api-key", base_url="your-base-url") - - # Create an assistant with code interpreter - assistant = OpenAIAssistantAgent( - name="Python Helper", - description="Helps with Python programming", - client=client, - model="gpt-4", - instructions="You are a helpful Python programming assistant.", - tools=["code_interpreter"], - ) - # Upload files for the assistant to use - await assistant.on_upload_for_code_interpreter("data.csv", cancellation_token) + async def example(): + cancellation_token = CancellationToken() + + # Create an OpenAI client + client = AsyncClient(api_key="your-api-key", base_url="your-base-url") + + # Create an assistant with code interpreter + assistant = OpenAIAssistantAgent( + name="Python Helper", + description="Helps with Python programming", + client=client, + model="gpt-4", + instructions="You are a helpful Python programming assistant.", + tools=["code_interpreter"], + ) + + # Upload files for the assistant to use + await assistant.on_upload_for_code_interpreter("data.csv", cancellation_token) + + # Get response from the assistant + _response = await 
assistant.on_messages( + [TextMessage(source="user", content="Analyze the data in data.csv")], cancellation_token + ) + + # Clean up resources + await assistant.delete_uploaded_files(cancellation_token) + await assistant.delete_assistant(cancellation_token) - # Get response from the assistant - response = await assistant.on_messages( - [TextMessage(source="user", content="Analyze the data in data.csv")], cancellation_token - ) - # Clean up resources - await assistant.delete_uploaded_files(cancellation_token) - await assistant.delete_assistant(cancellation_token) + asyncio.run(example()) Args: name (str): Name of the assistant diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py index 28dc81a338ee..f7d4adb31ce2 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py @@ -917,12 +917,12 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): from autogen_ext.models import OpenAIChatCompletionClient from autogen_core.components.models import UserMessage - opneai_model_client = OpenAIChatCompletionClient( + openai_client = OpenAIChatCompletionClient( model="gpt-4o-2024-08-06", # api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set. ) - result = await opneai_model_client.create([UserMessage(content="What is the capital of France?", source="user")]) + result = await openai_client.create([UserMessage(content="What is the capital of France?", source="user")]) # type: ignore print(result) @@ -931,7 +931,6 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): .. 
code-block:: python from autogen_ext.models import OpenAIChatCompletionClient - from autogen_core.components.models import UserMessage custom_model_client = OpenAIChatCompletionClient( model="custom-model-name", diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py b/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py index a4889f44b415..187dfdace14a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py @@ -38,50 +38,73 @@ class ReplayChatCompletionClient: .. code-block:: python - chat_completions = [ - "Hello, how can I assist you today?", - "I'm happy to help with any questions you have.", - "Is there anything else I can assist you with?", - ] - client = ReplayChatCompletionClient(chat_completions) - messages = [LLMMessage(content="What can you do?")] - response = await client.create(messages) - print(response.content) # Output: "Hello, how can I assist you today?" + from autogen_ext.models import ReplayChatCompletionClient + from autogen_core.components.models import UserMessage + + + async def example(): + chat_completions = [ + "Hello, how can I assist you today?", + "I'm happy to help with any questions you have.", + "Is there anything else I can assist you with?", + ] + client = ReplayChatCompletionClient(chat_completions) + messages = [UserMessage(content="What can you do?", source="user")] + response = await client.create(messages) + print(response.content) # Output: "Hello, how can I assist you today?" Simple streaming chat completion client to return pre-defined responses .. 
code-block:: python - chat_completions = [ - "Hello, how can I assist you today?", - "I'm happy to help with any questions you have.", - "Is there anything else I can assist you with?", - ] - client = ReplayChatCompletionClient(chat_completions) + import asyncio + from autogen_ext.models import ReplayChatCompletionClient + from autogen_core.components.models import UserMessage + + + async def example(): + chat_completions = [ + "Hello, how can I assist you today?", + "I'm happy to help with any questions you have.", + "Is there anything else I can assist you with?", + ] + client = ReplayChatCompletionClient(chat_completions) + messages = [UserMessage(content="What can you do?", source="user")] + + async for token in client.create_stream(messages): + print(token, end="") # Output: "Hello, how can I assist you today?" - async for token in client.create_stream(messages): - print(token, end="") # Output: "Hello, how can I assist you today?" + async for token in client.create_stream(messages): + print(token, end="") # Output: "I'm happy to help with any questions you have." - async for token in client.create_stream(messages): - print(token, end="") # Output: "I'm happy to help with any questions you have." + asyncio.run(example()) Using `.reset` to reset the chat client state .. code-block:: python - chat_completions = [ - "Hello, how can I assist you today?", - ] - client = ReplayChatCompletionClient(chat_completions) - messages = [LLMMessage(content="What can you do?")] - response = await client.create(messages) - print(response.content) # Output: "Hello, how can I assist you today?" 
+ import asyncio + from autogen_ext.models import ReplayChatCompletionClient + from autogen_core.components.models import UserMessage + + + async def example(): + chat_completions = [ + "Hello, how can I assist you today?", + ] + client = ReplayChatCompletionClient(chat_completions) + messages = [UserMessage(content="What can you do?", source="user")] + response = await client.create(messages) + print(response.content) # Output: "Hello, how can I assist you today?" + + response = await client.create(messages) # Raises ValueError("No more mock responses available") + + client.reset() # Reset the client state (current index of message and token usages) + response = await client.create(messages) + print(response.content) # Output: "Hello, how can I assist you today?" again - response = await client.create(messages) # Raises ValueError("No more mock responses available") - client.reset() # Reset the client state (current index of message and token usages) - response = await client.create(messages) - print(response.content) # Output: "Hello, how can I assist you today?" 
again + asyncio.run(example()) """ From 7c1cabf07e25d26445a83b5678dc3d9f40b3a17b Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Mon, 25 Nov 2024 16:10:45 -0500 Subject: [PATCH 28/33] Add tool to check example in docstrings (#4353) * Add tool to check example in docstrings * update lock * add task * add ignored message * add example check CI --- .github/workflows/checks.yml | 21 ++++ .../docs/src/_extension/code_lint.py | 98 +++++++++++++++++++ python/packages/autogen-core/docs/src/conf.py | 11 ++- python/packages/autogen-core/pyproject.toml | 8 ++ python/uv.lock | 2 + 5 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 python/packages/autogen-core/docs/src/_extension/code_lint.py diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index b6d8b112c22f..31869b86ccc2 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -153,6 +153,27 @@ jobs: poe --directory ${{ matrix.package }} docs-check working-directory: ./python + docs-example-check: + runs-on: ubuntu-latest + strategy: + matrix: + package: ["./packages/autogen-core"] + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v3 + with: + enable-cache: true + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - run: uv sync --locked --all-extras + working-directory: ./python + - name: Run task + run: | + source ${{ github.workspace }}/python/.venv/bin/activate + poe --directory ${{ matrix.package }} docs-check-examples + working-directory: ./python + check-proto-changes-python: runs-on: ubuntu-latest steps: diff --git a/python/packages/autogen-core/docs/src/_extension/code_lint.py b/python/packages/autogen-core/docs/src/_extension/code_lint.py new file mode 100644 index 000000000000..b8c01bfd069f --- /dev/null +++ b/python/packages/autogen-core/docs/src/_extension/code_lint.py @@ -0,0 +1,98 @@ +# Modified from: https://github.com/kai687/sphinxawesome-codelinter + +import tempfile +from typing import AbstractSet, Any, 
Iterable + +from docutils import nodes +from sphinx.application import Sphinx +from sphinx.builders import Builder +from sphinx.util import logging +from sphinx.util.console import darkgreen, darkred, red, teal, faint # type: ignore[attr-defined] + +from pygments import highlight # type: ignore +from pygments.lexers import PythonLexer +from pygments.formatters import TerminalFormatter + +logger = logging.getLogger(__name__) + +__version__ = "0.1.0" + + +class CodeLinter(Builder): + """Iterate over all ``literal_block`` nodes. + + pipe them into any command line tool that + can read from standard input. + """ + + name = "code_lint" + allow_parallel = True + + def init(self) -> None: + """Initialize.""" + self._had_errors = False + pass + + def get_outdated_docs(self) -> str | Iterable[str]: + """Check for outdated files. + + Return an iterable of outdated output files, or a string describing what an + update will build. + """ + return self.env.found_docs + + def get_target_uri(self, docname: str, typ: str | None = None) -> str: + """Return Target URI for a document name.""" + return "" + + def prepare_writing(self, docnames: AbstractSet[str]) -> None: + """Run these steps before documents are written.""" + return + + def write_doc(self, docname: str, doctree: nodes.Node) -> None: + path_prefix: str = self.app.config.code_lint_path_prefix + supported_languages = set(["python"]) + + if not docname.startswith(path_prefix): + return + + for code in doctree.findall(nodes.literal_block): + if code["language"] in supported_languages: + logger.info("Checking a code block in %s...", docname, nonl=True) + if "ignore" in code["classes"]: + logger.info(" " + darkgreen("OK[ignored]")) + continue + + # Create a temporary file to store the code block + with tempfile.NamedTemporaryFile(mode="wb", suffix=".py") as temp_file: + temp_file.write(code.astext().encode()) + temp_file.flush() + + # Run pyright on the temporary file using subprocess.run + import subprocess + + result = 
subprocess.run(["pyright", temp_file.name], capture_output=True, text=True) + if result.returncode != 0: + logger.info(" " + darkred("FAIL")) + highlighted_code = highlight(code.astext(), PythonLexer(), TerminalFormatter()) # type: ignore + output = f"{faint('========================================================')}\n{red('Error')}: Pyright found issues in {teal(docname)}:\n{faint('--------------------------------------------------------')}\n{highlighted_code}\n{faint('--------------------------------------------------------')}\n\n{teal('pyright output:')}\n{red(result.stdout)}{faint('========================================================')}\n" + logger.info(output) + self._had_errors = True + else: + logger.info(" " + darkgreen("OK")) + + def finish(self) -> None: + """Finish the build process.""" + if self._had_errors: + raise RuntimeError("Code linting failed - see earlier output") + + +def setup(app: Sphinx) -> dict[str, Any]: + app.add_builder(CodeLinter) + app.add_config_value("code_lint_path_prefix", "", "env") + + return { + "version": __version__, + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/python/packages/autogen-core/docs/src/conf.py b/python/packages/autogen-core/docs/src/conf.py index 11341873d141..9852428ae436 100644 --- a/python/packages/autogen-core/docs/src/conf.py +++ b/python/packages/autogen-core/docs/src/conf.py @@ -37,7 +37,8 @@ "sphinx_copybutton", "_extension.gallery_directive", "myst_nb", - "sphinxcontrib.autodoc_pydantic" + "sphinxcontrib.autodoc_pydantic", + "_extension.code_lint", ] suppress_warnings = ["myst.header"] @@ -148,6 +149,14 @@ intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} +code_lint_path_prefix = "reference/python" + +nb_mime_priority_overrides = [ + ('code_lint', 'image/jpeg', 100), + ('code_lint', 'image/png', 100), + ('code_lint', 'text/plain', 100) +] + def setup_to_main( app: Sphinx, pagename: str, templatename: str, context, doctree diff --git 
a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index d0dd291e6369..d0711091c9da 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -72,6 +72,7 @@ dev-dependencies = [ "sphinx", "sphinxcontrib-apidoc", "autodoc_pydantic~=2.2", + "pygments", # Documentation tooling "sphinx-autobuild", @@ -153,3 +154,10 @@ ref = "docs-apidoc-all" [[tool.poe.tasks.docs-check.sequence]] cmd = "sphinx-build --fail-on-warning docs/src docs/build" + +[[tool.poe.tasks.docs-check-examples.sequence]] +ref = "docs-apidoc-all" + +[[tool.poe.tasks.docs-check-examples.sequence]] +cmd = "sphinx-build -b code_lint docs/src docs/build" + diff --git a/python/uv.lock b/python/uv.lock index 55af39091ee2..1d833a69ab21 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -371,6 +371,7 @@ dev = [ { name = "pip" }, { name = "polars" }, { name = "pydata-sphinx-theme" }, + { name = "pygments" }, { name = "python-dotenv" }, { name = "requests" }, { name = "sphinx" }, @@ -427,6 +428,7 @@ dev = [ { name = "pip" }, { name = "polars" }, { name = "pydata-sphinx-theme", specifier = "==0.15.4" }, + { name = "pygments" }, { name = "python-dotenv" }, { name = "requests" }, { name = "sphinx" }, From a14f2085883cce46981594d90c43b853ea94ba53 Mon Sep 17 00:00:00 2001 From: Ryan Sweet Date: Mon, 25 Nov 2024 13:37:35 -0800 Subject: [PATCH 29/33] 4153 try to decouple abstractions package from orleans (#4355) * remove abstractions dep on orleans #4153 * fixing up defaults * fix some HelloAgent defaults --------- Co-authored-by: Xiaoyun Zhang --- dotnet/samples/Hello/HelloAgent/HelloAgent.csproj | 6 +++++- .../Abstractions/Microsoft.AutoGen.Abstractions.csproj | 2 -- .../Agents/Services/AgentWorkerHostingExtensions.cs | 2 +- .../{Abstractions => Agents/Services}/IGateway.cs | 3 ++- 4 files changed, 8 insertions(+), 5 deletions(-) rename dotnet/src/Microsoft.AutoGen/{Abstractions => Agents/Services}/IGateway.cs (84%) diff 
--git a/dotnet/samples/Hello/HelloAgent/HelloAgent.csproj b/dotnet/samples/Hello/HelloAgent/HelloAgent.csproj index dcb693a52225..93c996e32093 100644 --- a/dotnet/samples/Hello/HelloAgent/HelloAgent.csproj +++ b/dotnet/samples/Hello/HelloAgent/HelloAgent.csproj @@ -5,7 +5,11 @@ enable enable - + + + PreserveNewest + + diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/Microsoft.AutoGen.Abstractions.csproj b/dotnet/src/Microsoft.AutoGen/Abstractions/Microsoft.AutoGen.Abstractions.csproj index d9596f607ccb..39a90664057e 100644 --- a/dotnet/src/Microsoft.AutoGen/Abstractions/Microsoft.AutoGen.Abstractions.csproj +++ b/dotnet/src/Microsoft.AutoGen/Abstractions/Microsoft.AutoGen.Abstractions.csproj @@ -18,8 +18,6 @@ - -
diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorkerHostingExtensions.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorkerHostingExtensions.cs index fab29e86ce71..3736fc76cb61 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorkerHostingExtensions.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorkerHostingExtensions.cs @@ -29,7 +29,7 @@ public static IHostApplicationBuilder AddAgentService(this IHostApplicationBuild public static IHostApplicationBuilder AddLocalAgentService(this IHostApplicationBuilder builder, bool useGrpc = true) { - return builder.AddAgentService(local: false, useGrpc); + return builder.AddAgentService(local: true, useGrpc); } public static WebApplication MapAgentService(this WebApplication app, bool local = false, bool useGrpc = true) diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/IGateway.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/IGateway.cs similarity index 84% rename from dotnet/src/Microsoft.AutoGen/Abstractions/IGateway.cs rename to dotnet/src/Microsoft.AutoGen/Agents/Services/IGateway.cs index 79b7b63e7235..539ec3eca435 100644 --- a/dotnet/src/Microsoft.AutoGen/Abstractions/IGateway.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/IGateway.cs @@ -1,7 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. 
// IGateway.cs +using Microsoft.AutoGen.Abstractions; -namespace Microsoft.AutoGen.Abstractions; +namespace Microsoft.AutoGen.Agents; public interface IGateway : IGrainObserver { From 1a02e2ba4bbe69a6f967168d66f05ad285310a57 Mon Sep 17 00:00:00 2001 From: Jack Gerrits Date: Mon, 25 Nov 2024 17:09:41 -0500 Subject: [PATCH 30/33] Update version to dev7 (#4359) * Update version to dev7 * update other references --- .github/workflows/docs.yml | 1 + README.md | 2 +- docs/switcher.json | 7 ++++++- python/packages/autogen-agentchat/pyproject.toml | 4 ++-- python/packages/autogen-core/docs/src/index.md | 4 ++-- .../packages/autogen-core/docs/src/packages/index.md | 12 ++++++------ .../user-guide/agentchat-user-guide/installation.md | 4 ++-- .../user-guide/agentchat-user-guide/quickstart.ipynb | 2 +- .../agentchat-user-guide/tutorial/models.ipynb | 4 ++-- .../framework/distributed-agent-runtime.ipynb | 2 +- python/packages/autogen-core/pyproject.toml | 2 +- python/packages/autogen-ext/pyproject.toml | 4 ++-- .../src/autogen_ext/models/_openai/_openai_client.py | 4 ++-- python/packages/autogen-studio/pyproject.toml | 8 ++++---- python/uv.lock | 6 +++--- 15 files changed, 36 insertions(+), 30 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0218a370535a..57ceb261fdec 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -39,6 +39,7 @@ jobs: { ref: "v0.4.0.dev4", dest-dir: "0.4.0.dev4" }, { ref: "v0.4.0.dev5", dest-dir: "0.4.0.dev5" }, { ref: "v0.4.0.dev6", dest-dir: "0.4.0.dev6" }, + { ref: "v0.4.0.dev7", dest-dir: "0.4.0.dev7" }, ] steps: - name: Checkout diff --git a/README.md b/README.md index b5bda7de8e0e..253a6b0ea073 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,7 @@ We look forward to your contributions! 
First install the packages: ```bash -pip install 'autogen-agentchat==0.4.0.dev6' 'autogen-ext[openai]==0.4.0.dev6' +pip install 'autogen-agentchat==0.4.0.dev7' 'autogen-ext[openai]==0.4.0.dev7' ``` The following code uses OpenAI's GPT-4o model and you need to provide your diff --git a/docs/switcher.json b/docs/switcher.json index a4e6748ebea7..5cfbe4c05516 100644 --- a/docs/switcher.json +++ b/docs/switcher.json @@ -41,7 +41,12 @@ { "name": "0.4.0.dev6", "version": "0.4.0.dev6", - "url": "/autogen/0.4.0.dev6/", + "url": "/autogen/0.4.0.dev6/" + }, + { + "name": "0.4.0.dev7", + "version": "0.4.0.dev7", + "url": "/autogen/0.4.0.dev7/", "preferred": true } ] diff --git a/python/packages/autogen-agentchat/pyproject.toml b/python/packages/autogen-agentchat/pyproject.toml index ab5b83e5ee4d..c2336a6eeba8 100644 --- a/python/packages/autogen-agentchat/pyproject.toml +++ b/python/packages/autogen-agentchat/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-agentchat" -version = "0.4.0.dev6" +version = "0.4.0.dev7" license = {file = "LICENSE-CODE"} description = "AutoGen agents and teams library" readme = "README.md" @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-core==0.4.0.dev6", + "autogen-core==0.4.0.dev7", ] [tool.uv] diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 674492e3f231..bee058bd9124 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -61,7 +61,7 @@ AgentChat High-level API that includes preset agents and teams for building multi-agent systems. ```sh -pip install 'autogen-agentchat==0.4.0.dev6' +pip install 'autogen-agentchat==0.4.0.dev7' ``` 💡 *Start here if you are looking for an API similar to AutoGen 0.2* @@ -82,7 +82,7 @@ Get Started Provides building blocks for creating asynchronous, event driven multi-agent systems. 
```sh -pip install 'autogen-core==0.4.0.dev6' +pip install 'autogen-core==0.4.0.dev7' ``` +++ diff --git a/python/packages/autogen-core/docs/src/packages/index.md b/python/packages/autogen-core/docs/src/packages/index.md index dd86c4c95122..b659482e0d67 100644 --- a/python/packages/autogen-core/docs/src/packages/index.md +++ b/python/packages/autogen-core/docs/src/packages/index.md @@ -31,10 +31,10 @@ myst: Library that is at a similar level of abstraction as AutoGen 0.2, including default agents and group chat. ```sh -pip install 'autogen-agentchat==0.4.0.dev6' +pip install 'autogen-agentchat==0.4.0.dev7' ``` -[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/agentchat-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_agentchat/autogen_agentchat.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-agentchat/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-agentchat) +[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/agentchat-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_agentchat/autogen_agentchat.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-agentchat/0.4.0.dev7/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-agentchat) ::: (pkg-info-autogen-core)= @@ -46,10 +46,10 @@ pip install 'autogen-agentchat==0.4.0.dev6' Implements the core functionality of the AutoGen framework, providing basic building blocks for creating multi-agent systems. 
```sh -pip install 'autogen-core==0.4.0.dev6' +pip install 'autogen-core==0.4.0.dev7' ``` -[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/core-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_core/autogen_core.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-core/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core) +[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/core-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_core/autogen_core.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-core/0.4.0.dev7/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core) ::: (pkg-info-autogen-ext)= @@ -61,7 +61,7 @@ pip install 'autogen-core==0.4.0.dev6' Implementations of core components that interface with external services, or use extra dependencies. For example, Docker based code execution. 
```sh -pip install 'autogen-ext==0.4.0.dev6' +pip install 'autogen-ext==0.4.0.dev7' ``` Extras: @@ -71,7 +71,7 @@ Extras: - `docker` needed for {py:class}`~autogen_ext.code_executors.DockerCommandLineCodeExecutor` - `openai` needed for {py:class}`~autogen_ext.models.OpenAIChatCompletionClient` -[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/extensions-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_ext/autogen_ext.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-ext/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-ext) +[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/extensions-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_ext/autogen_ext.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-ext/0.4.0.dev7/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-ext) ::: (pkg-info-autogen-magentic-one)= diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md index b1dbca5192b7..74116bc39fa5 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md @@ -61,7 +61,7 @@ Install the `autogen-agentchat` package using pip: ```bash -pip install 'autogen-agentchat==0.4.0.dev6' +pip install 'autogen-agentchat==0.4.0.dev7' ``` ```{note} @@ -74,7 +74,7 @@ To use the OpenAI and Azure OpenAI models, you need to install the following extensions: ```bash -pip install 'autogen-ext[openai]==0.4.0.dev6' +pip install 'autogen-ext[openai]==0.4.0.dev7' ``` ## Install Docker for Code Execution diff --git 
a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb index 061284f78004..9e946a47340b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb @@ -37,7 +37,7 @@ }, "outputs": [], "source": [ - "pip install 'autogen-agentchat==0.4.0.dev6' 'autogen-ext[openai]==0.4.0.dev6'" + "pip install 'autogen-agentchat==0.4.0.dev7' 'autogen-ext[openai]==0.4.0.dev7'" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb index 5251e17c098f..f44a187609ff 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb @@ -30,7 +30,7 @@ }, "outputs": [], "source": [ - "pip install 'autogen-ext[openai]==0.4.0.dev6'" + "pip install 'autogen-ext[openai]==0.4.0.dev7'" ] }, { @@ -110,7 +110,7 @@ }, "outputs": [], "source": [ - "pip install 'autogen-ext[openai,azure]==0.4.0.dev6'" + "pip install 'autogen-ext[openai,azure]==0.4.0.dev7'" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index b0b9c5e3f1fc..8bdc396a3196 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -24,7 +24,7 @@ "````{note}\n", "The distributed agent runtime requires extra dependencies, install them using:\n", 
"```bash\n", - "pip install autogen-core[grpc]==0.4.0.dev6\n", + "pip install autogen-core[grpc]==0.4.0.dev7\n", "```\n", "````\n", "\n", diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index d0711091c9da..064928590b0f 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-core" -version = "0.4.0.dev6" +version = "0.4.0.dev7" license = {file = "LICENSE-CODE"} description = "Foundational interfaces and agent runtime implementation for AutoGen" readme = "README.md" diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index f9d741842d7f..3d263df51a50 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ b/python/packages/autogen-ext/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-ext" -version = "0.4.0.dev6" +version = "0.4.0.dev7" license = {file = "LICENSE-CODE"} description = "AutoGen extensions library" readme = "README.md" @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-core==0.4.0.dev6", + "autogen-core==0.4.0.dev7", ] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py index f7d4adb31ce2..b00ba32da559 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py @@ -908,7 +908,7 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): .. 
code-block:: bash - pip install 'autogen-ext[openai]==0.4.0.dev6' + pip install 'autogen-ext[openai]==0.4.0.dev7' The following code snippet shows how to use the client with an OpenAI model: @@ -988,7 +988,7 @@ class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): .. code-block:: bash - pip install 'autogen-ext[openai,azure]==0.4.0.dev6' + pip install 'autogen-ext[openai,azure]==0.4.0.dev7' To use the client, you need to provide your deployment id, Azure Cognitive Services endpoint, api version, and model capabilities. diff --git a/python/packages/autogen-studio/pyproject.toml b/python/packages/autogen-studio/pyproject.toml index 41e4dea45821..fc3acee48e0f 100644 --- a/python/packages/autogen-studio/pyproject.toml +++ b/python/packages/autogen-studio/pyproject.toml @@ -23,7 +23,7 @@ dependencies = [ "pydantic-settings", "fastapi", "typer", - "uvicorn", + "uvicorn", "python-dotenv", "websockets", "numpy < 2.0.0", @@ -32,9 +32,9 @@ dependencies = [ "alembic", "loguru", "pyyaml", - "autogen-core==0.4.0.dev6", - "autogen-agentchat==0.4.0.dev6", - "autogen-ext==0.4.0.dev6" + "autogen-core==0.4.0.dev7", + "autogen-agentchat==0.4.0.dev7", + "autogen-ext==0.4.0.dev7" ] optional-dependencies = {web = ["fastapi", "uvicorn"], database = ["psycopg"]} diff --git a/python/uv.lock b/python/uv.lock index 1d833a69ab21..4602559d0e01 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -315,7 +315,7 @@ wheels = [ [[package]] name = "autogen-agentchat" -version = "0.4.0.dev6" +version = "0.4.0.dev7" source = { editable = "packages/autogen-agentchat" } dependencies = [ { name = "autogen-core" }, @@ -329,7 +329,7 @@ dev = [] [[package]] name = "autogen-core" -version = "0.4.0.dev6" +version = "0.4.0.dev7" source = { editable = "packages/autogen-core" } dependencies = [ { name = "aiohttp" }, @@ -450,7 +450,7 @@ dev = [ [[package]] name = "autogen-ext" -version = "0.4.0.dev6" +version = "0.4.0.dev7" source = { editable = "packages/autogen-ext" } dependencies = [ { name = 
"autogen-core" }, From 3a1625f44b4812295425cf03248ff7e126ac886a Mon Sep 17 00:00:00 2001 From: Ryan Sweet Date: Mon, 25 Nov 2024 14:38:50 -0800 Subject: [PATCH 31/33] first attempt (#4362) --- dotnet/Directory.Packages.props | 61 +++++++++++++++++---------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index 0f6ecd10b1d9..9bde32e6d012 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -33,12 +33,12 @@ - + - + - + @@ -50,33 +50,33 @@ - - - - + + + + - - + + - - + + runtime; build; native; contentfiles; analyzers; buildtransitive all - - - - - - - - - - - + + + + + + + + + + + @@ -85,39 +85,40 @@ - + - - + + - + - - + + + - + - + \ No newline at end of file From b94abb2a6c10aca3c3f8d894b9c708e3f7edd37b Mon Sep 17 00:00:00 2001 From: Ryan Sweet Date: Mon, 25 Nov 2024 16:29:51 -0800 Subject: [PATCH 32/33] =?UTF-8?q?add=20default=20subscriptions=20for=20the?= =?UTF-8?q?=20agent=20type=20-=20Implicitly=20created=20sub=E2=80=A6=20(#4?= =?UTF-8?q?324)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add default subscriptions for the agent type - Implicitly created subscription for agent RPC #4321 * add default sub for agenttype+id * fix subscription implementation for in memory runtime --------- Co-authored-by: XiaoYun Zhang --- .../src/Microsoft.AutoGen/Agents/AgentBase.cs | 43 ++++++++++++------- .../Agents/Services/AgentWorker.cs | 32 +++++++++++--- .../Agents/Services/Grpc/GrpcGateway.cs | 16 +++++++ .../AgentBaseTests.cs | 4 ++ 4 files changed, 75 insertions(+), 20 deletions(-) diff --git a/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs b/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs index 345e6d34c826..01ad856a2d49 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs @@ -15,27 +15,40 @@ namespace Microsoft.AutoGen.Agents; public 
abstract class AgentBase : IAgentBase, IHandle { public static readonly ActivitySource s_source = new("AutoGen.Agent"); - public AgentId AgentId => _context.AgentId; + public AgentId AgentId => _runtime.AgentId; private readonly object _lock = new(); private readonly Dictionary> _pendingRequests = []; private readonly Channel _mailbox = Channel.CreateUnbounded(); - private readonly IAgentRuntime _context; + private readonly IAgentRuntime _runtime; public string Route { get; set; } = "base"; protected internal ILogger _logger; - public IAgentRuntime Context => _context; + public IAgentRuntime Context => _runtime; protected readonly EventTypes EventTypes; protected AgentBase( - IAgentRuntime context, + IAgentRuntime runtime, EventTypes eventTypes, ILogger? logger = null) { - _context = context; - context.AgentInstance = this; + _runtime = runtime; + runtime.AgentInstance = this; this.EventTypes = eventTypes; _logger = logger ?? LoggerFactory.Create(builder => { }).CreateLogger(); + var subscriptionRequest = new AddSubscriptionRequest + { + RequestId = Guid.NewGuid().ToString(), + Subscription = new Subscription + { + TypeSubscription = new TypeSubscription + { + AgentType = this.AgentId.Type, + TopicType = this.AgentId.Type + "/" + this.AgentId.Key + } + } + }; + _runtime.SendMessageAsync(new Message { AddSubscriptionRequest = subscriptionRequest }).AsTask().Wait(); Completion = Start(); } internal Task Completion { get; } @@ -131,19 +144,19 @@ public List Subscribe(string topic) } } }; - _context.SendMessageAsync(message).AsTask().Wait(); + _runtime.SendMessageAsync(message).AsTask().Wait(); return new List { topic }; } public async Task StoreAsync(AgentState state, CancellationToken cancellationToken = default) { - await _context.StoreAsync(state, cancellationToken).ConfigureAwait(false); + await _runtime.StoreAsync(state, cancellationToken).ConfigureAwait(false); return; } public async Task ReadAsync(AgentId agentId, CancellationToken cancellationToken = default) 
where T : IMessage, new() { - var agentState = await _context.ReadAsync(agentId, cancellationToken).ConfigureAwait(false); - return agentState.FromAgentState(); + var agentstate = await _runtime.ReadAsync(agentId, cancellationToken).ConfigureAwait(false); + return agentstate.FromAgentState(); } private void OnResponseCore(RpcResponse response) { @@ -171,7 +184,7 @@ private async Task OnRequestCoreAsync(RpcRequest request, CancellationToken canc { response = new RpcResponse { Error = ex.Message }; } - await _context.SendResponseAsync(request, response, cancellationToken).ConfigureAwait(false); + await _runtime.SendResponseAsync(request, response, cancellationToken).ConfigureAwait(false); } protected async Task RequestAsync(AgentId target, string method, Dictionary parameters) @@ -195,7 +208,7 @@ protected async Task RequestAsync(AgentId target, string method, Di activity?.SetTag("peer.service", target.ToString()); var completion = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); - _context.Update(request, activity); + _runtime.Update(request, activity); await this.InvokeWithActivityAsync( static async ((AgentBase Agent, RpcRequest Request, TaskCompletionSource) state, CancellationToken ct) => { @@ -206,7 +219,7 @@ static async ((AgentBase Agent, RpcRequest Request, TaskCompletionSource { - await state.Agent._context.PublishEventAsync(state.Event, ct).ConfigureAwait(false); + await state.Agent._runtime.PublishEventAsync(state.Event).ConfigureAwait(false); }, (this, item), activity, diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorker.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorker.cs index a69da96fb3d4..f9a5050534c8 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorker.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorker.cs @@ -24,6 +24,8 @@ public class AgentWorker : private readonly CancellationTokenSource _shutdownCts; private readonly IServiceProvider _serviceProvider; 
private readonly IEnumerable> _configuredAgentTypes; + private readonly ConcurrentDictionary _subscriptionsByAgentType = new(); + private readonly ConcurrentDictionary> _subscriptionsByTopic = new(); private readonly DistributedContextPropagator _distributedContextPropagator; private readonly CancellationTokenSource _shutdownCancellationToken = new(); private Task? _mailboxTask; @@ -96,11 +98,7 @@ public async Task RunMessagePump() if (message == null) { continue; } switch (message) { - case Message.MessageOneofCase.AddSubscriptionResponse: - break; - case Message.MessageOneofCase.RegisterAgentTypeResponse: - break; - case Message msg: + case Message msg when msg.CloudEvent != null: var item = msg.CloudEvent; @@ -110,6 +108,13 @@ public async Task RunMessagePump() agentToInvoke.ReceiveMessage(msg); } break; + case Message msg when msg.AddSubscriptionRequest != null: + await AddSubscriptionRequestAsync(msg.AddSubscriptionRequest).ConfigureAwait(true); + break; + case Message msg when msg.AddSubscriptionResponse != null: + break; + case Message msg when msg.RegisterAgentTypeResponse != null: + break; default: throw new InvalidOperationException($"Unexpected message '{message}'."); } @@ -123,6 +128,23 @@ public async Task RunMessagePump() } } } + private async ValueTask AddSubscriptionRequestAsync(AddSubscriptionRequest subscription) + { + var topic = subscription.Subscription.TypeSubscription.TopicType; + var agentType = subscription.Subscription.TypeSubscription.AgentType; + _subscriptionsByAgentType[agentType] = subscription.Subscription; + _subscriptionsByTopic.GetOrAdd(topic, _ => []).Add(agentType); + Message response = new() + { + AddSubscriptionResponse = new() + { + RequestId = subscription.RequestId, + Error = "", + Success = true + } + }; + await _mailbox.Writer.WriteAsync(response).ConfigureAwait(false); + } public async Task StartAsync(CancellationToken cancellationToken) { diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcGateway.cs 
b/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcGateway.cs index 45477c8eb5a6..ab24a0e15fe5 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcGateway.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcGateway.cs @@ -153,6 +153,22 @@ private async ValueTask RegisterAgentTypeAsync(GrpcWorkerConnection connection, Success = true } }; + // add a default subscription for the agent type + //TODO: we should consider having constraints on the namespace or at least migrate all our examples to use well typed namesspaces like com.microsoft.autogen/hello/HelloAgents etc + var subscriptionRequest = new AddSubscriptionRequest + { + RequestId = Guid.NewGuid().ToString(), + Subscription = new Subscription + { + TypeSubscription = new TypeSubscription + { + AgentType = msg.Type, + TopicType = msg.Type + } + } + }; + await AddSubscriptionAsync(connection, subscriptionRequest).ConfigureAwait(true); + await connection.ResponseStream.WriteAsync(response).ConfigureAwait(false); } private async ValueTask DispatchEventAsync(CloudEvent evt) diff --git a/dotnet/test/Microsoft.AutoGen.Agents.Tests/AgentBaseTests.cs b/dotnet/test/Microsoft.AutoGen.Agents.Tests/AgentBaseTests.cs index e58fdb00f0a0..7e272ce6bed9 100644 --- a/dotnet/test/Microsoft.AutoGen.Agents.Tests/AgentBaseTests.cs +++ b/dotnet/test/Microsoft.AutoGen.Agents.Tests/AgentBaseTests.cs @@ -23,6 +23,10 @@ public class AgentBaseTests(InMemoryAgentRuntimeFixture fixture) public async Task ItInvokeRightHandlerTestAsync() { var mockContext = new Mock(); + mockContext.SetupGet(x => x.AgentId).Returns(new AgentId("test", "test")); + // mock SendMessageAsync + mockContext.Setup(x => x.SendMessageAsync(It.IsAny(), It.IsAny())) + .Returns(new ValueTask()); var agent = new TestAgent(mockContext.Object, new EventTypes(TypeRegistry.Empty, [], []), new Logger(new LoggerFactory())); await agent.HandleObject("hello world"); From bcd6e71e7f762c94d6e2c5a48d4e9856aa46da59 Mon Sep 17 00:00:00 2001 From: Eric Zhu 
Date: Mon, 25 Nov 2024 18:18:13 -0800 Subject: [PATCH 33/33] Fix assistant agent doc (#4365) --- .../src/autogen_agentchat/agents/_assistant_agent.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index cb1eff8d6f6e..0870a6c2f3b0 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -82,13 +82,13 @@ def _handoff_tool() -> str: class AssistantAgent(BaseChatAgent): """An agent that provides assistance with tool use. - It responds with a StopMessage when 'terminate' is detected in the response. - Args: name (str): The name of the agent. model_client (ChatCompletionClient): The model client to use for inference. tools (List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional): The tools to register with the agent. - handoffs (List[Handoff | str] | None, optional): The handoff configurations for the agent, allowing it to transfer to other agents by responding with a HandoffMessage. + handoffs (List[Handoff | str] | None, optional): The handoff configurations for the agent, + allowing it to transfer to other agents by responding with a :class:`HandoffMessage`. + The transfer is only executed when the team is in :class:`~autogen_agentchat.teams.Swarm`. If a handoff is a string, it should represent the target agent's name. description (str, optional): The description of the agent. system_message (str, optional): The system message for the model.