feat: add azure openai and vertexai (#15)
Signed-off-by: Akihiko Kuroda <akihikokuroda2020@gmail.com>
akihikokuroda authored Dec 9, 2024
1 parent 3a2f41a commit d563d1e
Showing 2 changed files with 31 additions and 0 deletions.
12 changes: 12 additions & 0 deletions .env.template
@@ -23,6 +23,18 @@ LLM_BACKEND="ollama"
# GENAI_API_KEY=""
# GENAI_MODEL="meta-llama/llama-3-1-70b-instruct"

## Azure OpenAI
# OPENAI_MODEL="gpt-4o-mini"
# OPENAI_API_VERSION="2024-08-01-preview"
# AZURE_DEPLOYMENT_NAME=""
# AZURE_OPENAI_API_KEY=""
# AZURE_OPENAI_ENDPOINT=""

## VertexAI
# VERTEXAI_MODEL="gemini-1.5-flash-001"
# VERTEXAI_LOCATION="us-central1"
# VERTEXAI_PROJECT=""

# Tools
CODE_INTERPRETER_URL="http://127.0.0.1:50051"

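As an example, a filled-in .env for the new Azure OpenAI backend might look like the snippet below. The deployment name, key, and endpoint values are placeholders, and selecting the backend with LLM_BACKEND="azure" is an assumption based on the Providers.AZURE value added in src/helpers/llm.ts.

LLM_BACKEND="azure"
OPENAI_MODEL="gpt-4o-mini"
OPENAI_API_VERSION="2024-08-01-preview"
# Placeholder values below; fill in from your Azure OpenAI resource.
AZURE_DEPLOYMENT_NAME="my-gpt-4o-mini"
AZURE_OPENAI_API_KEY="<your-azure-openai-key>"
AZURE_OPENAI_ENDPOINT="https://<your-resource>.openai.azure.com"
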
19 changes: 19 additions & 0 deletions src/helpers/llm.ts
@@ -7,6 +7,7 @@ import { Client as BAMSDK } from "@ibm-generative-ai/node-sdk";
import { OpenAIChatLLM } from "bee-agent-framework/adapters/openai/chat";
import { OllamaChatLLM } from "bee-agent-framework/adapters/ollama/chat";
import { GroqChatLLM } from "bee-agent-framework/adapters/groq/chat";
import { VertexAIChatLLM } from "bee-agent-framework/adapters/vertexai/chat";
import { Ollama } from "ollama";
import Groq from "groq-sdk";

@@ -16,6 +17,8 @@ export const Providers = {
OLLAMA: "ollama",
OPENAI: "openai",
GROQ: "groq",
AZURE: "azure",
VERTEXAI: "vertexai",
} as const;
type Provider = (typeof Providers)[keyof typeof Providers];

@@ -59,6 +62,22 @@ export const LLMFactories: Record<Provider, () => ChatLLM<ChatLLMOutput>> = {
projectId: getEnv("WATSONX_PROJECT_ID"),
region: getEnv("WATSONX_REGION"),
}),
[Providers.AZURE]: () =>
new OpenAIChatLLM({
modelId: getEnv("OPENAI_MODEL") || "gpt-4o-mini",
azure: true,
parameters: {
temperature: 0,
max_tokens: 2048,
},
}),
[Providers.VERTEXAI]: () =>
new VertexAIChatLLM({
modelId: getEnv("VERTEXAI_MODEL") || "gemini-1.5-flash-001",
location: getEnv("VERTEXAI_LOCATION") || "us-central1",
project: getEnv("VERTEXAI_PROJECT"),
parameters: {},
}),
};

export function getChatLLM(provider?: Provider): ChatLLM<ChatLLMOutput> {
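
A minimal usage sketch of the two new providers through the getChatLLM factory in this file; the import path is hypothetical, and falling back to LLM_BACKEND when no provider is passed is an assumption based on the optional parameter.

import { getChatLLM, Providers } from "./helpers/llm.js"; // hypothetical import path

// Explicitly select a backend; each factory reads the env vars listed in .env.template above.
const azureLLM = getChatLLM(Providers.AZURE);     // OPENAI_MODEL plus AZURE_OPENAI_* settings
const vertexLLM = getChatLLM(Providers.VERTEXAI); // VERTEXAI_MODEL, VERTEXAI_LOCATION, VERTEXAI_PROJECT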
