
feat!: bump bee-agent-framework and bee-code-interpreter
Signed-off-by: Tomas Dvorak <toomas2d@gmail.com>
Tomas2D committed Jan 13, 2025
1 parent f2b9238 commit e154ee6
Showing 7 changed files with 14 additions and 90 deletions.
8 changes: 2 additions & 6 deletions .env.template
@@ -1,4 +1,4 @@
-# LLM Provider (watsonx/ollama/openai/groq/bam)
+# LLM Provider (watsonx/ollama/openai/groq)
 LLM_BACKEND="ollama"
 
 ## WatsonX
@@ -19,10 +19,6 @@ LLM_BACKEND="ollama"
 # GROQ_API_KEY=""
 # GROQ_MODEL="llama-3.1-70b-versatile"
 
-## BAM
-# GENAI_API_KEY=""
-# GENAI_MODEL="meta-llama/llama-3-1-70b-instruct"
-
 ## Azure OpenAI
 # OPENAI_MODEL="gpt-4o-mini"
 # OPENAI_API_VERSION="2024-08-01-preview"
@@ -36,7 +32,7 @@ LLM_BACKEND="ollama"
 # VERTEXAI_PROJECT=""
 
 # Tools
-CODE_INTERPRETER_URL="http://127.0.0.1:50051"
+CODE_INTERPRETER_URL="http://127.0.0.1:50081"
 
 # Framework related
 BEE_FRAMEWORK_LOG_PRETTY=true
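
The two changes above are consumed at startup: the provider list loses `bam`, and the interpreter URL moves from port 50051 to 50081. As a minimal sketch of how the surviving variables might be validated, using the `zod` dependency the project already imports in `src/helpers/llm.ts` (the schema below is illustrative, not code from this commit):

```ts
import { z } from "zod";

// Illustrative schema mirroring .env.template after this commit (BAM removed).
const EnvSchema = z.object({
  LLM_BACKEND: z.enum(["watsonx", "ollama", "openai", "groq"]).default("ollama"),
  // Default mirrors the new host port introduced by this commit.
  CODE_INTERPRETER_URL: z.string().url().default("http://127.0.0.1:50081"),
});

const env = EnvSchema.parse(process.env);
console.info(`backend=${env.LLM_BACKEND} interpreter=${env.CODE_INTERPRETER_URL}`);
```
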
2 changes: 1 addition & 1 deletion README.md
@@ -56,7 +56,7 @@ The [Bee Code Interpreter](https://github.com/i-am-bee/bee-code-interpreter) is
 
 > [!NOTE]
 >
-> Code Interpreter runs on `http://127.0.0.1:50051`.
+> Code Interpreter runs on `http://127.0.0.1:50081`.
 
 ## 🔎 Observability
2 changes: 1 addition & 1 deletion docker-compose.yml
@@ -20,7 +20,7 @@ services:
       - ${CODE_INTEPRETER_TMPDIR:-./tmp/code_interpreter_target}:/storage
       - ./infra/bee-code-interpreter.yaml:/var/lib/rancher/k3s/server/manifests/bee-code-interpreter.yaml
     ports:
-      - "50051:30051"
+      - "50081:30051"
     healthcheck:
       test: "kubectl get pod code-interpreter | grep Running"
       interval: 10s
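
In the compose mapping, `50081:30051` is `host:container`: only the host side changes here, while 30051 remains the NodePort exposed inside the k3s container. A quick way to confirm the new host port actually answers before running the agent (a hypothetical check, not part of the repository):

```ts
import net from "node:net";

// Hypothetical smoke test: confirm the interpreter's endpoint answers
// on the new host port (50081) before the agent starts.
const socket = net.createConnection({ host: "127.0.0.1", port: 50081 }, () => {
  console.log("code interpreter port is open");
  socket.end();
});
socket.on("error", (err) => {
  console.error(`cannot reach code interpreter: ${err.message}`);
});
```
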
10 changes: 5 additions & 5 deletions infra/bee-code-interpreter.yaml
@@ -35,12 +35,12 @@ spec:
       serviceAccountName: code-interpreter-sa
       containers:
         - name: code-interpreter
-          image: icr.io/i-am-bee/bee-code-interpreter:0.0.28
+          image: icr.io/i-am-bee/bee-code-interpreter:0.0.29
           ports:
-            - containerPort: 50051
+            - containerPort: 50081
           env:
             - name: APP_EXECUTOR_IMAGE
-              value: icr.io/i-am-bee/bee-code-executor:0.0.28
+              value: icr.io/i-am-bee/bee-code-executor:0.0.29
            - name: APP_FILE_STORAGE_PATH
              value: /storage
            - name: APP_EXECUTOR_POD_QUEUE_TARGET_LENGTH
@@ -61,8 +61,8 @@ metadata:
 spec:
   type: NodePort
   ports:
-    - port: 50051
-      targetPort: 50051
+    - port: 50081
+      targetPort: 50081
       nodePort: 30051
   selector:
     app: code-interpreter
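
Taken together with docker-compose.yml, the traffic path after this commit is: host `127.0.0.1:50081` → k3s NodePort `30051` → Service `targetPort: 50081` → pod `containerPort: 50081`, so `CODE_INTERPRETER_URL` and the pod now agree on 50081 end to end. On the framework side, this URL typically feeds the Python tool; a hedged sketch assuming the `PythonTool` API of bee-agent-framework at this version (option and import names may differ):

```ts
import { PythonTool } from "bee-agent-framework/tools/python/python";
import { LocalPythonStorage } from "bee-agent-framework/tools/python/storage";
import { getEnv } from "bee-agent-framework/internals/env";

// Sketch: point the tool at the interpreter exposed on the new host port.
const codeInterpreter = new PythonTool({
  codeInterpreter: { url: getEnv("CODE_INTERPRETER_URL") ?? "http://127.0.0.1:50081" },
  storage: new LocalPythonStorage({
    interpreterWorkingDir: "./tmp/code_interpreter_target", // matches the compose volume
    localWorkingDir: "./tmp/local",
  }),
});
```
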
71 changes: 4 additions & 67 deletions package-lock.json

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion package.json
@@ -39,7 +39,7 @@
     "@google-cloud/vertexai": "^1.9.2",
     "@ibm-generative-ai/node-sdk": "^3.2.4",
     "@opentelemetry/sdk-node": "^0.57.0",
-    "bee-agent-framework": "^0.0.56",
+    "bee-agent-framework": "^0.0.57",
     "bee-observe-connector": "^0.0.6",
     "dotenv": "^16.4.5",
     "groq-sdk": "^0.7.0",
9 changes: 0 additions & 9 deletions src/helpers/llm.ts
@@ -2,8 +2,6 @@ import { ChatLLM, ChatLLMOutput } from "bee-agent-framework/llms/chat";
 import { getEnv, parseEnv } from "bee-agent-framework/internals/env";
 import { z } from "zod";
 import { WatsonXChatLLM } from "bee-agent-framework/adapters/watsonx/chat";
-import { BAMChatLLM } from "bee-agent-framework/adapters/bam/chat";
-import { Client as BAMSDK } from "@ibm-generative-ai/node-sdk";
 import { OpenAIChatLLM } from "bee-agent-framework/adapters/openai/chat";
 import { OllamaChatLLM } from "bee-agent-framework/adapters/ollama/chat";
 import { GroqChatLLM } from "bee-agent-framework/adapters/groq/chat";
@@ -12,7 +10,6 @@ import { Ollama } from "ollama";
 import Groq from "groq-sdk";
 
 export const Providers = {
-  BAM: "bam",
   WATSONX: "watsonx",
   OLLAMA: "ollama",
   OPENAI: "openai",
@@ -23,12 +20,6 @@ export const Providers = {
 type Provider = (typeof Providers)[keyof typeof Providers];
 
 export const LLMFactories: Record<Provider, () => ChatLLM<ChatLLMOutput>> = {
-  [Providers.BAM]: () =>
-    BAMChatLLM.fromPreset(getEnv("GENAI_MODEL") || "meta-llama/llama-3-1-70b-instruct", {
-      client: new BAMSDK({
-        apiKey: getEnv("GENAI_API_KEY"),
-      }),
-    }),
   [Providers.GROQ]: () =>
     new GroqChatLLM({
       modelId: getEnv("GROQ_MODEL") || "llama-3.1-70b-versatile",
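
With the BAM entry gone, selecting `bam` as `LLM_BACKEND` no longer resolves to a factory. The selection logic itself is outside this hunk; a plausible sketch of how the map is consumed, reusing `getEnv`, `Provider`, and `LLMFactories` from this same module (not code shown in the diff):

```ts
// Sketch (same module): resolve the provider lazily; "bam" now fails fast.
export function getChatLLM(): ChatLLM<ChatLLMOutput> {
  const provider = (getEnv("LLM_BACKEND") || "ollama") as Provider;
  const factory = LLMFactories[provider];
  if (!factory) {
    throw new Error(`Unsupported LLM backend: ${provider}`);
  }
  return factory();
}
```
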
