linting + BUILD files
logan-markewich committed Jan 10, 2025
1 parent 001e69a commit aa05077
Showing 11 changed files with 78 additions and 88 deletions.
@@ -86,4 +86,4 @@ dmypy.json
 .pyre/

 # Poetry
-poetry.lock
+poetry.lock
@@ -1,3 +1,3 @@
-package(default_visibility = ["//visibility:public"])
-
-exports_files(["pyproject.toml"])
+poetry_requirements(
+    name="poetry",
+)
@@ -25,4 +25,4 @@ install:
 	poetry install --with dev

 install_editable:
-	pip install -e ".[dev]"
+	pip install -e ".[dev]"
@@ -24,7 +24,7 @@ llm = BedrockMultiModal(
     model="anthropic.claude-3-haiku-20240307-v1:0", # or other Bedrock multi-modal models
     temperature=0.0,
     max_tokens=300,
-    region_name="eu-central-1" # make sure to use the region where the model access is granted
+    region_name="eu-central-1", # make sure to use the region where the model access is granted
 )

 # Method 1: Load images using SimpleDirectoryReader
@@ -36,13 +36,13 @@ image_documents = SimpleDirectoryReader(
 image_doc = ImageDocument(
     image_path="/path/to/image.jpg", # Local file path
     # OR
-    image="base64_encoded_image_string" # Base64 encoded image
+    image="base64_encoded_image_string", # Base64 encoded image
 )

 # Get a completion with both text and image
 response = llm.complete(
     prompt="Describe this image in detail:",
-    image_documents=image_documents # or [image_doc]
+    image_documents=image_documents, # or [image_doc]
 )

 print(response.text)
@@ -53,23 +53,26 @@ print(response.text)
 You can authenticate with AWS Bedrock in several ways:

 1. Environment variables:
+
 ```bash
 export AWS_ACCESS_KEY_ID=your_access_key
 export AWS_SECRET_ACCESS_KEY=your_secret_key
 export AWS_REGION=us-east-1 # optional
 ```

 2. Explicit credentials:
+
 ```python
 llm = BedrockMultiModal(
     model="anthropic.claude-3-haiku-20240307-v1:0",
     aws_access_key_id="your_access_key",
     aws_secret_access_key="your_secret_key",
-    region_name="eu-central-1"
+    region_name="eu-central-1",
 )
 ```

 3. AWS CLI configuration:
+
 ```bash
 aws configure
 ```
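Whichever method you use, the model must also be enabled for your account in the chosen region. A quick sanity check, sketched here with the standard `boto3` Bedrock control-plane API (not part of this package), is to list the foundation models your credentials can see:

```python
import boto3

# Uses whichever credentials boto3 resolves (env vars, profile, or IAM role).
bedrock = boto3.client("bedrock", region_name="eu-central-1")

# Confirm the Claude 3 Haiku model ID from the README is available here.
models = bedrock.list_foundation_models(byOutputModality="TEXT")
for summary in models["modelSummaries"]:
    if "claude-3-haiku" in summary["modelId"]:
        print(summary["modelId"])
```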
@@ -96,8 +99,7 @@ image_docs = SimpleDirectoryReader(
 ).load_data()

 response = llm.complete(
-    prompt="Compare these two images:",
-    image_documents=image_docs
+    prompt="Compare these two images:", image_documents=image_docs
 )

 # Custom parameters
@@ -109,7 +111,7 @@ llm = BedrockMultiModal(
     max_retries=10, # Maximum number of API retries
     additional_kwargs={
         # Add other model-specific parameters
-    }
+    },
 )

 # Response includes token counts
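The hunk is truncated here, but `_complete` (in `base.py` below) surfaces the Bedrock token-count response headers in `additional_kwargs`, so a usage sketch consistent with that implementation would be:

```python
response = llm.complete(
    prompt="Describe this image:",
    image_documents=[image_doc],
)

# Populated from the x-amzn-bedrock-*-token-count HTTP headers (see base.py).
print("input tokens:", response.additional_kwargs["input_tokens"])
print("output tokens:", response.additional_kwargs["output_tokens"])
```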
@@ -133,4 +135,4 @@ pytest tests/

 ## License

-This project is licensed under the MIT License.
+This project is licensed under the MIT License.
@@ -0,0 +1 @@
+python_sources()
@@ -1,3 +1,3 @@
 from llama_index.multi_modal_llms.bedrock.base import BedrockMultiModal

-__all__ = ["BedrockMultiModal"]
+__all__ = ["BedrockMultiModal"]
@@ -1,13 +1,10 @@
-from typing import Any, Callable, Dict, List, Optional, Sequence
+from typing import Any, Callable, Dict, Optional, Sequence

 import boto3
 import aioboto3
 from botocore.config import Config
 from llama_index.core.base.llms.types import (
     CompletionResponse,
-    CompletionResponseGen,
-    CompletionResponseAsyncGen,
-    MessageRole,
 )
 from llama_index.core.bridge.pydantic import Field, PrivateAttr
 from llama_index.core.callbacks import CallbackManager
@@ -147,7 +144,7 @@ def _get_client(self) -> Any:
             aws_secret_access_key=self.aws_secret_access_key,
             region_name=self.region_name,
         )
-        return session.client('bedrock-runtime', config=self._config)
+        return session.client("bedrock-runtime", config=self._config)

     @classmethod
     def class_name(cls) -> str:
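The construction of `self._config` is collapsed in this diff. A hypothetical reconstruction of how `max_retries` (mentioned in the README above) could map onto botocore's standard retry and timeout settings; the actual field names and values used by this class are not shown here:

```python
from botocore.config import Config

# Hypothetical mapping; timeouts are illustrative assumptions.
config = Config(
    retries={"max_attempts": 10, "mode": "standard"},  # README's max_retries=10
    connect_timeout=60.0,
    read_timeout=60.0,
)
```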
@@ -169,19 +166,19 @@ def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
             "contentType": "application/json",
             "accept": "application/json",
         }
-        
+
         if self.model.startswith("anthropic.claude"):
             model_kwargs["body"] = {
                 "anthropic_version": "bedrock-2023-05-31",
                 "max_tokens": self.max_tokens if self.max_tokens is not None else 300,
                 "temperature": self.temperature,
             }
-        
+
         # Add any additional kwargs
         if "body" in model_kwargs:
             model_kwargs["body"].update(self.additional_kwargs)
             model_kwargs["body"].update(kwargs)
-        
+
         return model_kwargs

     def _complete(
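`_get_model_kwargs` only fills in the envelope (`anthropic_version`, `max_tokens`, `temperature`); the code that assembles the `messages` payload with the image content is collapsed in this diff. For orientation, a request body in the Bedrock Anthropic Messages format looks roughly like this, a sketch of the documented wire format rather than code from this commit:

```python
# Sketch of a Bedrock Anthropic Messages request body with one image + text turn.
body = {
    "anthropic_version": "bedrock-2023-05-31",
    "max_tokens": 300,
    "temperature": 0.0,
    "messages": [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "source": {
                        "type": "base64",
                        "media_type": "image/jpeg",
                        "data": "<base64-encoded image bytes>",
                    },
                },
                {"type": "text", "text": "Describe this image in detail:"},
            ],
        }
    ],
}
```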
@@ -203,6 +200,7 @@ def _complete(
         # Convert body to JSON string
         if isinstance(model_kwargs.get("body"), dict):
             import json
+
             body_str = json.dumps(model_kwargs["body"])
             del model_kwargs["body"]
         else:
@@ -219,7 +217,7 @@ def _complete(

         # Parse the streaming response body
         response_body = json.loads(response["body"].read())
-        
+
         # Parse response based on model
         if self.model.startswith("anthropic.claude"):
             completion = response_body["content"][0]["text"]
@@ -231,8 +229,12 @@ def _complete(
             text=completion,
             raw=response_body,
             additional_kwargs={
-                "input_tokens": response["ResponseMetadata"]["HTTPHeaders"].get("x-amzn-bedrock-input-token-count"),
-                "output_tokens": response["ResponseMetadata"]["HTTPHeaders"].get("x-amzn-bedrock-output-token-count"),
+                "input_tokens": response["ResponseMetadata"]["HTTPHeaders"].get(
+                    "x-amzn-bedrock-input-token-count"
+                ),
+                "output_tokens": response["ResponseMetadata"]["HTTPHeaders"].get(
+                    "x-amzn-bedrock-output-token-count"
+                ),
             },
         )

@@ -261,6 +263,7 @@ async def acomplete(
         # Convert body to JSON string
         if isinstance(model_kwargs.get("body"), dict):
             import json
+
             body_str = json.dumps(model_kwargs["body"])
             del model_kwargs["body"]
         else:
@@ -278,7 +281,7 @@ async def acomplete(

         # Parse the streaming response body
         response_body = json.loads(await response["body"].read())
-        
+
         # Parse response based on model
         if self.model.startswith("anthropic.claude"):
             completion = response_body["content"][0]["text"]
@@ -290,39 +293,43 @@ async def acomplete(
             text=completion,
             raw=response_body,
             additional_kwargs={
-                "input_tokens": response["ResponseMetadata"]["HTTPHeaders"].get("x-amzn-bedrock-input-token-count"),
-                "output_tokens": response["ResponseMetadata"]["HTTPHeaders"].get("x-amzn-bedrock-output-token-count"),
+                "input_tokens": response["ResponseMetadata"]["HTTPHeaders"].get(
+                    "x-amzn-bedrock-input-token-count"
+                ),
+                "output_tokens": response["ResponseMetadata"]["HTTPHeaders"].get(
+                    "x-amzn-bedrock-output-token-count"
+                ),
             },
         )

     def chat(self, messages: Sequence[Any], **kwargs: Any) -> Any:
         """Chat with the model."""
         raise NotImplementedError("Chat is not supported for this model.")

-    def stream_chat(
-        self, messages: Sequence[Any], **kwargs: Any
-    ) -> Any:
+    def stream_chat(self, messages: Sequence[Any], **kwargs: Any) -> Any:
         """Stream chat with the model."""
         raise NotImplementedError("Stream chat is not supported for this model.")

     async def achat(self, messages: Sequence[Any], **kwargs: Any) -> Any:
         """Chat with the model asynchronously."""
         raise NotImplementedError("Async chat is not supported for this model.")

-    async def astream_chat(
-        self, messages: Sequence[Any], **kwargs: Any
-    ) -> Any:
+    async def astream_chat(self, messages: Sequence[Any], **kwargs: Any) -> Any:
         """Stream chat with the model asynchronously."""
         raise NotImplementedError("Async stream chat is not supported for this model.")

     def stream_complete(
         self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
     ) -> Any:
         """Complete the prompt with image support in a streaming fashion."""
-        raise NotImplementedError("Streaming completion is not supported for this model.")
+        raise NotImplementedError(
+            "Streaming completion is not supported for this model."
+        )

     async def astream_complete(
         self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
     ) -> Any:
         """Complete the prompt with image support in a streaming fashion asynchronously."""
-        raise NotImplementedError("Async streaming completion is not supported for this model.")
+        raise NotImplementedError(
+            "Async streaming completion is not supported for this model."
+        )
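Since `acomplete` drives an `aioboto3` client, it must be awaited. A minimal usage sketch, assuming `acomplete` mirrors the synchronous `complete` signature shown in the README and that `ImageDocument` comes from the standard `llama_index.core.schema` module:

```python
import asyncio

from llama_index.core.schema import ImageDocument
from llama_index.multi_modal_llms.bedrock import BedrockMultiModal


async def main() -> None:
    llm = BedrockMultiModal(
        model="anthropic.claude-3-haiku-20240307-v1:0",
        region_name="eu-central-1",
    )
    image_doc = ImageDocument(image_path="/path/to/image.jpg")
    response = await llm.acomplete(
        prompt="Describe this image in detail:",
        image_documents=[image_doc],
    )
    print(response.text)


asyncio.run(main())
```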