diff --git a/README.md b/README.md
index 8b7e9f370..061fba78f 100644
--- a/README.md
+++ b/README.md
@@ -72,7 +72,7 @@ If you don't have enough resources to run it, you can use bitsnbytes to quantize
 
 - Scripts - Script that creates similarity search index for other libraries.
 
-- Frontend - Frontend uses Vite and React. 
+- Frontend - Frontend uses Vite and React.
 
 ## QuickStart
 
diff --git a/application/parser/file/openapi3_parser.py b/application/parser/file/openapi3_parser.py
new file mode 100644
index 000000000..3c5082fa2
--- /dev/null
+++ b/application/parser/file/openapi3_parser.py
@@ -0,0 +1,52 @@
+from urllib.parse import urlparse
+
+from openapi_parser import parse
+
+try:
+    from application.parser.file.base_parser import BaseParser
+except ModuleNotFoundError:
+    from base_parser import BaseParser
+
+
+class OpenAPI3Parser(BaseParser):
+    def init_parser(self) -> None:
+        return super().init_parser()
+
+    def get_base_urls(self, urls):
+        # Deduplicate scheme://netloc pairs while preserving order.
+        base_urls = []
+        for url in urls:
+            parsed_url = urlparse(url)
+            base_url = parsed_url.scheme + "://" + parsed_url.netloc
+            if base_url not in base_urls:
+                base_urls.append(base_url)
+        return base_urls
+
+    def get_info_from_paths(self, path):
+        # Summarize each operation as "<method>=<first response description>".
+        info = ""
+        if path.operations:
+            for operation in path.operations:
+                info += (
+                    f"\n{operation.method.value}="
+                    f"{operation.responses[0].description}"
+                )
+        return info
+
+    def parse_file(self, file_path):
+        data = parse(file_path)
+        results = ""
+        base_urls = self.get_base_urls(link.url for link in data.servers)
+        results += f"Base URL:{','.join(base_urls)}\n"
+        i = 1
+        for path in data.paths:
+            info = self.get_info_from_paths(path)
+            results += (
+                f"Path{i}: {path.url}\n"
+                f"description: {path.description}\n"
+                f"parameters: {path.parameters}\nmethods: {info}\n"
+            )
+            i += 1
+        with open("results.txt", "w") as f:
+            f.write(results)
+        return results
diff --git a/application/requirements.txt b/application/requirements.txt
index 9c60e4219..693e62831 100644
--- a/application/requirements.txt
+++ b/application/requirements.txt
@@ -57,6 +57,7 @@ nltk==3.8.1
 numcodecs==0.11.0
 numpy==1.24.2
 openai==0.27.8
+openapi3-parser==1.1.14
 packaging==23.0
 pathos==0.3.0
 Pillow==10.0.1
diff --git a/docs/pages/Deploying/_meta.json b/docs/pages/Deploying/_meta.json
index 27090f4b5..bcc9bcde9 100644
--- a/docs/pages/Deploying/_meta.json
+++ b/docs/pages/Deploying/_meta.json
@@ -6,5 +6,9 @@
   "Quickstart": {
     "title": "⚡️Quickstart",
     "href": "/Deploying/Quickstart"
+  },
+  "Railway-Deploying": {
+    "title": "🚂Deploying on Railway",
+    "href": "/Deploying/Railway-Deploying"
   }
 }
\ No newline at end of file
diff --git a/docs/pages/Guides/How-to-use-different-LLM.md b/docs/pages/Guides/How-to-use-different-LLM.md
index 8d7ccccec..c300bef33 100644
--- a/docs/pages/Guides/How-to-use-different-LLM.md
+++ b/docs/pages/Guides/How-to-use-different-LLM.md
@@ -1,36 +1,42 @@
-Fortunately, there are many providers for LLMs, and some of them can even be run locally.
+# Setting Up Local Language Models for Your App
 
-There are two models used in the app:
-1. Embeddings.
-2. Text generation.
+Your app relies on two essential models: Embeddings and Text Generation. While OpenAI's default models work seamlessly, you have the flexibility to switch providers or even run the models locally.
 
-By default, we use OpenAI's models, but if you want to change it or even run it locally, it's very simple!
+## Step 1: Configure Environment Variables
 
-### Go to .env file or set environment variables:
+Navigate to the `.env` file or set the following environment variables:
 
-`LLM_NAME=`
+```env
+LLM_NAME=
+API_KEY=
+EMBEDDINGS_NAME=
+EMBEDDINGS_KEY=
+VITE_API_STREAMING=
+```
 
-`API_KEY=`
+You can omit the keys if you are happy for users to provide their own, but make sure you set `LLM_NAME` and `EMBEDDINGS_NAME`.
 
-`EMBEDDINGS_NAME=`
+## Step 2: Choose Your Models
 
-`EMBEDDINGS_KEY=`
+**Options for `LLM_NAME`:**
+- openai
+- manifest
+- cohere
+- Arc53/docsgpt-14b
+- Arc53/docsgpt-7b-falcon
+- llama.cpp
 
-`VITE_API_STREAMING=`
+**Options for `EMBEDDINGS_NAME`:**
+- openai_text-embedding-ada-002
+- huggingface_sentence-transformers/all-mpnet-base-v2
+- huggingface_hkunlp/instructor-large
+- cohere_medium
 
-You don't need to provide keys if you are happy with users providing theirs, so make sure you set `LLM_NAME` and `EMBEDDINGS_NAME`.
+If using Llama, set `EMBEDDINGS_NAME` to `huggingface_sentence-transformers/all-mpnet-base-v2`, then download [the DocsGPT model](https://d3dg1063dc54p9.cloudfront.net/models/docsgpt-7b-f16.gguf) and place it in the `models/` folder.
 
-Options:
-LLM_NAME (openai, manifest, cohere, Arc53/docsgpt-14b, Arc53/docsgpt-7b-falcon, llama.cpp)
-EMBEDDINGS_NAME (openai_text-embedding-ada-002, huggingface_sentence-transformers/all-mpnet-base-v2, huggingface_hkunlp/instructor-large, cohere_medium)
+Alternatively, for a local Llama setup, run `setup.sh` and choose option 1 when prompted; the script downloads the DocsGPT model for you, so no manual download is needed.
 
-If using Llama, set the `EMBEDDINGS_NAME` to `huggingface_sentence-transformers/all-mpnet-base-v2` and be sure to download [this model](https://d3dg1063dc54p9.cloudfront.net/models/docsgpt-7b-f16.gguf) into the `models/` folder: `https://d3dg1063dc54p9.cloudfront.net/models/docsgpt-7b-f16.gguf`.
+## Step 3: Local Hosting for Privacy
 
-Alternatively, if you wish to run Llama locally, you can run `setup.sh` and choose option 1 when prompted. You do not need to manually add the DocsGPT model mentioned above to your `models/` folder if you use `setup.sh`, as the script will manage that step for you.
-
-That's it!
-
-### Hosting everything locally and privately (for using our optimised open-source models)
-If you are working with critical data and don't want anything to leave your premises.
-
-Make sure you set `SELF_HOSTED_MODEL` as true in your `.env` variable, and for your `LLM_NAME`, you can use anything that is on Hugging Face.
+If you are working with sensitive data, host everything locally by setting `SELF_HOSTED_MODEL` to true in your `.env`. For `LLM_NAME`, use any model available on Hugging Face.
+That's it! Your app is now configured for local, private hosting, so critical data never leaves your premises.
diff --git a/frontend/src/Navigation.tsx b/frontend/src/Navigation.tsx
index aed0965ea..388ea19b6 100644
--- a/frontend/src/Navigation.tsx
+++ b/frontend/src/Navigation.tsx
@@ -172,7 +172,7 @@ export default function Navigation({ navOpen, setNavOpen }: NavigationProps) {
     <>
      {!navOpen && (
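
---

For anyone reviewing this PR, here is a minimal sketch of how the new `OpenAPI3Parser` could be exercised locally. It is not part of the diff: the spec path is hypothetical, and it assumes the repository root is on `PYTHONPATH` so the `application` package resolves.

```python
# Minimal usage sketch. Assumptions: a valid OpenAPI 3 spec exists at the
# hypothetical path below, and the repo root is on PYTHONPATH.
from application.parser.file.openapi3_parser import OpenAPI3Parser

parser = OpenAPI3Parser()
summary = parser.parse_file("examples/petstore.yaml")  # hypothetical spec path
print(summary)  # "Base URL:..." followed by one numbered entry per path
# Note: parse_file also writes the same summary to results.txt as a side effect.
```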
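Similarly, a concrete `.env` for the self-hosted setup described in Step 3 of the updated guide might look like the following; the model names are illustrative picks from the Step 2 options, not defaults.

```env
# Illustrative self-hosted configuration; values are examples, not defaults.
SELF_HOSTED_MODEL=true
LLM_NAME=Arc53/docsgpt-7b-falcon
EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2
VITE_API_STREAMING=true
```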