architecture.py
# -*- coding: utf-8 -*-
"""Untitled4.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1eV4-E7VDAHekOoakymatKd3grGwUSTAC
"""
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
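
# --- Illustrative sketch (not part of the original file) ----------------------
# The architecture notes below mention "custom prompt templates for the legal
# domain" without showing one. This is a minimal example of what such a template
# could look like; the name LEGAL_QA_PROMPT and the wording are assumptions, not
# the project's actual prompt.
from langchain.prompts import PromptTemplate

LEGAL_QA_PROMPT = PromptTemplate(
    input_variables=["context", "question"],
    template=(
        "You are a legal assistant. Answer the question using only the "
        "retrieved context below. If the context is insufficient, say so.\n\n"
        "Context:\n{context}\n\n"
        "Question: {question}\n"
        "Answer:"
    ),
)
# ------------------------------------------------------------------------------
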
class LegalLLMArchitecture:
    def __init__(self):
        # Existing architecture choices: a Groq-hosted Llama 3.1 70B chat model
        # and HuggingFace sentence-transformer embeddings for retrieval.
        self.base_model = ChatGroq(model="llama-3.1-70b-versatile", temperature=0)
        self.embeddings = HuggingFaceEmbeddings()
    def architecture_modifications(self):
        """
        Summary of the architectural modifications made on top of the base model:

        1. RAG integration
           - Added a vector store for legal document retrieval
           - Implemented similarity search over the indexed documents
        2. Custom memory management
           - ConversationBufferMemory for retaining conversational context
        3. Chain architecture
           - ConversationalRetrievalChain ties document retrieval to the chat model
           - Custom prompt templates for the legal domain

        (See LEGAL_QA_PROMPT above and build_retrieval_chain() below for
        illustrative sketches of these pieces.)
        """
        return {
            "rag_enabled": True,
            "memory_type": "ConversationBufferMemory",
            "chain_type": "ConversationalRetrievalChain",
            "modifications": [
                "Legal domain-specific prompt engineering",
                "Vector store integration",
                "Similarity search implementation",
                "Conversation memory management",
            ],
        }
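

# --- Illustrative sketch (not part of the original file) ----------------------
# The class above documents RAG, memory, and chain choices but does not show how
# they are wired together. The function below is a minimal sketch of that wiring,
# assuming a FAISS vector store (requires langchain-community and faiss-cpu) and
# a list of already-loaded LangChain Document objects. The function name, the
# choice of FAISS, and the k=4 retriever setting are assumptions, not part of the
# original architecture.
def build_retrieval_chain(arch: LegalLLMArchitecture, documents):
    from langchain_community.vectorstores import FAISS

    # Embed the legal documents and index them for similarity search.
    vector_store = FAISS.from_documents(documents, arch.embeddings)
    retriever = vector_store.as_retriever(search_kwargs={"k": 4})

    # Retain prior turns so follow-up questions keep their context.
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
    )

    # Combine the Groq-hosted LLM, the retriever, and the memory into a single
    # chain, using the legal-domain prompt sketched above for the answer step.
    return ConversationalRetrievalChain.from_llm(
        llm=arch.base_model,
        retriever=retriever,
        memory=memory,
        combine_docs_chain_kwargs={"prompt": LEGAL_QA_PROMPT},
    )


if __name__ == "__main__":
    # Usage sketch: print the documented modifications. Instantiation needs a
    # valid GROQ_API_KEY; building the chain also needs Document objects.
    arch = LegalLLMArchitecture()
    print(arch.architecture_modifications())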