Skip to content

Commit

Permalink
Add additional_files to text models
Browse files Browse the repository at this point in the history
  • Loading branch information
timonv committed Jan 9, 2025
1 parent cce12f0 commit b733f15
Show file tree
Hide file tree
Showing 5 changed files with 42 additions and 6 deletions.
5 changes: 5 additions & 0 deletions src/models/image_embedding.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,34 +24,39 @@ pub fn models_list() -> Vec<ModelInfo<ImageEmbeddingModel>> {
description: String::from("CLIP vision encoder based on ViT-B/32"),
model_code: String::from("Qdrant/clip-ViT-B-32-vision"),
model_file: String::from("model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: ImageEmbeddingModel::Resnet50,
dim: 2048,
description: String::from("ResNet-50 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__."),
model_code: String::from("Qdrant/resnet50-onnx"),
model_file: String::from("model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: ImageEmbeddingModel::UnicomVitB16,
dim: 768,
description: String::from("Unicom Unicom-ViT-B-16 from open-metric-learning"),
model_code: String::from("Qdrant/Unicom-ViT-B-16"),
model_file: String::from("model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: ImageEmbeddingModel::UnicomVitB32,
dim: 512,
description: String::from("Unicom Unicom-ViT-B-32 from open-metric-learning"),
model_code: String::from("Qdrant/Unicom-ViT-B-32"),
model_file: String::from("model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: ImageEmbeddingModel::NomicEmbedVisionV15,
dim: 768,
description: String::from("Nomic NomicEmbedVisionV15"),
model_code: String::from("nomic-ai/nomic-embed-vision-v1.5"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
];

Expand Down
1 change: 1 addition & 0 deletions src/models/model_info.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,5 @@ pub struct ModelInfo<T> {
pub description: String,
pub model_code: String,
pub model_file: String,
pub additional_files: Vec<String>,
}
1 change: 1 addition & 0 deletions src/models/sparse.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ pub fn models_list() -> Vec<ModelInfo<SparseModel>> {
description: String::from("Splade sparse vector model for commercial use, v1"),
model_code: String::from("Qdrant/Splade_PP_en_v1"),
model_file: String::from("model.onnx"),
additional_files: Vec::new(),
}]
}

Expand Down
29 changes: 29 additions & 0 deletions src/models/text_embedding.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,62 +80,71 @@ fn init_models_map() -> HashMap<EmbeddingModel, ModelInfo<EmbeddingModel>> {
description: String::from("Sentence Transformer model, MiniLM-L6-v2"),
model_code: String::from("Qdrant/all-MiniLM-L6-v2-onnx"),
model_file: String::from("model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::AllMiniLML6V2Q,
dim: 384,
description: String::from("Quantized Sentence Transformer model, MiniLM-L6-v2"),
model_code: String::from("Xenova/all-MiniLM-L6-v2"),
model_file: String::from("onnx/model_quantized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::AllMiniLML12V2,
dim: 384,
description: String::from("Sentence Transformer model, MiniLM-L12-v2"),
model_code: String::from("Xenova/all-MiniLM-L12-v2"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::AllMiniLML12V2Q,
dim: 384,
description: String::from("Quantized Sentence Transformer model, MiniLM-L12-v2"),
model_code: String::from("Xenova/all-MiniLM-L12-v2"),
model_file: String::from("onnx/model_quantized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::BGEBaseENV15,
dim: 768,
description: String::from("v1.5 release of the base English model"),
model_code: String::from("Xenova/bge-base-en-v1.5"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::BGEBaseENV15Q,
dim: 768,
description: String::from("Quantized v1.5 release of the base English model"),
model_code: String::from("Qdrant/bge-base-en-v1.5-onnx-Q"),
model_file: String::from("model_optimized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::BGELargeENV15,
dim: 1024,
description: String::from("v1.5 release of the large English model"),
model_code: String::from("Xenova/bge-large-en-v1.5"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::BGELargeENV15Q,
dim: 1024,
description: String::from("Quantized v1.5 release of the large English model"),
model_code: String::from("Qdrant/bge-large-en-v1.5-onnx-Q"),
model_file: String::from("model_optimized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::BGESmallENV15,
dim: 384,
description: String::from("v1.5 release of the fast and default English model"),
model_code: String::from("Xenova/bge-small-en-v1.5"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::BGESmallENV15Q,
Expand All @@ -145,20 +154,23 @@ fn init_models_map() -> HashMap<EmbeddingModel, ModelInfo<EmbeddingModel>> {
),
model_code: String::from("Qdrant/bge-small-en-v1.5-onnx-Q"),
model_file: String::from("model_optimized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::NomicEmbedTextV1,
dim: 768,
description: String::from("8192 context length english model"),
model_code: String::from("nomic-ai/nomic-embed-text-v1"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::NomicEmbedTextV15,
dim: 768,
description: String::from("v1.5 release of the 8192 context length english model"),
model_code: String::from("nomic-ai/nomic-embed-text-v1.5"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::NomicEmbedTextV15Q,
Expand All @@ -168,20 +180,23 @@ fn init_models_map() -> HashMap<EmbeddingModel, ModelInfo<EmbeddingModel>> {
),
model_code: String::from("nomic-ai/nomic-embed-text-v1.5"),
model_file: String::from("onnx/model_quantized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::ParaphraseMLMiniLML12V2Q,
dim: 384,
description: String::from("Quantized Multi-lingual model"),
model_code: String::from("Qdrant/paraphrase-multilingual-MiniLM-L12-v2-onnx-Q"),
model_file: String::from("model_optimized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::ParaphraseMLMiniLML12V2,
dim: 384,
description: String::from("Multi-lingual model"),
model_code: String::from("Xenova/paraphrase-multilingual-MiniLM-L12-v2"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::ParaphraseMLMpnetBaseV2,
Expand All @@ -191,97 +206,111 @@ fn init_models_map() -> HashMap<EmbeddingModel, ModelInfo<EmbeddingModel>> {
),
model_code: String::from("Xenova/paraphrase-multilingual-mpnet-base-v2"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::BGESmallZHV15,
dim: 512,
description: String::from("v1.5 release of the small Chinese model"),
model_code: String::from("Xenova/bge-small-zh-v1.5"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::MultilingualE5Small,
dim: 384,
description: String::from("Small model of multilingual E5 Text Embeddings"),
model_code: String::from("intfloat/multilingual-e5-small"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::MultilingualE5Base,
dim: 768,
description: String::from("Base model of multilingual E5 Text Embeddings"),
model_code: String::from("intfloat/multilingual-e5-base"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::MultilingualE5Large,
dim: 1024,
description: String::from("Large model of multilingual E5 Text Embeddings"),
model_code: String::from("Qdrant/multilingual-e5-large-onnx"),
model_file: String::from("model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::MxbaiEmbedLargeV1,
dim: 1024,
description: String::from("Large English embedding model from Mixedbread.ai"),
model_code: String::from("mixedbread-ai/mxbai-embed-large-v1"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::MxbaiEmbedLargeV1Q,
dim: 1024,
description: String::from("Quantized Large English embedding model from Mixedbread.ai"),
model_code: String::from("mixedbread-ai/mxbai-embed-large-v1"),
model_file: String::from("onnx/model_quantized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::GTEBaseENV15,
dim: 768,
description: String::from("Large multilingual embedding model from Alibaba"),
model_code: String::from("Alibaba-NLP/gte-base-en-v1.5"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::GTEBaseENV15Q,
dim: 768,
description: String::from("Quantized Large multilingual embedding model from Alibaba"),
model_code: String::from("Alibaba-NLP/gte-base-en-v1.5"),
model_file: String::from("onnx/model_quantized.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::GTELargeENV15,
dim: 1024,
description: String::from("Large multilingual embedding model from Alibaba"),
model_code: String::from("Alibaba-NLP/gte-large-en-v1.5"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::GTELargeENV15Q,
dim: 1024,
description: String::from("Quantized Large multilingual embedding model from Alibaba"),
model_code: String::from("Alibaba-NLP/gte-large-en-v1.5"),
model_file: String::from("onnx/model_quantized.onnx"),
additional_files: vec!["model.onnx_data".to_string()],
},
ModelInfo {
model: EmbeddingModel::ClipVitB32,
dim: 512,
description: String::from("CLIP text encoder based on ViT-B/32"),
model_code: String::from("Qdrant/clip-ViT-B-32-text"),
model_file: String::from("model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::JinaEmbeddingsV2BaseCode,
dim: 768,
description: String::from("Jina embeddings v2 base code"),
model_code: String::from("jinaai/jina-embeddings-v2-base-code"),
model_file: String::from("onnx/model.onnx"),
additional_files: Vec::new(),
},
ModelInfo {
model: EmbeddingModel::JinaEmbeddingsV3,
dim: 1024,
description: String::from("Jina embeddings v3"),
model_code: String::from("jinaai/jina-embeddings-v3"),
model_file: String::from("onnx/model.onnx"),
additional_files: vec!["onnx/model.onnx_data".to_string()],
},
];

Expand Down
12 changes: 6 additions & 6 deletions src/text_embedding/impl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,12 +66,12 @@ impl TextEmbedding {
.get(model_file_name)
.context(format!("Failed to retrieve {}", model_file_name))?;

// TODO: If more models need .onnx_data, implement a better way to handle this
// Probably by adding `additional_files` field in the `ModelInfo` struct
if model_name == EmbeddingModel::MultilingualE5Large {
model_repo
.get("model.onnx_data")
.expect("Failed to retrieve model.onnx_data.");
if !model_info.additional_files.is_empty() {
for file in &model_info.additional_files {
model_repo
.get(file)
.context(format!("Failed to retrieve {}", file))?;
}
}

// prioritise loading pooling config if available, if not (thanks qdrant!), look for it in hardcoded
Expand Down

0 comments on commit b733f15

Please sign in to comment.