@@ -1,21 +1,21 @@
 use crate::cli::server::SupportedModels;
-use crate::engine::ai::providers::ort::ORTProvider;
 use crate::engine::ai::providers::ModelProviders;
 use crate::engine::ai::providers::ProviderTrait;
+use crate::engine::ai::providers::ort::ORTProvider;
 use crate::error::AIProxyError;
 use ahnlich_types::ai::execution_provider::ExecutionProvider;
 use ahnlich_types::ai::models::AiStoreInputType;
 use ahnlich_types::keyval::StoreKey;
-use fast_image_resize::images::Image;
-use fast_image_resize::images::ImageRef;
 use fast_image_resize::FilterType;
 use fast_image_resize::PixelType;
 use fast_image_resize::ResizeAlg;
 use fast_image_resize::ResizeOptions;
 use fast_image_resize::Resizer;
-use image::imageops;
+use fast_image_resize::images::Image;
+use fast_image_resize::images::ImageRef;
 use image::ImageReader;
 use image::RgbImage;
+use image::imageops;
 use ndarray::{Array, Ix3};
 use ndarray::{ArrayView, Ix4};
 use nonzero_ext::nonzero;
@@ -66,65 +66,65 @@ impl SupportedModels {
                 embedding_size: nonzero!(384usize),
             },
             SupportedModels::AllMiniLML12V2 => ModelDetails {
-                    model_type: ModelType::Text {
-                        // Token size source: https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2#intended-uses
-                        max_input_tokens: nonzero!(256usize),
-                    },
-                    supported_model: SupportedModels::AllMiniLML12V2,
-                    description: String::from("Sentence Transformer model, with 12 layers, version 2."),
-                    embedding_size: nonzero!(384usize),
+                model_type: ModelType::Text {
+                    // Token size source: https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2#intended-uses
+                    max_input_tokens: nonzero!(256usize),
+                },
+                supported_model: SupportedModels::AllMiniLML12V2,
+                description: String::from("Sentence Transformer model, with 12 layers, version 2."),
+                embedding_size: nonzero!(384usize),
             },
             SupportedModels::BGEBaseEnV15 => ModelDetails {
-                    model_type: ModelType::Text {
-                        // Token size source: https://huggingface.co/BAAI/bge-large-en/discussions/11#64e44de1623074ac850aa1ae
-                        max_input_tokens: nonzero!(512usize),
-                    },
-                    supported_model: SupportedModels::BGEBaseEnV15,
-                    description: String::from(
-                        "BAAI General Embedding model with English support, base scale, version 1.5.",
-                    ),
-                    embedding_size: nonzero!(768usize),
+                model_type: ModelType::Text {
+                    // Token size source: https://huggingface.co/BAAI/bge-large-en/discussions/11#64e44de1623074ac850aa1ae
+                    max_input_tokens: nonzero!(512usize),
+                },
+                supported_model: SupportedModels::BGEBaseEnV15,
+                description: String::from(
+                    "BAAI General Embedding model with English support, base scale, version 1.5.",
+                ),
+                embedding_size: nonzero!(768usize),
             },
             SupportedModels::BGELargeEnV15 => ModelDetails {
-                    model_type: ModelType::Text {
-                        max_input_tokens: nonzero!(512usize),
-                    },
-                    supported_model: SupportedModels::BGELargeEnV15,
-                    description: String::from(
-                        "BAAI General Embedding model with English support, large scale, version 1.5.",
-                    ),
-                    embedding_size: nonzero!(1024usize),
+                model_type: ModelType::Text {
+                    max_input_tokens: nonzero!(512usize),
+                },
+                supported_model: SupportedModels::BGELargeEnV15,
+                description: String::from(
+                    "BAAI General Embedding model with English support, large scale, version 1.5.",
+                ),
+                embedding_size: nonzero!(1024usize),
             },
             SupportedModels::Resnet50 => ModelDetails {
-                    model_type: ModelType::Image {
-                        expected_image_dimensions: (nonzero!(224usize), nonzero!(224usize)),
-                    },
-                    supported_model: SupportedModels::Resnet50,
-                    description: String::from("Residual Networks model, with 50 layers."),
-                    embedding_size: nonzero!(2048usize),
+                model_type: ModelType::Image {
+                    expected_image_dimensions: (nonzero!(224usize), nonzero!(224usize)),
+                },
+                supported_model: SupportedModels::Resnet50,
+                description: String::from("Residual Networks model, with 50 layers."),
+                embedding_size: nonzero!(2048usize),
             },
             SupportedModels::ClipVitB32Image => ModelDetails {
-                    model_type: ModelType::Image {
-                        expected_image_dimensions: (nonzero!(224usize), nonzero!(224usize)),
-                    },
-                    supported_model: SupportedModels::ClipVitB32Image,
-                    description: String::from(
-                        "Contrastive Language-Image Pre-Training Vision transformer model, base scale.",
-                    ),
-                    embedding_size: nonzero!(512usize),
+                model_type: ModelType::Image {
+                    expected_image_dimensions: (nonzero!(224usize), nonzero!(224usize)),
+                },
+                supported_model: SupportedModels::ClipVitB32Image,
+                description: String::from(
+                    "Contrastive Language-Image Pre-Training Vision transformer model, base scale.",
+                ),
+                embedding_size: nonzero!(512usize),
             },
             SupportedModels::ClipVitB32Text => ModelDetails {
-                    supported_model: SupportedModels::ClipVitB32Text,
-                    description: String::from(
-                        "Contrastive Language-Image Pre-Training Text transformer model, base scale. \
+                supported_model: SupportedModels::ClipVitB32Text,
+                description: String::from(
+                    "Contrastive Language-Image Pre-Training Text transformer model, base scale. \
                      Ideal for embedding very short text and using in combination with ClipVitB32Image",
-                    ),
-                    embedding_size: nonzero!(512usize),
-                    model_type: ModelType::Text {
-                        // Token size source: https://github.com/UKPLab/sentence-transformers/issues/1269
-                        max_input_tokens: nonzero!(77usize),
-                    },
+                ),
+                embedding_size: nonzero!(512usize),
+                model_type: ModelType::Text {
+                    // Token size source: https://github.com/UKPLab/sentence-transformers/issues/1269
+                    max_input_tokens: nonzero!(77usize),
                 },
+            },
         }
     }
 
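For orientation, each match arm above builds a ModelDetails value whose shape can be read directly from the fields it sets: a model_type that is either text (with a maximum token count) or image (with expected input dimensions), the supported_model variant, a human-readable description, and a non-zero embedding_size. Below is a minimal sketch of definitions consistent with those fields, not the repository's actual code; the derives, visibility, and the variant list of SupportedModels (here limited to the variants named in this diff) are assumptions.

use std::num::NonZeroUsize;

// Hypothetical stand-in for crate::cli::server::SupportedModels,
// limited to the variants that appear in this diff.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SupportedModels {
    AllMiniLML6V2,
    AllMiniLML12V2,
    BGEBaseEnV15,
    BGELargeEnV15,
    Resnet50,
    ClipVitB32Image,
    ClipVitB32Text,
}

// Sketch of the model-kind enum implied by the ModelType::Text and
// ModelType::Image constructors used in the match arms.
#[derive(Debug, Clone)]
pub enum ModelType {
    Text {
        // Maximum number of tokens the text encoder accepts per input.
        max_input_tokens: NonZeroUsize,
    },
    Image {
        // (width, height) the image encoder expects after preprocessing.
        expected_image_dimensions: (NonZeroUsize, NonZeroUsize),
    },
}

// Sketch of the struct each match arm constructs.
#[derive(Debug, Clone)]
pub struct ModelDetails {
    pub model_type: ModelType,
    pub supported_model: SupportedModels,
    pub description: String,
    pub embedding_size: NonZeroUsize,
}

With definitions along these lines, nonzero!(384usize) from the nonzero_ext crate (imported in the first hunk) expands to a compile-time-checked NonZeroUsize constant, which is why every token count, dimension, and embedding size in the table goes through that macro rather than a bare usize.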