Skip to content

Commit c5be63c

Browse files
committed
Updating Rust edition and version
1 parent d86b20d commit c5be63c

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

58 files changed

+229
-220
lines changed

.github/workflows/release.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ jobs:
5656
- name: Get Cargo toolchain
5757
uses: actions-rs/toolchain@v1
5858
with:
59-
toolchain: 1.87.0
59+
toolchain: 1.88.0
6060

6161
- name: Build Linux Release for ${{ needs.prebuild_preparation.outputs.bin_name }}
6262
working-directory: ./ahnlich

.github/workflows/rust_tag_and_deploy.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ jobs:
6666
- name: Get Cargo toolchain
6767
uses: actions-rs/toolchain@v1
6868
with:
69-
toolchain: 1.87.0
69+
toolchain: 1.88.0
7070

7171
- name: Install Protoc
7272
uses: arduino/setup-protoc@v3

ahnlich/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM lukemathwalker/cargo-chef:latest-rust-1.87.0 AS chef
1+
FROM lukemathwalker/cargo-chef:latest-rust-1.88.0 AS chef
22
WORKDIR /app
33
RUN apt update && apt install lld clang protobuf-compiler -y
44
FROM chef AS planner

ahnlich/ai/Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
[package]
22
name = "ai"
33
version = "0.1.0"
4-
edition = "2021"
4+
edition = "2024"
55

66
# only used for rust client test and not to be released
77
[lib]

ahnlich/ai/src/cli/server.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ impl SupportedModelArgs {
270270
let mut output = String::new();
271271

272272
for supported_model in SupportedModels::VARIANTS.iter() {
273-
output.push_str(format!("{}, ", supported_model).as_str())
273+
output.push_str(format!("{supported_model}, ").as_str())
274274
}
275275
output
276276
}
@@ -298,6 +298,6 @@ impl SupportedModelArgs {
298298
text.push_str(&self.list_supported_models());
299299
}
300300

301-
writeln!(&mut stdout, "{}", text).expect("Failed to write output");
301+
writeln!(&mut stdout, "{text}").expect("Failed to write output");
302302
}
303303
}

ahnlich/ai/src/engine/ai/models.rs

Lines changed: 51 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,21 @@
11
use crate::cli::server::SupportedModels;
2-
use crate::engine::ai::providers::ort::ORTProvider;
32
use crate::engine::ai::providers::ModelProviders;
43
use crate::engine::ai::providers::ProviderTrait;
4+
use crate::engine::ai::providers::ort::ORTProvider;
55
use crate::error::AIProxyError;
66
use ahnlich_types::ai::execution_provider::ExecutionProvider;
77
use ahnlich_types::ai::models::AiStoreInputType;
88
use ahnlich_types::keyval::StoreKey;
9-
use fast_image_resize::images::Image;
10-
use fast_image_resize::images::ImageRef;
119
use fast_image_resize::FilterType;
1210
use fast_image_resize::PixelType;
1311
use fast_image_resize::ResizeAlg;
1412
use fast_image_resize::ResizeOptions;
1513
use fast_image_resize::Resizer;
16-
use image::imageops;
14+
use fast_image_resize::images::Image;
15+
use fast_image_resize::images::ImageRef;
1716
use image::ImageReader;
1817
use image::RgbImage;
18+
use image::imageops;
1919
use ndarray::{Array, Ix3};
2020
use ndarray::{ArrayView, Ix4};
2121
use nonzero_ext::nonzero;
@@ -66,65 +66,65 @@ impl SupportedModels {
6666
embedding_size: nonzero!(384usize),
6767
},
6868
SupportedModels::AllMiniLML12V2 => ModelDetails {
69-
model_type: ModelType::Text {
70-
// Token size source: https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2#intended-uses
71-
max_input_tokens: nonzero!(256usize),
72-
},
73-
supported_model: SupportedModels::AllMiniLML12V2,
74-
description: String::from("Sentence Transformer model, with 12 layers, version 2."),
75-
embedding_size: nonzero!(384usize),
69+
model_type: ModelType::Text {
70+
// Token size source: https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2#intended-uses
71+
max_input_tokens: nonzero!(256usize),
72+
},
73+
supported_model: SupportedModels::AllMiniLML12V2,
74+
description: String::from("Sentence Transformer model, with 12 layers, version 2."),
75+
embedding_size: nonzero!(384usize),
7676
},
7777
SupportedModels::BGEBaseEnV15 => ModelDetails {
78-
model_type: ModelType::Text {
79-
// Token size source: https://huggingface.co/BAAI/bge-large-en/discussions/11#64e44de1623074ac850aa1ae
80-
max_input_tokens: nonzero!(512usize),
81-
},
82-
supported_model: SupportedModels::BGEBaseEnV15,
83-
description: String::from(
84-
"BAAI General Embedding model with English support, base scale, version 1.5.",
85-
),
86-
embedding_size: nonzero!(768usize),
78+
model_type: ModelType::Text {
79+
// Token size source: https://huggingface.co/BAAI/bge-large-en/discussions/11#64e44de1623074ac850aa1ae
80+
max_input_tokens: nonzero!(512usize),
81+
},
82+
supported_model: SupportedModels::BGEBaseEnV15,
83+
description: String::from(
84+
"BAAI General Embedding model with English support, base scale, version 1.5.",
85+
),
86+
embedding_size: nonzero!(768usize),
8787
},
8888
SupportedModels::BGELargeEnV15 => ModelDetails {
89-
model_type: ModelType::Text {
90-
max_input_tokens: nonzero!(512usize),
91-
},
92-
supported_model: SupportedModels::BGELargeEnV15,
93-
description: String::from(
94-
"BAAI General Embedding model with English support, large scale, version 1.5.",
95-
),
96-
embedding_size: nonzero!(1024usize),
89+
model_type: ModelType::Text {
90+
max_input_tokens: nonzero!(512usize),
91+
},
92+
supported_model: SupportedModels::BGELargeEnV15,
93+
description: String::from(
94+
"BAAI General Embedding model with English support, large scale, version 1.5.",
95+
),
96+
embedding_size: nonzero!(1024usize),
9797
},
9898
SupportedModels::Resnet50 => ModelDetails {
99-
model_type: ModelType::Image {
100-
expected_image_dimensions: (nonzero!(224usize), nonzero!(224usize)),
101-
},
102-
supported_model: SupportedModels::Resnet50,
103-
description: String::from("Residual Networks model, with 50 layers."),
104-
embedding_size: nonzero!(2048usize),
99+
model_type: ModelType::Image {
100+
expected_image_dimensions: (nonzero!(224usize), nonzero!(224usize)),
101+
},
102+
supported_model: SupportedModels::Resnet50,
103+
description: String::from("Residual Networks model, with 50 layers."),
104+
embedding_size: nonzero!(2048usize),
105105
},
106106
SupportedModels::ClipVitB32Image => ModelDetails {
107-
model_type: ModelType::Image {
108-
expected_image_dimensions: (nonzero!(224usize), nonzero!(224usize)),
109-
},
110-
supported_model: SupportedModels::ClipVitB32Image,
111-
description: String::from(
112-
"Contrastive Language-Image Pre-Training Vision transformer model, base scale.",
113-
),
114-
embedding_size: nonzero!(512usize),
107+
model_type: ModelType::Image {
108+
expected_image_dimensions: (nonzero!(224usize), nonzero!(224usize)),
109+
},
110+
supported_model: SupportedModels::ClipVitB32Image,
111+
description: String::from(
112+
"Contrastive Language-Image Pre-Training Vision transformer model, base scale.",
113+
),
114+
embedding_size: nonzero!(512usize),
115115
},
116116
SupportedModels::ClipVitB32Text => ModelDetails {
117-
supported_model: SupportedModels::ClipVitB32Text,
118-
description: String::from(
119-
"Contrastive Language-Image Pre-Training Text transformer model, base scale. \
117+
supported_model: SupportedModels::ClipVitB32Text,
118+
description: String::from(
119+
"Contrastive Language-Image Pre-Training Text transformer model, base scale. \
120120
Ideal for embedding very short text and using in combination with ClipVitB32Image",
121-
),
122-
embedding_size: nonzero!(512usize),
123-
model_type: ModelType::Text {
124-
// Token size source: https://github.com/UKPLab/sentence-transformers/issues/1269
125-
max_input_tokens: nonzero!(77usize),
126-
},
121+
),
122+
embedding_size: nonzero!(512usize),
123+
model_type: ModelType::Text {
124+
// Token size source: https://github.com/UKPLab/sentence-transformers/issues/1269
125+
max_input_tokens: nonzero!(77usize),
127126
},
127+
},
128128
}
129129
}
130130

ahnlich/ai/src/engine/ai/providers/ort/executor.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ use ort::Session;
44
use crate::error::AIProxyError;
55
use strum::IntoEnumIterator;
66

7-
use super::{register_provider, InnerAIExecutionProvider};
7+
use super::{InnerAIExecutionProvider, register_provider};
88
use std::path::PathBuf;
99
use std::sync::Arc;
1010
use std::thread::available_parallelism;

ahnlich/ai/src/engine/ai/providers/ort/helper.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,18 +10,18 @@ use std::path::PathBuf;
1010
/// To be used when loading local model files.
1111
pub fn read_file_to_bytes(file: &PathBuf) -> Result<Vec<u8>, AIProxyError> {
1212
let mut file = File::open(file).map_err(|_| AIProxyError::ModelConfigLoadError {
13-
message: format!("failed to open file {:?}", file),
13+
message: format!("failed to open file {file:?}"),
1414
})?;
1515
let file_size = file
1616
.metadata()
1717
.map_err(|_| AIProxyError::ModelConfigLoadError {
18-
message: format!("failed to get metadata for file {:?}", file),
18+
message: format!("failed to get metadata for file {file:?}"),
1919
})?
2020
.len() as usize;
2121
let mut buffer = Vec::with_capacity(file_size);
2222
file.read_to_end(&mut buffer)
2323
.map_err(|_| AIProxyError::ModelConfigLoadError {
24-
message: format!("failed to read file {:?}", file),
24+
message: format!("failed to read file {file:?}"),
2525
})?;
2626
Ok(buffer)
2727
}
@@ -47,15 +47,15 @@ impl HFConfigReader {
4747
self.model_repo
4848
.get(config_name)
4949
.map_err(|e| AIProxyError::ModelConfigLoadError {
50-
message: format!("failed to fetch {}, {}", config_name, e),
50+
message: format!("failed to fetch {config_name}, {e}"),
5151
})?;
5252
let contents =
5353
read_file_to_bytes(&file).map_err(|e| AIProxyError::ModelConfigLoadError {
54-
message: format!("failed to read {}, {}", config_name, e),
54+
message: format!("failed to read {config_name}, {e}"),
5555
})?;
5656
let value: serde_json::Value =
5757
serde_json::from_slice(&contents).map_err(|e| AIProxyError::ModelConfigLoadError {
58-
message: format!("failed to parse {}, {}", config_name, e),
58+
message: format!("failed to parse {config_name}, {e}"),
5959
})?;
6060
self.cache
6161
.insert(config_name.to_string(), Ok(value.clone()));

ahnlich/ai/src/engine/ai/providers/ort/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ use ahnlich_types::ai::execution_provider::ExecutionProvider as AIExecutionProvi
99
use ahnlich_types::keyval::StoreKey;
1010
use executor::ExecutorWithSessionCache;
1111
use fallible_collections::FallibleVec;
12-
use hf_hub::{api::sync::ApiBuilder, Cache};
12+
use hf_hub::{Cache, api::sync::ApiBuilder};
1313
use itertools::Itertools;
1414
use ort::{
1515
CUDAExecutionProvider, CoreMLExecutionProvider, DirectMLExecutionProvider, ExecutionProvider,
@@ -325,7 +325,7 @@ impl ORTProvider {
325325
_ => {
326326
return Err(AIProxyError::AIModelNotSupported {
327327
model_name: self.supported_models.to_string(),
328-
})
328+
});
329329
}
330330
};
331331

ahnlich/ai/src/engine/ai/providers/processors/center_crop.rs

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
use crate::engine::ai::models::ImageArray;
22
use crate::engine::ai::providers::processors::{
3-
Preprocessor, PreprocessorData, CONV_NEXT_FEATURE_EXTRACTOR_CENTER_CROP_THRESHOLD,
3+
CONV_NEXT_FEATURE_EXTRACTOR_CENTER_CROP_THRESHOLD, Preprocessor, PreprocessorData,
44
};
55
use crate::error::AIProxyError;
66
use rayon::iter::{IntoParallelIterator, ParallelIterator};
@@ -87,9 +87,8 @@ impl CenterCrop {
8787
}
8888
_ => Err(AIProxyError::ModelConfigLoadError {
8989
message: format!(
90-
"The key 'image_processor_type' in the configuration has the wrong value: {}; \
91-
it should be either 'CLIPImageProcessor' or 'ConvNextFeatureExtractor'.",
92-
image_processor_type
90+
"The key 'image_processor_type' in the configuration has the wrong value: {image_processor_type}; \
91+
it should be either 'CLIPImageProcessor' or 'ConvNextFeatureExtractor'."
9392
)
9493
.to_string(),
9594
}),

0 commit comments

Comments
 (0)