Release/0.6.1 (#104)
* fix missing documentation in code

* suppress unintended warnings

* future proofing: allow some unused variables

* fix potential dependency vulnerability by being more specific with the rustls version

* add tests for code coverage

* fix some Cargo warnings

* set the correct default model for the example chat CLI
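
The "future proofing" bullet refers to keeping not-yet-used code in place without tripping compiler lints. Below is a minimal sketch of the usual Rust approach; the concrete items and attribute placement are assumptions, since the relevant hunks are not part of this diff:

// Illustrative only: typical ways to keep forward-looking code warning-free.
#[allow(dead_code)]
struct ReservedConfig {
    future_flag: Option<bool>, // reserved for a later API version, not read yet
}

#[allow(unused_variables)]
fn process(input: &str) {
    let parsed_len = input.len(); // not used yet; kept for an upcoming feature
}

fn main() {
    process("example");
}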
Arend-Jan authored Jan 26, 2025
1 parent 18014fd commit 17a5963
Showing 15 changed files with 572 additions and 54 deletions.
8 changes: 5 additions & 3 deletions Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "chat-gpt-lib-rs"
version = "0.6.0"
version = "0.6.1"
edition = "2021"
description = "A Rust library for interacting with OpenAI's ChatGPT API, providing a simple interface to make API requests and handle responses."
license = "Apache-2.0"
@@ -21,14 +21,16 @@ bytes = "1.9.0"
env_logger = "0.11"
futures-util = "0.3"
log = "0.4"
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "multipart", "stream"] }
rustls = ">=0.23.5, <0.24.0"
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "multipart", "stream", "blocking"] }
rustls = ">=0.23.13, <0.24.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror = "2.0"
tokio = { version = "1.37", features = ["full"] }

[dev-dependencies]
wiremock = "0.6"
serial_test = "3.2"
dotenvy = "0.15"
console = "0.15"
indicatif = "0.17"
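
Two details in this diff are worth noting. The constraint `rustls = ">=0.23.13, <0.24.0"` accepts any 0.23.x release at or above 0.23.13 while still excluding a semver-incompatible 0.24, so future 0.23 patch releases are picked up automatically. The new dev-dependencies support the added test suite: `wiremock` mocks HTTP responses, `serial_test` keeps tests that share environment state from running in parallel, `dotenvy` loads variables from a `.env` file, and `console`/`indicatif` back the terminal examples.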
1 change: 0 additions & 1 deletion examples/chat.rs
@@ -10,7 +10,6 @@ use chat_gpt_lib_rs::api_resources::chat::{
};
use chat_gpt_lib_rs::error::OpenAIError;
use chat_gpt_lib_rs::OpenAIClient;
use std::env;

#[tokio::main]
async fn main() -> Result<(), OpenAIError> {
2 changes: 1 addition & 1 deletion examples/cli-chat-example.rs
@@ -33,7 +33,7 @@ async fn main() -> Result<(), OpenAIError> {
        .to_lowercase()
        .eq("true");

    let model = env::var("CHAT_MODEL").unwrap_or_else(|_| "o1-preview".to_string());
    let model = env::var("CHAT_MODEL").unwrap_or_else(|_| "gpt-4o".to_string());

    let system_prompt = env::var("SYSTEM_PROMPT").unwrap_or_else(|_| {
        "You are a high quality tech lead and are specialized in idiomatic Rust".to_string()
1 change: 0 additions & 1 deletion examples/completions.rs
@@ -10,7 +10,6 @@ use chat_gpt_lib_rs::api_resources::completions::{
};
use chat_gpt_lib_rs::error::OpenAIError;
use chat_gpt_lib_rs::OpenAIClient;
use std::env;

#[tokio::main]
async fn main() -> Result<(), OpenAIError> {
14 changes: 6 additions & 8 deletions examples/files.rs
@@ -61,14 +61,12 @@ async fn main() -> Result<(), OpenAIError> {
    );

    // Delete the file if you no longer need it
    /*
    println!("\nDeleting the file...");
    let delete_response = delete_file(&client, &uploaded_file.id).await?;
    println!(
        "File '{}' deleted: {}",
        delete_response.id, delete_response.deleted
    );
    */
    println!("\nDeleting the file...");
    let delete_response = delete_file(&client, &uploaded_file.id).await?;
    println!(
        "File '{}' deleted: {}",
        delete_response.id, delete_response.deleted
    );

    Ok(())
}
47 changes: 22 additions & 25 deletions examples/fine_tunes.rs
@@ -106,14 +106,12 @@ async fn main() -> Result<(), OpenAIError> {
    // If the job is still in progress (e.g., "pending", "running"), and you decide not to
    // proceed, you can cancel it:
    //
    // println!("\nCancelling the fine-tune job...");
    // let cancelled_ft = cancel_fine_tune(&client, &fine_tune_response.id).await?;
    // println!(
    //     "Cancelled fine-tune: ID={} | Status={}",
    //     cancelled_ft.id, cancelled_ft.status
    // );
    //
    // Uncomment the lines above to try out the cancel functionality.
    println!("\nCancelling the fine-tune job...");
    let cancelled_ft = cancel_fine_tune(&client, &fine_tune_response.id).await?;
    println!(
        "Cancelled fine-tune: ID={} | Status={}",
        cancelled_ft.id, cancelled_ft.status
    );

    // -------------------------------------------------------------------------
    // 6. (Optional) Delete the resulting fine-tuned model after it completes
@@ -125,23 +123,22 @@ async fn main() -> Result<(), OpenAIError> {
    // If you want to remove that model from your account (and can do so, e.g. you own the model),
    // you can delete it:
    //
    // if let Some(ref model_name) = retrieved_ft.fine_tuned_model {
    //     // Make sure the job actually succeeded before trying to delete, otherwise you'll get an error.
    //     if retrieved_ft.status == "succeeded" {
    //         println!("\nDeleting the fine-tuned model: {}...", model_name);
    //         let delete_response = delete_fine_tune_model(&client, model_name).await?;
    //         println!(
    //             "Model deletion response => object={}, id={}, deleted={}",
    //             delete_response.object, delete_response.id, delete_response.deleted
    //         );
    //     } else {
    //         println!(
    //             "\nThe job has not succeeded yet (status={}). Cannot delete model: {}",
    //             retrieved_ft.status,
    //             model_name
    //         );
    //     }
    // }
    if let Some(ref model_name) = retrieved_ft.fine_tuned_model {
        // Make sure the job actually succeeded before trying to delete, otherwise you'll get an error.
        if retrieved_ft.status == "succeeded" {
            println!("\nDeleting the fine-tuned model: {}...", model_name);
            let delete_response = delete_fine_tune_model(&client, model_name).await?;
            println!(
                "Model deletion response => object={}, id={}, deleted={}",
                delete_response.object, delete_response.id, delete_response.deleted
            );
        } else {
            println!(
                "\nThe job has not succeeded yet (status={}). Cannot delete model: {}",
                retrieved_ft.status, model_name
            );
        }
    }

    println!("\nExample completed. Check the logs above for details.");
    Ok(())
6 changes: 5 additions & 1 deletion examples/moderations.rs
@@ -20,10 +20,14 @@ async fn main() -> Result<(), OpenAIError> {
    // Create a new client; this will look for the OPENAI_API_KEY environment variable.
    let client = OpenAIClient::new(None)?;

    let moderation_text = "I hate you and want to harm you.".to_string();

    println!("Text to moderate: {}", &moderation_text);

    // Create a Moderations request for a single piece of text.
    // We can also provide multiple texts with `ModerationsInput::Strings(...)`.
    let request = CreateModerationRequest {
        input: ModerationsInput::String("I hate you and want to harm you.".to_string()),
        input: ModerationsInput::String(moderation_text),
        // Optionally, specify a model like "text-moderation-latest" or "text-moderation-stable":
        model: None,
    };
212 changes: 206 additions & 6 deletions src/api.rs
@@ -28,12 +28,11 @@
//! }
//! ```
use reqwest::StatusCode;
use serde::de::DeserializeOwned;
use serde::Serialize;

use crate::config::OpenAIClient;
use crate::error::{OpenAIAPIErrorBody, OpenAIError};
use crate::error::OpenAIError;

/// Sends a POST request with a JSON body to the given `endpoint`.
///
@@ -128,11 +127,14 @@ where
{
    let status = response.status();
    if status.is_success() {
        // Deserialize the success response.
        let parsed_response = response.json::<R>().await?;
        Ok(parsed_response)
        // 1) Read raw text from the response
        let text = response.text().await?;

        // 2) Attempt to parse with serde_json; on failure, map to `OpenAIError::DeserializeError`
        //    via the `From<serde_json::Error> for OpenAIError` conversion.
        let parsed: R = serde_json::from_str(&text).map_err(OpenAIError::from)?;

        Ok(parsed)
    } else {
        // Attempt to parse the error body returned by the OpenAI API.
        parse_error_response(response).await
    }
}
Expand All @@ -159,3 +161,201 @@ pub async fn parse_error_response<R>(response: reqwest::Response) -> Result<R, O
        }
    }
}

#[cfg(test)]
mod tests {
    //! # Tests for the `api` module
    //!
    //! These tests use [`wiremock`](https://crates.io/crates/wiremock) to **mock** HTTP responses from
    //! the OpenAI API, ensuring we can verify request-building, JSON handling, and error parsing logic
    //! without hitting real servers.

    use super::*;
    use crate::config::OpenAIClient;
    use crate::error::{OpenAIError, OpenAIError::APIError};
    use serde::Deserialize;
    use wiremock::matchers::{method, path};
    use wiremock::{Mock, MockServer, ResponseTemplate};

    #[derive(Debug, Deserialize)]
    struct MockResponse {
        pub foo: String,
        pub bar: i32,
    }

    /// Tests that `post_json` correctly sends a JSON POST request and parses a successful JSON response.
    #[tokio::test]
    async fn test_post_json_success() {
        // Start a local mock server
        let mock_server = MockServer::start().await;

        // Define an expected JSON response
        let mock_data = serde_json::json!({ "foo": "hello", "bar": 42 });

        // Mock a 200 OK response from the endpoint
        Mock::given(method("POST"))
            .and(path("/test-endpoint"))
            .respond_with(ResponseTemplate::new(200).set_body_json(mock_data))
            .mount(&mock_server)
            .await;

        // Construct an OpenAIClient that points to our mock server URL
        let client = OpenAIClient::builder()
            .with_api_key("test-key")
            .with_base_url(&mock_server.uri()) // wiremock server
            .build()
            .unwrap();

        // We'll send some dummy request body
        let request_body = serde_json::json!({ "dummy": true });

        // Call the function under test
        let result: Result<MockResponse, OpenAIError> =
            post_json(&client, "test-endpoint", &request_body).await;

        // Verify we got a success
        assert!(result.is_ok(), "Expected Ok, got Err");
        let parsed = result.unwrap();
        assert_eq!(parsed.foo, "hello");
        assert_eq!(parsed.bar, 42);
    }

    /// Tests that `post_json` handles non-2xx status codes and returns an `APIError`.
    #[tokio::test]
    async fn test_post_json_api_error() {
        let mock_server = MockServer::start().await;

        // Suppose the server returns a 400 with a JSON error body
        let error_body = serde_json::json!({
            "error": {
                "message": "Invalid request",
                "type": "invalid_request_error",
                "param": null,
                "code": "some_code"
            }
        });

        Mock::given(method("POST"))
            .and(path("/test-endpoint"))
            .respond_with(ResponseTemplate::new(400).set_body_json(error_body))
            .mount(&mock_server)
            .await;

        let client = OpenAIClient::builder()
            .with_api_key("test-key")
            .with_base_url(&mock_server.uri())
            .build()
            .unwrap();

        let request_body = serde_json::json!({ "dummy": true });

        let result: Result<MockResponse, OpenAIError> =
            post_json(&client, "test-endpoint", &request_body).await;

        // We should get an APIError with the parsed message
        match result {
            Err(APIError { message, .. }) => {
                assert!(
                    message.contains("Invalid request"),
                    "Expected error message about invalid request, got: {}",
                    message
                );
            }
            other => panic!("Expected APIError, got {:?}", other),
        }
    }

    /// Tests that `post_json` surfaces a deserialization error if the server returns malformed JSON.
    #[tokio::test]
    async fn test_post_json_deserialize_error() {
        let mock_server = MockServer::start().await;

        // Return valid JSON whose field types won't match `MockResponse`
        let invalid_json = r#"{"foo": 123, "bar": "not_an_integer"}"#;

        Mock::given(method("POST"))
            .and(path("/test-endpoint"))
            .respond_with(ResponseTemplate::new(200).set_body_raw(invalid_json, "application/json"))
            .mount(&mock_server)
            .await;

        let client = OpenAIClient::builder()
            .with_api_key("test-key")
            .with_base_url(&mock_server.uri())
            .build()
            .unwrap();

        let request_body = serde_json::json!({ "dummy": true });

        let result: Result<MockResponse, OpenAIError> =
            post_json(&client, "test-endpoint", &request_body).await;

        // We expect a DeserializeError
        assert!(matches!(result, Err(OpenAIError::DeserializeError(_))));
    }

    /// Tests that `get_json` properly sends a GET request and parses a successful JSON response.
    #[tokio::test]
    async fn test_get_json_success() {
        let mock_server = MockServer::start().await;

        let mock_data = serde_json::json!({ "foo": "abc", "bar": 99 });

        // Mock a GET response
        Mock::given(method("GET"))
            .and(path("/test-get"))
            .respond_with(ResponseTemplate::new(200).set_body_json(mock_data))
            .mount(&mock_server)
            .await;

        let client = OpenAIClient::builder()
            .with_api_key("test-key")
            .with_base_url(&mock_server.uri())
            .build()
            .unwrap();

        // Call the function under test
        let result: Result<MockResponse, OpenAIError> = get_json(&client, "test-get").await;

        // Check the result
        assert!(result.is_ok());
        let parsed = result.unwrap();
        assert_eq!(parsed.foo, "abc");
        assert_eq!(parsed.bar, 99);
    }

    /// Tests that `get_json` handles a non-successful status code with an error body.
    #[tokio::test]
    async fn test_get_json_api_error() {
        let mock_server = MockServer::start().await;

        let error_body = serde_json::json!({
            "error": {
                "message": "Resource not found",
                "type": "not_found",
                "code": "missing_resource"
            }
        });

        Mock::given(method("GET"))
            .and(path("/test-get"))
            .respond_with(ResponseTemplate::new(404).set_body_json(error_body))
            .mount(&mock_server)
            .await;

        let client = OpenAIClient::builder()
            .with_api_key("test-key")
            .with_base_url(&mock_server.uri())
            .build()
            .unwrap();

        let result: Result<MockResponse, OpenAIError> = get_json(&client, "test-get").await;

        match result {
            Err(APIError { message, .. }) => {
                assert!(message.contains("Resource not found"));
            }
            other => panic!("Expected APIError, got {:?}", other),
        }
    }
}
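
For context on the `map_err(OpenAIError::from)` call in `handle_response`: the conversion it relies on lives in `src/error.rs`, which this commit does not touch. Below is a minimal sketch of what such an impl can look like with the `thiserror` crate already listed in Cargo.toml; the variant's exact payload is an assumption, since the tests above only show that a `DeserializeError` tuple variant exists:

use thiserror::Error;

// Sketch only: the real `OpenAIError` has more variants, e.g. the
// `APIError { message, .. }` matched in the tests above.
#[derive(Debug, Error)]
pub enum OpenAIError {
    /// Returned when a response body cannot be parsed into the expected type.
    /// `#[from]` derives the `From<serde_json::Error>` impl that
    /// `map_err(OpenAIError::from)` uses.
    #[error("failed to deserialize response: {0}")]
    DeserializeError(#[from] serde_json::Error),
}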