Skip to content

Commit

Permalink
0.1.1
Browse files Browse the repository at this point in the history
  • Loading branch information
maksimryndin committed Jan 28, 2024
1 parent f5a4f1a commit 0ec34f9
Show file tree
Hide file tree
Showing 5 changed files with 147 additions and 126 deletions.
1 change: 1 addition & 0 deletions .github/ISSUE_TEMPLATE/BUG_REPORT.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -80,4 +80,5 @@ body:
label: Which version of Goral do you run (`goral --version`)?
multiple: false
options:
- 0.1.1
- 0.1.0
11 changes: 11 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
* 0.1.1
- no panic for Google API access failure - just send an error to a messenger
- rule fetch timeout is increased to 3000ms (from 1000ms)
- if process user cannot be retrieved, NA is returned
- fix fetch of a user id of a process
    - fix exponential backoff algorithm (jittered decrease)
- fix repetitive truncation warning and truncation algorithm
- binary size is reduced (by stripping debug info)

* 0.1.0
- first public release
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "goral"
version = "0.1.0"
version = "0.1.1"
edition = "2021"
authors = ["Maksim Ryndin"]
license = "Apache-2.0"
Expand Down
10 changes: 5 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,15 +51,15 @@ You can install Goral

For example, for Linux
```sh
wget https://github.com/maksimryndin/goral/releases/download/0.1.0/goral-0.1.0-x86_64-unknown-linux-gnu.tar.gz
tar -xzf goral-0.1.0-x86_64-unknown-linux-gnu.tar.gz
sudo mv goral-0.1.0-x86_64-unknown-linux-gnu/goral /usr/local/bin/goral
wget https://github.com/maksimryndin/goral/releases/download/0.1.1/goral-0.1.1-x86_64-unknown-linux-gnu.tar.gz
tar -xzf goral-0.1.1-x86_64-unknown-linux-gnu.tar.gz
sudo mv goral-0.1.1-x86_64-unknown-linux-gnu/goral /usr/local/bin/goral
rm -rf goral*
```

2) from source (you need [Rust](https://www.rust-lang.org/tools/install)) with a command
```sh
git clone --depth 1 --branch 0.1.0 https://github.com/maksimryndin/goral
git clone --depth 1 --branch 0.1.1 https://github.com/maksimryndin/goral
cd goral
RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --target <target triple>
```
Expand Down Expand Up @@ -387,7 +387,7 @@ spreadsheet_id = "<spreadsheet_id>"
[system]
spreadsheet_id = "<spreadsheet_id>"
# push_interval_secs = 20
# autotruncate_at_usage_percent = 20
# autotruncate_at_usage_percent = 20 # approx 2 days of data under default settings
# scrape_interval_secs = 10
# scrape_timeout_ms = 3000
# messenger.url = "<messenger api url for sending messages>"
Expand Down
249 changes: 129 additions & 120 deletions src/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -635,7 +635,7 @@ mod tests {
use crate::spreadsheet::sheet::tests::mock_ordinary_google_sheet;
use crate::spreadsheet::spreadsheet::tests::TestState;
use crate::tests::TEST_HOST_ID;
use crate::Sender;
use crate::{Notification, Sender};
use chrono::NaiveDate;
use google_sheets4::Error;
use tokio::sync::mpsc;
Expand Down Expand Up @@ -1147,138 +1147,147 @@ mod tests {

#[tokio::test]
async fn truncation_flow() {
let (tx, mut rx) = mpsc::channel(1);
tokio::spawn(async move {
let (tx, mut rx) = mpsc::channel::<Notification>(1);
let messages = tokio::spawn(async move {
let mut warn_count = 0;
while let Some(msg) = rx.recv().await {
if msg.message.contains("current spreadsheet usage") {
warn_count += 1;
}
println!("{msg:?}");
}
assert_eq!(warn_count, 1, "number of warnings is 1 after being sent");
});
let tx = Sender::new(tx, GENERAL_SERVICE_NAME);
let sheets_api = SpreadsheetAPI::new(tx.clone(), TestState::new(vec![], None, None));
let storage = Arc::new(Storage::new(
TEST_HOST_ID.to_string(),
sheets_api,
tx.clone(),
));
// for simplicity we create logs with one key to easily
// make assertions on rows count (only two columns - timestamp and key)
let mut log = AppendableLog::new(
storage,
"spreadsheet1".to_string(),
GENERAL_SERVICE_NAME.to_string(),
Some(tx.clone()),
0.01, // 0.01% of 10 000 000 cells means 1000 cells or 500 rows
);

let timestamp = NaiveDate::from_ymd_opt(2023, 10, 19)
.expect("test assert: static date")
.and_hms_opt(0, 0, 0)
.expect("test assert: static time");

let mut datarows = Vec::with_capacity(400); // going to add 400 rows or 800 cells
for _ in 0..200 {
datarows.push(Datarow::new(
"log_name1".to_string(),
timestamp,
vec![(format!("key11"), Datavalue::Size(400_u64))],
));
datarows.push(Datarow::new(
"log_name2".to_string(),
timestamp,
vec![(format!("key21"), Datavalue::Size(400_u64))],
{
let tx = Sender::new(tx, GENERAL_SERVICE_NAME);
let sheets_api = SpreadsheetAPI::new(tx.clone(), TestState::new(vec![], None, None));
let storage = Arc::new(Storage::new(
TEST_HOST_ID.to_string(),
sheets_api,
tx.clone(),
));
}
datarows.push(Datarow::new(
RULES_LOG_NAME.to_string(),
timestamp,
vec![(format!("key21"), Datavalue::Size(400_u64))],
)); // 2 rows of rules (including header row) or 4 cells

log.append(datarows).await.unwrap(); // 808 cells of log_name1, log_name2 and rules including headers
// for simplicity we create logs with one key to easily
// make assertions on rows count (only two columns - timestamp and key)
let mut log = AppendableLog::new(
storage,
"spreadsheet1".to_string(),
GENERAL_SERVICE_NAME.to_string(),
Some(tx.clone()),
0.01, // 0.01% of 10 000 000 cells means 1000 cells or 500 rows
);

let all_sheets = log
.storage
.google
.sheets_filtered_by_metadata(&log.spreadsheet_id, &Metadata::new(vec![]))
.await
.unwrap();
assert_eq!(
all_sheets.len(),
3,
"`log_name1`, `log_name2`, `{}` sheets have been created",
RULES_LOG_NAME
);
for i in 0..3 {
if all_sheets[i].title().contains("log_name1")
|| all_sheets[i].title().contains("log_name2")
{
assert_eq!(
all_sheets[i].row_count(),
Some(201),
"`log_name..` contains header row and 248 rows of data"
);
} else {
assert_eq!(
all_sheets[i].row_count(),
Some(2),
"`{}` contains header row and 1 row of data",
RULES_LOG_NAME
);
let timestamp = NaiveDate::from_ymd_opt(2023, 10, 19)
.expect("test assert: static date")
.and_hms_opt(0, 0, 0)
.expect("test assert: static time");

let mut datarows = Vec::with_capacity(400); // going to add 400 rows or 800 cells
for _ in 0..200 {
datarows.push(Datarow::new(
"log_name1".to_string(),
timestamp,
vec![(format!("key11"), Datavalue::Size(400_u64))],
));
datarows.push(Datarow::new(
"log_name2".to_string(),
timestamp,
vec![(format!("key21"), Datavalue::Size(400_u64))],
));
}
}

// we have 808 cells used out of 1000 (limit)
// now add 200 datarows => above the limit
// for log_name1 the key has changed - new sheet will be created

let mut datarows = Vec::with_capacity(10);
for _ in 0..100 {
datarows.push(Datarow::new(
"log_name1".to_string(),
timestamp,
vec![(format!("key12"), Datavalue::Size(400_u64))],
));
datarows.push(Datarow::new(
"log_name2".to_string(),
RULES_LOG_NAME.to_string(),
timestamp,
vec![(format!("key21"), Datavalue::Size(400_u64))],
));
} // log_name1 - new sheet to be created with headers,
// so old log_name1 - 201 rows, new log_name1 - 101 rows, log_name2 - 301 rows, rules - 2 rows - total 605 rows or 1210 cells
// we remove 210 and 1000 * 30% or 510 cells
// cells = (201+101+301) * 2 = 1206
// (201+101)*2/1206 = 50.08% of log_name1 or 256 cells or 128 rows
        // 301*2/1206 = 49.91% of log_name2 or 256 cells or 128 rows
)); // 2 rows of rules (including header row) or 4 cells

log.append(datarows).await.unwrap();
log.append(datarows).await.unwrap(); // 808 cells of log_name1, log_name2 and rules including headers

let all_sheets = log
.storage
.google
.sheets_filtered_by_metadata(&log.spreadsheet_id, &Metadata::new(vec![]))
.await
.unwrap();
assert_eq!(
all_sheets.len(),
4,
"`log_name1` with another key has been created"
);
let all_sheets = log
.storage
.google
.sheets_filtered_by_metadata(&log.spreadsheet_id, &Metadata::new(vec![]))
.await
.unwrap();
assert_eq!(
all_sheets.len(),
3,
"`log_name1`, `log_name2`, `{}` sheets have been created",
RULES_LOG_NAME
);
for i in 0..3 {
if all_sheets[i].title().contains("log_name1")
|| all_sheets[i].title().contains("log_name2")
{
assert_eq!(
all_sheets[i].row_count(),
Some(201),
"`log_name..` contains header row and 248 rows of data"
);
} else {
assert_eq!(
all_sheets[i].row_count(),
Some(2),
"`{}` contains header row and 1 row of data",
RULES_LOG_NAME
);
}
}

for i in 0..3 {
if all_sheets[i].title().contains("log_name2") {
assert_eq!(all_sheets[i].row_count(), Some(174));
} else if all_sheets[i].title().contains("log_name1") {
assert!(
all_sheets[i].row_count() == Some(73) || all_sheets[i].row_count() == Some(101),
);
} else {
assert_eq!(
all_sheets[i].row_count(),
Some(2),
"`{}` contains header row and 1 row of data",
RULES_LOG_NAME
);
// we have 808 cells used out of 1000 (limit)
// now add 200 datarows => above the limit
// for log_name1 the key has changed - new sheet will be created

let mut datarows = Vec::with_capacity(10);
for _ in 0..100 {
datarows.push(Datarow::new(
"log_name1".to_string(),
timestamp,
vec![(format!("key12"), Datavalue::Size(400_u64))],
));
datarows.push(Datarow::new(
"log_name2".to_string(),
timestamp,
vec![(format!("key21"), Datavalue::Size(400_u64))],
));
} // log_name1 - new sheet to be created with headers,
// so old log_name1 - 201 rows, new log_name1 - 101 rows, log_name2 - 301 rows, rules - 2 rows - total 605 rows or 1210 cells
// we remove 210 and 1000 * 30% or 510 cells
// cells = (201+101+301) * 2 = 1206
// (201+101)*2/1206 = 50.08% of log_name1 or 256 cells or 128 rows
            // 301*2/1206 = 49.91% of log_name2 or 256 cells or 128 rows

log.append(datarows).await.unwrap();

let all_sheets = log
.storage
.google
.sheets_filtered_by_metadata(&log.spreadsheet_id, &Metadata::new(vec![]))
.await
.unwrap();
assert_eq!(
all_sheets.len(),
4,
"`log_name1` with another key has been created"
);

for i in 0..3 {
if all_sheets[i].title().contains("log_name2") {
assert_eq!(all_sheets[i].row_count(), Some(174));
} else if all_sheets[i].title().contains("log_name1") {
assert!(
all_sheets[i].row_count() == Some(73)
|| all_sheets[i].row_count() == Some(101),
);
} else {
assert_eq!(
all_sheets[i].row_count(),
Some(2),
"`{}` contains header row and 1 row of data",
RULES_LOG_NAME
);
}
}
}
} // a scope to drop senders
messages.await.unwrap();
}
}

0 comments on commit 0ec34f9

Please sign in to comment.