Prevents single split searches from different leaf_search from interleaving #5509

Merged 2 commits on Oct 23, 2024
1 change: 1 addition & 0 deletions quickwit/Cargo.lock

(Generated file; diff not rendered.)

1 change: 1 addition & 0 deletions quickwit/quickwit-search/Cargo.toml
@@ -50,6 +50,7 @@ quickwit-storage = { workspace = true }
[dev-dependencies]
assert-json-diff = { workspace = true }
proptest = { workspace = true }
rand = { workspace = true }
serde_json = { workspace = true }
typetag = { workspace = true }

20 changes: 13 additions & 7 deletions quickwit/quickwit-search/src/leaf.rs
@@ -50,6 +50,7 @@ use tracing::*;
use crate::collector::{make_collector_for_split, make_merge_collector, IncrementalCollector};
use crate::metrics::SEARCH_METRICS;
use crate::root::is_metadata_count_request_with_ast;
use crate::search_permit_provider::SearchPermit;
use crate::service::{deserialize_doc_mapper, SearcherContext};
use crate::{QuickwitAggregations, SearchError};

@@ -1183,7 +1184,6 @@ async fn resolve_storage_and_leaf_search(
aggregations_limits: AggregationLimitsGuard,
) -> crate::Result<LeafSearchResponse> {
let storage = storage_resolver.resolve(&index_uri).await?;

leaf_search(
searcher_context.clone(),
search_request.clone(),
@@ -1259,10 +1259,16 @@ pub async fn leaf_search(
let incremental_merge_collector = IncrementalCollector::new(merge_collector);
let incremental_merge_collector = Arc::new(Mutex::new(incremental_merge_collector));

for (split, mut request) in split_with_req {
let leaf_split_search_permit = searcher_context.leaf_search_split_semaphore
.clone()
.acquire_owned()
// We acquire all of the leaf search permits to make sure our single split search tasks
// do not interleave with other leaf search requests.
let permit_futures = searcher_context
.search_permit_provider
.get_permits(split_with_req.len());

for ((split, mut request), permit_fut) in
split_with_req.into_iter().zip(permit_futures.into_iter())
{
let leaf_split_search_permit = permit_fut
.instrument(info_span!("waiting_for_leaf_search_split_semaphore"))
.await
.expect("Failed to acquire permit. This should never happen! Please, report on https://github.com/quickwit-oss/quickwit/issues.");
@@ -1355,7 +1361,7 @@ async fn leaf_search_single_split_wrapper(
split: SplitIdAndFooterOffsets,
split_filter: Arc<RwLock<CanSplitDoBetter>>,
incremental_merge_collector: Arc<Mutex<IncrementalCollector>>,
leaf_split_search_permit: tokio::sync::OwnedSemaphorePermit,
search_permit: SearchPermit,
aggregations_limits: AggregationLimitsGuard,
) {
crate::SEARCH_METRICS.leaf_searches_splits_total.inc();
Expand All @@ -1374,7 +1380,7 @@ async fn leaf_search_single_split_wrapper(
.await;

// We explicitly drop it, to highlight it to the reader
std::mem::drop(leaf_split_search_permit);
std::mem::drop(search_permit);

if leaf_search_single_split_res.is_ok() {
timer.observe_duration();
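For readers new to this pattern: leaf_search now requests one permit future per split up front and awaits them in order while zipping them with the per-split requests, so a whole batch resolves together. Below is a minimal, self-contained sketch of that zip-and-await shape using plain tokio::sync::oneshot receivers (assuming tokio with the macros, rt, and sync features); the names and the u32 "permit" payload are illustrative stand-ins, not the Quickwit types.

use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    // One future permit per request, delivered over a oneshot channel,
    // mirroring what SearchPermitProvider::get_permits hands back.
    let requests = vec!["split-a", "split-b", "split-c"];
    let (senders, permit_futures): (Vec<_>, Vec<_>) =
        (0..requests.len()).map(|_| oneshot::channel::<u32>()).unzip();

    // Resolve the permits in FIFO order, as the provider does.
    for (permit_id, tx) in senders.into_iter().enumerate() {
        let _ = tx.send(permit_id as u32);
    }

    // The zip-and-await loop mirrors the new leaf_search code: the i-th
    // request is paired with the i-th permit future.
    for (request, permit_fut) in requests.into_iter().zip(permit_futures) {
        let permit = permit_fut.await.expect("sender dropped");
        println!("single split search for {request} holding permit {permit}");
    }
}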
1 change: 1 addition & 0 deletions quickwit/quickwit-search/src/lib.rs
@@ -44,6 +44,7 @@ mod service;
pub(crate) mod top_k_collector;

mod metrics;
mod search_permit_provider;

#[cfg(test)]
mod tests;
6 changes: 4 additions & 2 deletions quickwit/quickwit-search/src/list_terms.rs
@@ -331,10 +331,12 @@ pub async fn leaf_list_terms(
let index_storage_clone = index_storage.clone();
let searcher_context_clone = searcher_context.clone();
async move {
let _leaf_split_search_permit = searcher_context_clone.leaf_search_split_semaphore.clone()
.acquire_owned()
let _leaf_split_search_permit = searcher_context_clone
.search_permit_provider
.get_permit()
.await
.expect("Failed to acquire permit. This should never happen! Please, report on https://github.com/quickwit-oss/quickwit/issues.");

// TODO dedicated counter and timer?
crate::SEARCH_METRICS.leaf_searches_splits_total.inc();
let timer = crate::SEARCH_METRICS
18 changes: 16 additions & 2 deletions quickwit/quickwit-search/src/metrics.rs
@@ -21,8 +21,8 @@

use once_cell::sync::Lazy;
use quickwit_common::metrics::{
exponential_buckets, linear_buckets, new_counter, new_counter_vec, new_histogram,
new_histogram_vec, Histogram, HistogramVec, IntCounter, IntCounterVec,
exponential_buckets, linear_buckets, new_counter, new_counter_vec, new_gauge_vec,
new_histogram, new_histogram_vec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge,
};

pub struct SearchMetrics {
@@ -35,6 +35,8 @@ pub struct SearchMetrics {
pub leaf_searches_splits_total: IntCounter,
pub leaf_search_split_duration_secs: Histogram,
pub job_assigned_total: IntCounterVec<1>,
pub leaf_search_single_split_tasks_pending: IntGauge,
pub leaf_search_single_split_tasks_ongoing: IntGauge,
}

impl Default for SearchMetrics {
@@ -50,6 +52,14 @@ impl Default for SearchMetrics {
.copied()
.collect();

let leaf_search_single_split_tasks = new_gauge_vec::<1>(
"leaf_search_single_split_tasks",
"Number of single split search tasks pending or ongoing",
"search",
&[],
["status"], // takes values "ongoing" or "pending"
);

SearchMetrics {
root_search_requests_total: new_counter_vec(
"root_search_requests_total",
@@ -110,6 +120,10 @@ impl Default for SearchMetrics {
"search",
exponential_buckets(0.001, 2.0, 15).unwrap(),
),
leaf_search_single_split_tasks_ongoing: leaf_search_single_split_tasks
.with_label_values(["ongoing"]),
leaf_search_single_split_tasks_pending: leaf_search_single_split_tasks
.with_label_values(["pending"]),
job_assigned_total: new_counter_vec(
"job_assigned_total",
"Number of job assigned to searchers, per affinity rank.",
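The gauge wiring above goes through Quickwit's own metrics helpers (new_gauge_vec, GaugeGuard). For reference, the same "one gauge family, one label value per task state" pattern expressed directly against the prometheus crate looks roughly like this; a hedged sketch in which the metric name and help text are taken from the diff while everything else is illustrative and not the quickwit_common API.

use prometheus::{IntGaugeVec, Opts, Registry};

fn main() {
    let registry = Registry::new();
    // One gauge family keyed by a "status" label, mirroring the
    // leaf_search_single_split_tasks gauge vec registered above.
    let tasks = IntGaugeVec::new(
        Opts::new(
            "leaf_search_single_split_tasks",
            "Number of single split search tasks pending or ongoing",
        ),
        &["status"],
    )
    .expect("valid metric definition");
    registry
        .register(Box::new(tasks.clone()))
        .expect("metric registered once");

    let pending = tasks.with_label_values(&["pending"]);
    let ongoing = tasks.with_label_values(&["ongoing"]);
    // A task starts out pending and moves to ongoing once a permit is granted.
    pending.inc();
    pending.dec();
    ongoing.inc();
}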
220 changes: 220 additions & 0 deletions quickwit/quickwit-search/src/search_permit_provider.rs
@@ -0,0 +1,220 @@
// Copyright (C) 2024 Quickwit, Inc.
//
// Quickwit is offered under the AGPL v3.0 and as commercial software.
// For commercial licensing, contact us at hello@quickwit.io.
//
// AGPL:
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

use quickwit_common::metrics::GaugeGuard;
use tokio::sync::oneshot;

/// `SearchPermitProvider` is a distributor of permits to perform single split
/// search operations.
///
/// Requests are served in order.
#[derive(Clone)]
pub struct SearchPermitProvider {
inner_arc: Arc<Mutex<InnerSearchPermitProvider>>,
}

impl SearchPermitProvider {
pub fn new(num_permits: usize) -> SearchPermitProvider {
SearchPermitProvider {
inner_arc: Arc::new(Mutex::new(InnerSearchPermitProvider {
num_permits_available: num_permits,
permits_requests: VecDeque::new(),
})),
}
}

/// Returns a future permit in the form of a oneshot Receiver channel.
///
/// At this point the permit is not acquired yet.
#[must_use]
pub fn get_permit(&self) -> oneshot::Receiver<SearchPermit> {
let mut permits_lock = self.inner_arc.lock().unwrap();
permits_lock.get_permit(&self.inner_arc)
}

/// Returns a list of future permits in the form of oneshot Receiver channels.
///
/// The permits returned are guaranteed to be resolved in order.
/// In addition, the permits are guaranteed to be resolved before permits returned by
/// subsequent calls to this function (or `get_permit`).
#[must_use]
pub fn get_permits(&self, num_permits: usize) -> Vec<oneshot::Receiver<SearchPermit>> {
let mut permits_lock = self.inner_arc.lock().unwrap();
permits_lock.get_permits(num_permits, &self.inner_arc)
}
}

struct InnerSearchPermitProvider {
num_permits_available: usize,
permits_requests: VecDeque<oneshot::Sender<SearchPermit>>,
}

impl InnerSearchPermitProvider {
fn get_permit(
&mut self,
inner_arc: &Arc<Mutex<InnerSearchPermitProvider>>,
) -> oneshot::Receiver<SearchPermit> {
let (tx, rx) = oneshot::channel();
self.permits_requests.push_back(tx);
self.assign_available_permits(inner_arc);
rx
}

fn get_permits(
&mut self,
num_permits: usize,
inner_arc: &Arc<Mutex<InnerSearchPermitProvider>>,
) -> Vec<oneshot::Receiver<SearchPermit>> {
let mut permits = Vec::with_capacity(num_permits);
for _ in 0..num_permits {
let (tx, rx) = oneshot::channel();
self.permits_requests.push_back(tx);
permits.push(rx);
}
self.assign_available_permits(inner_arc);
permits
}

fn recycle_permit(&mut self, inner_arc: &Arc<Mutex<InnerSearchPermitProvider>>) {
self.num_permits_available += 1;
self.assign_available_permits(inner_arc);
}

fn assign_available_permits(&mut self, inner_arc: &Arc<Mutex<InnerSearchPermitProvider>>) {
while self.num_permits_available > 0 {
let Some(sender) = self.permits_requests.pop_front() else {
break;
};
let mut ongoing_gauge_guard = GaugeGuard::from_gauge(
&crate::SEARCH_METRICS.leaf_search_single_split_tasks_ongoing,
);
ongoing_gauge_guard.add(1);
let send_res = sender.send(SearchPermit {
_ongoing_gauge_guard: ongoing_gauge_guard,
inner_arc: inner_arc.clone(),
recycle_on_drop: true,
});
match send_res {
Ok(()) => {
self.num_permits_available -= 1;
}
Err(search_permit) => {
search_permit.drop_without_recycling_permit();
}
}
}
crate::SEARCH_METRICS
.leaf_search_single_split_tasks_pending
.set(self.permits_requests.len() as i64);
}
}

pub struct SearchPermit {
_ongoing_gauge_guard: GaugeGuard<'static>,
inner_arc: Arc<Mutex<InnerSearchPermitProvider>>,
recycle_on_drop: bool,
}

impl SearchPermit {
fn drop_without_recycling_permit(mut self) {
self.recycle_on_drop = false;
drop(self);
}
}

impl Drop for SearchPermit {
fn drop(&mut self) {
if !self.recycle_on_drop {
return;
}
let mut inner_guard = self.inner_arc.lock().unwrap();
inner_guard.recycle_permit(&self.inner_arc.clone());
}
}

#[cfg(test)]
mod tests {
use tokio::task::JoinSet;

use super::*;

#[tokio::test]
async fn test_search_permits_get_permits_future() {
// We test here that `get_permits_futures` does not interleave
let search_permits = SearchPermitProvider::new(1);
let mut all_futures = Vec::new();
let first_batch_of_permits = search_permits.get_permits(10);
assert_eq!(first_batch_of_permits.len(), 10);
all_futures.extend(
first_batch_of_permits
.into_iter()
.enumerate()
.map(move |(i, fut)| ((1, i), fut)),
);

let second_batch_of_permits = search_permits.get_permits(10);
assert_eq!(second_batch_of_permits.len(), 10);
all_futures.extend(
second_batch_of_permits
.into_iter()
.enumerate()
.map(move |(i, fut)| ((2, i), fut)),
);

use rand::seq::SliceRandom;
// Not strictly necessary given how JoinSet schedules tasks, but shuffling makes the test slightly more robust.
all_futures.shuffle(&mut rand::thread_rng());

let mut join_set = JoinSet::new();
for (res, fut) in all_futures {
join_set.spawn(async move {
let permit = fut.await;
(res, permit)
});
}
let mut ordered_result: Vec<(usize, usize)> = Vec::with_capacity(20);
while let Some(Ok(((batch_id, order), _permit))) = join_set.join_next().await {
ordered_result.push((batch_id, order));
}

assert_eq!(ordered_result.len(), 20);
for (i, res) in ordered_result[0..10].iter().enumerate() {
assert_eq!(res, &(1, i));
}
for (i, res) in ordered_result[10..20].iter().enumerate() {
assert_eq!(res, &(2, i));
}
}

#[tokio::test]
async fn test_search_permits_receiver_race_condition() {
// Here we test that we don't have a problem if the Receiver is dropped.
// In particular, we want to check that there is not a race condition where drop attempts to
// lock the mutex.
let search_permits = SearchPermitProvider::new(1);
let permit_rx = search_permits.get_permit();
let permit_rx2 = search_permits.get_permit();
drop(permit_rx2);
drop(permit_rx);
let _permit_rx = search_permits.get_permit();
}
}
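One property worth spelling out: a permit is recycled when the SearchPermit is dropped, and that recycling is what resolves the next pending request. Below is a hypothetical extra test sketching this behavior, written in the style of the tests above but not part of the PR; it should hold given the Drop implementation, and would live in the same mod tests.

#[tokio::test]
async fn test_permit_recycled_on_drop_sketch() {
    // With a single permit, the second receiver must stay pending until
    // the first permit is dropped and recycled.
    let provider = SearchPermitProvider::new(1);
    let first_permit = provider.get_permit().await.expect("provider alive");
    let mut second_permit_rx = provider.get_permit();
    // Nothing has been sent yet: the only permit is still held.
    assert!(second_permit_rx.try_recv().is_err());
    drop(first_permit);
    // Dropping the permit recycles it and resolves the pending request.
    let _second_permit = second_permit_rx.await.expect("provider alive");
}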