diff --git a/pkg/segment/set.go b/pkg/segment/set.go index 025e180bf0..f494e0dba3 100644 --- a/pkg/segment/set.go +++ b/pkg/segment/set.go @@ -119,7 +119,7 @@ const ( // // +stateify savable type Set struct { - root node `state:".(*SegmentDataSlices)"` + root node `state:".([]FlatSegment)"` } // IsEmpty returns true if the set contains no segments. @@ -292,42 +292,68 @@ func (s *Set) UpperBoundGap(max Key) GapIterator { return seg.PrevGap() } -// Add inserts the given segment into the set and returns true. If the new -// segment can be merged with adjacent segments, Add will do so. If the new -// segment would overlap an existing segment, Add returns false. If Add -// succeeds, all existing iterators are invalidated. -func (s *Set) Add(r Range, val Value) bool { - if r.Length() <= 0 { - panic(fmt.Sprintf("invalid segment range %v", r)) +// FirstLargeEnoughGap returns the first gap in the set with at least the given +// length. If no such gap exists, FirstLargeEnoughGap returns a terminal +// iterator. +// +// Precondition: trackGaps must be 1. +func (s *Set) FirstLargeEnoughGap(minSize Key) GapIterator { + if trackGaps != 1 { + panic("set is not tracking gaps") } - gap := s.FindGap(r.Start) - if !gap.Ok() { - return false + gap := s.FirstGap() + if gap.Range().Length() >= minSize { + return gap } - if r.End > gap.End() { - return false + return gap.NextLargeEnoughGap(minSize) +} + +// LastLargeEnoughGap returns the last gap in the set with at least the given +// length. If no such gap exists, LastLargeEnoughGap returns a terminal +// iterator. +// +// Precondition: trackGaps must be 1. +func (s *Set) LastLargeEnoughGap(minSize Key) GapIterator { + if trackGaps != 1 { + panic("set is not tracking gaps") + } + gap := s.LastGap() + if gap.Range().Length() >= minSize { + return gap } - s.Insert(gap, r, val) - return true + return gap.PrevLargeEnoughGap(minSize) } -// AddWithoutMerging inserts the given segment into the set and returns true. -// If it would overlap an existing segment, AddWithoutMerging does nothing and -// returns false. If AddWithoutMerging succeeds, all existing iterators are -// invalidated. -func (s *Set) AddWithoutMerging(r Range, val Value) bool { - if r.Length() <= 0 { - panic(fmt.Sprintf("invalid segment range %v", r)) +// LowerBoundLargeEnoughGap returns the first gap in the set with at least the +// given length and whose range contains a key greater than or equal to min. If +// no such gap exists, LowerBoundLargeEnoughGap returns a terminal iterator. +// +// Precondition: trackGaps must be 1. +func (s *Set) LowerBoundLargeEnoughGap(min, minSize Key) GapIterator { + if trackGaps != 1 { + panic("set is not tracking gaps") } - gap := s.FindGap(r.Start) - if !gap.Ok() { - return false + gap := s.LowerBoundGap(min) + if gap.Range().Length() >= minSize { + return gap } - if r.End > gap.End() { - return false + return gap.NextLargeEnoughGap(minSize) +} + +// UpperBoundLargeEnoughGap returns the last gap in the set with at least the +// given length and whose range contains a key less than or equal to max. If no +// such gap exists, UpperBoundLargeEnoughGap returns a terminal iterator. +// +// Precondition: trackGaps must be 1. 
+func (s *Set) UpperBoundLargeEnoughGap(max, minSize Key) GapIterator { + if trackGaps != 1 { + panic("set is not tracking gaps") } - s.InsertWithoutMergingUnchecked(gap, r, val) - return true + gap := s.UpperBoundGap(max) + if gap.Range().Length() >= minSize { + return gap + } + return gap.PrevLargeEnoughGap(minSize) } // Insert inserts the given segment into the given gap. If the new segment can @@ -424,6 +450,107 @@ func (s *Set) InsertWithoutMergingUnchecked(gap GapIterator, r Range, val Value) return Iterator{gap.node, gap.index} } +// InsertRange inserts the given segment into the set. If the new segment can +// be merged with adjacent segments, InsertRange will do so. InsertRange +// returns an iterator to the segment containing the inserted value (which may +// have been merged with other values). All existing iterators (excluding the +// returned iterator) are invalidated. +// +// If the new segment would overlap an existing segment, or if r is invalid, +// InsertRange panics. +// +// InsertRange searches the set to find the gap to insert into. If the caller +// already has the appropriate GapIterator, or if the caller needs to do +// additional work between finding the gap and insertion, use Insert instead. +func (s *Set) InsertRange(r Range, val Value) Iterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + seg, gap := s.Find(r.Start) + if seg.Ok() { + panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, seg.Range())) + } + if gap.End() < r.End { + panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, gap.NextSegment().Range())) + } + return s.Insert(gap, r, val) +} + +// InsertWithoutMergingRange inserts the given segment into the set and returns +// an iterator to the inserted segment. All existing iterators (excluding the +// returned iterator) are invalidated. +// +// If the new segment would overlap an existing segment, or if r is invalid, +// InsertWithoutMergingRange panics. +// +// InsertWithoutMergingRange searches the set to find the gap to insert into. +// If the caller already has the appropriate GapIterator, or if the caller +// needs to do additional work between finding the gap and insertion, use +// InsertWithoutMerging instead. +func (s *Set) InsertWithoutMergingRange(r Range, val Value) Iterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + seg, gap := s.Find(r.Start) + if seg.Ok() { + panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, seg.Range())) + } + if gap.End() < r.End { + panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, gap.NextSegment().Range())) + } + return s.InsertWithoutMerging(gap, r, val) +} + +// TryInsertRange attempts to insert the given segment into the set. If the new +// segment can be merged with adjacent segments, TryInsertRange will do so. +// TryInsertRange returns an iterator to the segment containing the inserted +// value (which may have been merged with other values). All existing iterators +// (excluding the returned iterator) are invalidated. +// +// If the new segment would overlap an existing segment, TryInsertRange does +// nothing and returns a terminal iterator. +// +// TryInsertRange searches the set to find the gap to insert into. If the +// caller already has the appropriate GapIterator, or if the caller needs to do +// additional work between finding the gap and insertion, use Insert instead. 
+func (s *Set) TryInsertRange(r Range, val Value) Iterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + seg, gap := s.Find(r.Start) + if seg.Ok() { + return Iterator{} + } + if gap.End() < r.End { + return Iterator{} + } + return s.Insert(gap, r, val) +} + +// TryInsertWithoutMergingRange attempts to insert the given segment into the +// set. If successful, it returns an iterator to the inserted segment; all +// existing iterators (excluding the returned iterator) are invalidated. If the +// new segment would overlap an existing segment, TryInsertWithoutMergingRange +// does nothing and returns a terminal iterator. +// +// TryInsertWithoutMergingRange searches the set to find the gap to insert +// into. If the caller already has the appropriate GapIterator, or if the +// caller needs to do additional work between finding the gap and insertion, +// use InsertWithoutMerging instead. +func (s *Set) TryInsertWithoutMergingRange(r Range, val Value) Iterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + seg, gap := s.Find(r.Start) + if seg.Ok() { + return Iterator{} + } + if gap.End() < r.End { + return Iterator{} + } + return s.InsertWithoutMerging(gap, r, val) +} + // Remove removes the given segment and returns an iterator to the vacated gap. // All existing iterators (including seg, but not including the returned // iterator) are invalidated. @@ -470,6 +597,11 @@ func (s *Set) RemoveAll() { // RemoveRange removes all segments in the given range. An iterator to the // newly formed gap is returned, and all existing iterators are invalidated. +// +// RemoveRange searches the set to find segments to remove. If the caller +// already has an iterator to either end of the range of segments to remove, or +// if the caller needs to do additional work before removing each segment, +// iterate segments and call Remove in a loop instead. func (s *Set) RemoveRange(r Range) GapIterator { seg, gap := s.Find(r.Start) if seg.Ok() { @@ -477,12 +609,34 @@ func (s *Set) RemoveRange(r Range) GapIterator { gap = s.Remove(seg) } for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() { - seg = s.Isolate(seg, r) + seg = s.SplitAfter(seg, r.End) gap = s.Remove(seg) } return gap } +// RemoveFullRange is equivalent to RemoveRange, except that if any key in the +// given range does not correspond to a segment, RemoveFullRange panics. +func (s *Set) RemoveFullRange(r Range) GapIterator { + seg := s.FindSegment(r.Start) + if !seg.Ok() { + panic(fmt.Sprintf("missing segment at %v", r.Start)) + } + seg = s.SplitBefore(seg, r.Start) + for { + seg = s.SplitAfter(seg, r.End) + end := seg.End() + gap := s.Remove(seg) + if r.End <= end { + return gap + } + seg = gap.NextSegment() + if !seg.Ok() || seg.Start() != end { + panic(fmt.Sprintf("missing segment at %v", end)) + } + } +} + // Merge attempts to merge two neighboring segments. If successful, Merge // returns an iterator to the merged segment, and all existing iterators are // invalidated. Otherwise, Merge returns a terminal iterator. @@ -516,7 +670,68 @@ func (s *Set) MergeUnchecked(first, second Iterator) Iterator { return Iterator{} } -// MergeAll attempts to merge all adjacent segments in the set. All existing +// MergePrev attempts to merge the given segment with its predecessor if +// possible, and returns an updated iterator to the extended segment. All +// existing iterators (including seg, but not including the returned iterator) +// are invalidated. 
+// +// MergePrev is usually used when mutating segments while iterating them in +// order of increasing keys, to attempt merging of each mutated segment with +// its previously-mutated predecessor. In such cases, merging a mutated segment +// with its unmutated successor would incorrectly cause the latter to be +// skipped. +func (s *Set) MergePrev(seg Iterator) Iterator { + if prev := seg.PrevSegment(); prev.Ok() { + if mseg := s.MergeUnchecked(prev, seg); mseg.Ok() { + seg = mseg + } + } + return seg +} + +// MergeNext attempts to merge the given segment with its successor if +// possible, and returns an updated iterator to the extended segment. All +// existing iterators (including seg, but not including the returned iterator) +// are invalidated. +// +// MergeNext is usually used when mutating segments while iterating them in +// order of decreasing keys, to attempt merging of each mutated segment with +// its previously-mutated successor. In such cases, merging a mutated segment +// with its unmutated predecessor would incorrectly cause the latter to be +// skipped. +func (s *Set) MergeNext(seg Iterator) Iterator { + if next := seg.NextSegment(); next.Ok() { + if mseg := s.MergeUnchecked(seg, next); mseg.Ok() { + seg = mseg + } + } + return seg +} + +// Unisolate attempts to merge the given segment with its predecessor and +// successor if possible, and returns an updated iterator to the extended +// segment. All existing iterators (including seg, but not including the +// returned iterator) are invalidated. +// +// Unisolate is usually used in conjunction with Isolate when mutating part of +// a single segment in a way that may affect its mergeability. For the reasons +// described by MergePrev and MergeNext, it is usually incorrect to use the +// return value of Unisolate in a loop variable. +func (s *Set) Unisolate(seg Iterator) Iterator { + if prev := seg.PrevSegment(); prev.Ok() { + if mseg := s.MergeUnchecked(prev, seg); mseg.Ok() { + seg = mseg + } + } + if next := seg.NextSegment(); next.Ok() { + if mseg := s.MergeUnchecked(seg, next); mseg.Ok() { + seg = mseg + } + } + return seg +} + +// MergeAll merges all mergeable adjacent segments in the set. All existing // iterators are invalidated. func (s *Set) MergeAll() { seg := s.FirstSegment() @@ -533,15 +748,20 @@ func (s *Set) MergeAll() { } } -// MergeRange attempts to merge all adjacent segments that contain a key in the -// specific range. All existing iterators are invalidated. -func (s *Set) MergeRange(r Range) { +// MergeInsideRange attempts to merge all adjacent segments that contain a key +// in the specific range. All existing iterators are invalidated. +// +// MergeInsideRange only makes sense after mutating the set in a way that may +// change the mergeability of modified segments; callers should prefer to use +// MergePrev or MergeNext during the mutating loop instead (depending on the +// direction of iteration), in order to avoid a redundant search. 
+func (s *Set) MergeInsideRange(r Range) {
 	seg := s.LowerBoundSegment(r.Start)
 	if !seg.Ok() {
 		return
 	}
 	next := seg.NextSegment()
-	for next.Ok() && next.Range().Start < r.End {
+	for next.Ok() && next.Start() < r.End {
 		if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
 			seg, next = mseg, mseg.NextSegment()
 		} else {
@@ -550,9 +770,14 @@ func (s *Set) MergeRange(r Range) {
 	}
 }
 
-// MergeAdjacent attempts to merge the segment containing r.Start with its
+// MergeOutsideRange attempts to merge the segment containing r.Start with its
 // predecessor, and the segment containing r.End-1 with its successor.
-func (s *Set) MergeAdjacent(r Range) {
+//
+// MergeOutsideRange only makes sense after mutating the set in a way that may
+// change the mergeability of modified segments; callers should prefer to use
+// MergePrev or MergeNext during the mutating loop instead (depending on the
+// direction of iteration), in order to avoid two redundant searches.
+func (s *Set) MergeOutsideRange(r Range) {
 	first := s.FindSegment(r.Start)
 	if first.Ok() {
 		if prev := first.PrevSegment(); prev.Ok() {
@@ -597,21 +822,58 @@ func (s *Set) SplitUnchecked(seg Iterator, split Key) (Iterator, Iterator) {
 	return seg2.PrevSegment(), seg2
 }
 
-// SplitAt splits the segment straddling split, if one exists. SplitAt returns
-// true if a segment was split and false otherwise. If SplitAt splits a
-// segment, all existing iterators are invalidated.
-func (s *Set) SplitAt(split Key) bool {
-	if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
-		s.SplitUnchecked(seg, split)
-		return true
+// SplitBefore ensures that the given segment's start is at least start by
+// splitting at start if necessary, and returns an updated iterator to the
+// bounded segment. All existing iterators (including seg, but not including
+// the returned iterator) are invalidated.
+//
+// SplitBefore is usually used when mutating segments in a range. In such
+// cases, when iterating segments in order of increasing keys, the first
+// segment may extend beyond the start of the range to be mutated, and needs to
+// be SplitBefore to ensure that only the part of the segment within the range
+// is mutated. When iterating segments in order of decreasing keys, SplitBefore
+// and SplitAfter exchange roles; i.e. SplitBefore needs to be invoked on each
+// segment, while SplitAfter only needs to be invoked on the first.
+//
+// Preconditions: start < seg.End().
+func (s *Set) SplitBefore(seg Iterator, start Key) Iterator {
+	if seg.Range().CanSplitAt(start) {
+		_, seg = s.SplitUnchecked(seg, start)
 	}
-	return false
+	return seg
 }
 
-// Isolate ensures that the given segment's range does not escape r by
-// splitting at r.Start and r.End if necessary, and returns an updated iterator
-// to the bounded segment. All existing iterators (including seg, but not
-// including the returned iterators) are invalidated.
+// SplitAfter ensures that the given segment's end is at most end by splitting
+// at end if necessary, and returns an updated iterator to the bounded segment.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+//
+// SplitAfter is usually used when mutating segments in a range. In such cases,
+// when iterating segments in order of increasing keys, each iterated segment
+// may extend beyond the end of the range to be mutated, and needs to be
+// SplitAfter to ensure that only the part of the segment within the range is
+// mutated.
When iterating segments in order of decreasing keys, SplitBefore +// and SplitAfter exchange roles; i.e. SplitBefore needs to be invoked on each +// segment, while SplitAfter only needs to be invoked on the first. +// +// Preconditions: seg.Start() < end. +func (s *Set) SplitAfter(seg Iterator, end Key) Iterator { + if seg.Range().CanSplitAt(end) { + seg, _ = s.SplitUnchecked(seg, end) + } + return seg +} + +// Isolate ensures that the given segment's range is a subset of r by splitting +// at r.Start and r.End if necessary, and returns an updated iterator to the +// bounded segment. All existing iterators (including seg, but not including +// the returned iterators) are invalidated. +// +// Isolate is usually used when mutating part of a single segment, or when +// mutating segments in a range where the first segment is not necessarily +// split, making use of SplitBefore/SplitAfter complex. +// +// Preconditions: seg.Range().Overlaps(r). func (s *Set) Isolate(seg Iterator, r Range) Iterator { if seg.Range().CanSplitAt(r.Start) { _, seg = s.SplitUnchecked(seg, r.Start) @@ -622,33 +884,118 @@ func (s *Set) Isolate(seg Iterator, r Range) Iterator { return seg } -// ApplyContiguous applies a function to a contiguous range of segments, -// splitting if necessary. The function is applied until the first gap is -// encountered, at which point the gap is returned. If the function is applied -// across the entire range, a terminal gap is returned. All existing iterators -// are invalidated. +// LowerBoundSegmentSplitBefore combines LowerBoundSegment and SplitBefore. // -// N.B. The Iterator must not be invalidated by the function. -func (s *Set) ApplyContiguous(r Range, fn func(seg Iterator)) GapIterator { - seg, gap := s.Find(r.Start) - if !seg.Ok() { - return gap +// LowerBoundSegmentSplitBefore is usually used when mutating segments in a +// range while iterating them in order of increasing keys. In such cases, +// LowerBoundSegmentSplitBefore provides an iterator to the first segment to be +// mutated, suitable as the initial value for a loop variable. +func (s *Set) LowerBoundSegmentSplitBefore(min Key) Iterator { + seg := s.LowerBoundSegment(min) + if seg.Ok() { + seg = s.SplitBefore(seg, min) + } + return seg +} + +// UpperBoundSegmentSplitAfter combines UpperBoundSegment and SplitAfter. +// +// UpperBoundSegmentSplitAfter is usually used when mutating segments in a +// range while iterating them in order of decreasing keys. In such cases, +// UpperBoundSegmentSplitAfter provides an iterator to the first segment to be +// mutated, suitable as the initial value for a loop variable. +func (s *Set) UpperBoundSegmentSplitAfter(max Key) Iterator { + seg := s.UpperBoundSegment(max) + if seg.Ok() { + seg = s.SplitAfter(seg, max) } + return seg +} + +// VisitRange applies the function f to all segments intersecting the range r, +// in order of ascending keys. Segments will not be split, so f may be called +// on segments lying partially outside r. Non-empty gaps between segments are +// skipped. If a call to f returns false, VisitRange stops iteration +// immediately. +// +// N.B. f must not invalidate iterators into s. +func (s *Set) VisitRange(r Range, f func(seg Iterator) bool) { + for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() { + if !f(seg) { + return + } + } +} + +// VisitFullRange is equivalent to VisitRange, except that if any key in r that +// is visited before f returns false does not correspond to a segment, +// VisitFullRange panics. 
+func (s *Set) VisitFullRange(r Range, f func(seg Iterator) bool) { + pos := r.Start + seg := s.FindSegment(r.Start) for { - seg = s.Isolate(seg, r) - fn(seg) - if seg.End() >= r.End { - return GapIterator{} + if !seg.Ok() { + panic(fmt.Sprintf("missing segment at %v", pos)) } - gap = seg.NextGap() - if !gap.IsEmpty() { - return gap + if !f(seg) { + return } - seg = gap.NextSegment() - if !seg.Ok() { - // This implies that the last segment extended all the - // way to the maximum value, since the gap was empty. - return GapIterator{} + pos = seg.End() + if r.End <= pos { + return + } + seg, _ = seg.NextNonEmpty() + } +} + +// MutateRange applies the function f to all segments intersecting the range r, +// in order of ascending keys. Segments that lie partially outside r are split +// before f is called, such that f only observes segments entirely within r. +// Iterated segments are merged again after f is called. Non-empty gaps between +// segments are skipped. If a call to f returns false, MutateRange stops +// iteration immediately. +// +// MutateRange invalidates all existing iterators. +// +// N.B. f must not invalidate iterators into s. +func (s *Set) MutateRange(r Range, f func(seg Iterator) bool) { + seg := s.LowerBoundSegmentSplitBefore(r.Start) + for seg.Ok() && seg.Start() < r.End { + seg = s.SplitAfter(seg, r.End) + cont := f(seg) + seg = s.MergePrev(seg) + if !cont { + s.MergeNext(seg) + return + } + seg = seg.NextSegment() + } + if seg.Ok() { + s.MergePrev(seg) + } +} + +// MutateFullRange is equivalent to MutateRange, except that if any key in r +// that is visited before f returns false does not correspond to a segment, +// MutateFullRange panics. +func (s *Set) MutateFullRange(r Range, f func(seg Iterator) bool) { + seg := s.FindSegment(r.Start) + if !seg.Ok() { + panic(fmt.Sprintf("missing segment at %v", r.Start)) + } + seg = s.SplitBefore(seg, r.Start) + for { + seg = s.SplitAfter(seg, r.End) + cont := f(seg) + end := seg.End() + seg = s.MergePrev(seg) + if !cont || r.End <= end { + s.MergeNext(seg) + return + } + seg = seg.NextSegment() + if !seg.Ok() || seg.Start() != end { + panic(fmt.Sprintf("missing segment at %v", end)) } } } @@ -1358,11 +1705,10 @@ func (seg Iterator) NextGap() GapIterator { // Otherwise, exactly one of the iterators returned by PrevNonEmpty will be // non-terminal. func (seg Iterator) PrevNonEmpty() (Iterator, GapIterator) { - gap := seg.PrevGap() - if gap.Range().Length() != 0 { - return Iterator{}, gap + if prev := seg.PrevSegment(); prev.Ok() && prev.End() == seg.Start() { + return prev, GapIterator{} } - return gap.PrevSegment(), GapIterator{} + return Iterator{}, seg.PrevGap() } // NextNonEmpty returns the iterated segment's successor if it is adjacent, or @@ -1371,11 +1717,10 @@ func (seg Iterator) PrevNonEmpty() (Iterator, GapIterator) { // Otherwise, exactly one of the iterators returned by NextNonEmpty will be // non-terminal. func (seg Iterator) NextNonEmpty() (Iterator, GapIterator) { - gap := seg.NextGap() - if gap.Range().Length() != 0 { - return Iterator{}, gap + if next := seg.NextSegment(); next.Ok() && next.Start() == seg.End() { + return next, GapIterator{} } - return gap.NextSegment(), GapIterator{} + return Iterator{}, seg.NextGap() } // A GapIterator is conceptually one of: @@ -1495,38 +1840,39 @@ func (gap GapIterator) NextLargeEnoughGap(minSize Key) GapIterator { // // Preconditions: gap is NOT the trailing gap of a non-leaf node. 
func (gap GapIterator) nextLargeEnoughGapHelper(minSize Key) GapIterator { - // Crawl up the tree if no large enough gap in current node or the - // current gap is the trailing one on leaf level. - for gap.node != nil && - (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) { - gap.node, gap.index = gap.node.parent, gap.node.parentIndex - } - // If no large enough gap throughout the whole set, return a terminal - // gap iterator. - if gap.node == nil { - return GapIterator{} - } - // Iterate subsequent gaps. - gap.index++ - for gap.index <= gap.node.nrSegments { - if gap.node.hasChildren { - if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() { - return largeEnoughGap - } - } else { - if gap.Range().Length() >= minSize { - return gap - } + for { + // Crawl up the tree if no large enough gap in current node or the + // current gap is the trailing one on leaf level. + for gap.node != nil && + (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) { + gap.node, gap.index = gap.node.parent, gap.node.parentIndex + } + // If no large enough gap throughout the whole set, return a terminal + // gap iterator. + if gap.node == nil { + return GapIterator{} } + // Iterate subsequent gaps. gap.index++ - } - gap.node, gap.index = gap.node.parent, gap.node.parentIndex - if gap.node != nil && gap.index == gap.node.nrSegments { - // If gap is the trailing gap of a non-leaf node, crawl up to - // parent again and do recursion. + for gap.index <= gap.node.nrSegments { + if gap.node.hasChildren { + if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() { + return largeEnoughGap + } + } else { + if gap.Range().Length() >= minSize { + return gap + } + } + gap.index++ + } gap.node, gap.index = gap.node.parent, gap.node.parentIndex + if gap.node != nil && gap.index == gap.node.nrSegments { + // If gap is the trailing gap of a non-leaf node, crawl up to + // parent again and do recursion. + gap.node, gap.index = gap.node.parent, gap.node.parentIndex + } } - return gap.nextLargeEnoughGapHelper(minSize) } // PrevLargeEnoughGap returns the iterated gap's first prev gap with larger or @@ -1553,38 +1899,39 @@ func (gap GapIterator) PrevLargeEnoughGap(minSize Key) GapIterator { // // Preconditions: gap is NOT the first gap of a non-leaf node. func (gap GapIterator) prevLargeEnoughGapHelper(minSize Key) GapIterator { - // Crawl up the tree if no large enough gap in current node or the - // current gap is the first one on leaf level. - for gap.node != nil && - (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) { - gap.node, gap.index = gap.node.parent, gap.node.parentIndex - } - // If no large enough gap throughout the whole set, return a terminal - // gap iterator. - if gap.node == nil { - return GapIterator{} - } - // Iterate previous gaps. - gap.index-- - for gap.index >= 0 { - if gap.node.hasChildren { - if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() { - return largeEnoughGap - } - } else { - if gap.Range().Length() >= minSize { - return gap - } + for { + // Crawl up the tree if no large enough gap in current node or the + // current gap is the first one on leaf level. 
+ for gap.node != nil && + (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) { + gap.node, gap.index = gap.node.parent, gap.node.parentIndex } + // If no large enough gap throughout the whole set, return a terminal + // gap iterator. + if gap.node == nil { + return GapIterator{} + } + // Iterate previous gaps. gap.index-- - } - gap.node, gap.index = gap.node.parent, gap.node.parentIndex - if gap.node != nil && gap.index == 0 { - // If gap is the first gap of a non-leaf node, crawl up to - // parent again and do recursion. + for gap.index >= 0 { + if gap.node.hasChildren { + if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() { + return largeEnoughGap + } + } else { + if gap.Range().Length() >= minSize { + return gap + } + } + gap.index-- + } gap.node, gap.index = gap.node.parent, gap.node.parentIndex + if gap.node != nil && gap.index == 0 { + // If gap is the first gap of a non-leaf node, crawl up to + // parent again and do recursion. + gap.node, gap.index = gap.node.parent, gap.node.parentIndex + } } - return gap.prevLargeEnoughGapHelper(minSize) } // segmentBeforePosition returns the predecessor segment of the position given @@ -1669,50 +2016,49 @@ func (n *node) writeDebugString(buf *bytes.Buffer, prefix string) { } } -// SegmentDataSlices represents segments from a set as slices of start, end, and -// values. SegmentDataSlices is primarily used as an intermediate representation -// for save/restore and the layout here is optimized for that. +// FlatSegment represents a segment as a single object. FlatSegment is used as +// an intermediate representation for save/restore and tests. // // +stateify savable -type SegmentDataSlices struct { - Start []Key - End []Key - Values []Value +type FlatSegment struct { + Start Key + End Key + Value Value } -// ExportSortedSlices returns a copy of all segments in the given set, in -// ascending key order. -func (s *Set) ExportSortedSlices() *SegmentDataSlices { - var sds SegmentDataSlices +// ExportSlice returns a copy of all segments in the given set, in ascending +// key order. +func (s *Set) ExportSlice() []FlatSegment { + var fs []FlatSegment for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { - sds.Start = append(sds.Start, seg.Start()) - sds.End = append(sds.End, seg.End()) - sds.Values = append(sds.Values, seg.Value()) + fs = append(fs, FlatSegment{ + Start: seg.Start(), + End: seg.End(), + Value: seg.Value(), + }) } - sds.Start = sds.Start[:len(sds.Start):len(sds.Start)] - sds.End = sds.End[:len(sds.End):len(sds.End)] - sds.Values = sds.Values[:len(sds.Values):len(sds.Values)] - return &sds + return fs } -// ImportSortedSlices initializes the given set from the given slice. +// ImportSlice initializes the given set from the given slice. // // Preconditions: // - s must be empty. -// - sds must represent a valid set (the segments in sds must have valid +// - fs must represent a valid set (the segments in fs must have valid // lengths that do not overlap). -// - The segments in sds must be sorted in ascending key order. -func (s *Set) ImportSortedSlices(sds *SegmentDataSlices) error { +// - The segments in fs must be sorted in ascending key order. 
+func (s *Set) ImportSlice(fs []FlatSegment) error { if !s.IsEmpty() { return fmt.Errorf("cannot import into non-empty set %v", s) } gap := s.FirstGap() - for i := range sds.Start { - r := Range{sds.Start[i], sds.End[i]} + for i := range fs { + f := &fs[i] + r := Range{f.Start, f.End} if !gap.Range().IsSupersetOf(r) { - return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i]) + return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: %v => %v", r, f.Value) } - gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap() + gap = s.InsertWithoutMerging(gap, r, f.Value).NextGap() } return nil } diff --git a/pkg/segment/set_state.go b/pkg/segment/set_state.go index 76de925919..f7cc101056 100644 --- a/pkg/segment/set_state.go +++ b/pkg/segment/set_state.go @@ -14,12 +14,16 @@ package segment -func (s *Set) saveRoot() *SegmentDataSlices { - return s.ExportSortedSlices() +func (s *Set) saveRoot() []FlatSegment { + fs := s.ExportSlice() + // The state package saves data in slice capacity beyond slice length; save + // it some time by cutting ours off. + fs = fs[:len(fs):len(fs)] + return fs } -func (s *Set) loadRoot(sds *SegmentDataSlices) { - if err := s.ImportSortedSlices(sds); err != nil { +func (s *Set) loadRoot(fs []FlatSegment) { + if err := s.ImportSlice(fs); err != nil { panic(err) } } diff --git a/pkg/segment/test/segment_test.go b/pkg/segment/test/segment_test.go index 85fa19096a..450b5b8287 100644 --- a/pkg/segment/test/segment_test.go +++ b/pkg/segment/test/segment_test.go @@ -94,10 +94,7 @@ func TestAddRandom(t *testing.T) { order := rand.Perm(testSize) var nrInsertions int for i, j := range order { - if !s.AddWithoutMerging(Range{j, j + 1}, j+valueOffset) { - t.Errorf("Iteration %d: failed to insert segment with key %d", i, j) - break - } + s.InsertWithoutMergingRange(Range{j, j + 1}, j+valueOffset) nrInsertions++ if err := s.segmentTestCheck(nrInsertions, validate); err != nil { t.Errorf("Iteration %d: %v", i, err) @@ -116,9 +113,7 @@ func TestAddRandom(t *testing.T) { func TestRemoveRandom(t *testing.T) { var s Set for i := 0; i < testSize; i++ { - if !s.AddWithoutMerging(Range{i, i + 1}, i+valueOffset) { - t.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i+valueOffset) } order := rand.Perm(testSize) var nrRemovals int @@ -150,10 +145,7 @@ func TestMaxGapAddRandom(t *testing.T) { order := rand.Perm(testSize) var nrInsertions int for i, j := range order { - if !s.AddWithoutMerging(Range{j, j + 1}, j+valueOffset) { - t.Errorf("Iteration %d: failed to insert segment with key %d", i, j) - break - } + s.InsertWithoutMergingRange(Range{j, j + 1}, j+valueOffset) nrInsertions++ if err := s.segmentTestCheck(nrInsertions, validate); err != nil { t.Errorf("Iteration %d: %v", i, err) @@ -178,10 +170,7 @@ func TestMaxGapAddRandomWithRandomInterval(t *testing.T) { order := randIntervalPermutation(testSize) var nrInsertions int for i, j := range order { - if !s.AddWithoutMerging(Range{j, j + rand.Intn(intervalLength-1) + 1}, j+valueOffset) { - t.Errorf("Iteration %d: failed to insert segment with key %d", i, j) - break - } + s.InsertWithoutMergingRange(Range{j, j + rand.Intn(intervalLength-1) + 1}, j+valueOffset) nrInsertions++ if err := s.segmentTestCheck(nrInsertions, validate); err != nil { t.Errorf("Iteration %d: %v", i, err) @@ -204,18 +193,14 @@ func TestMaxGapAddRandomWithRandomInterval(t *testing.T) { func TestMaxGapAddRandomWithMerge(t 
*testing.T) { var s gapSet order := randIntervalPermutation(testSize) - nrInsertions := 1 - for i, j := range order { - if !s.Add(Range{j, j + intervalLength}, j+valueOffset) { - t.Errorf("Iteration %d: failed to insert segment with key %d", i, j) - break - } + for _, j := range order { + s.InsertRange(Range{j, j + intervalLength}, 0) if err := checkSetMaxGap(&s); err != nil { t.Errorf("When inserting %d: %v", j, err) break } } - if got, want := s.countSegments(), nrInsertions; got != want { + if got, want := s.countSegments(), 1; got != want { t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want) } if t.Failed() { @@ -227,9 +212,7 @@ func TestMaxGapAddRandomWithMerge(t *testing.T) { func TestMaxGapRemoveRandom(t *testing.T) { var s gapSet for i := 0; i < testSize; i++ { - if !s.AddWithoutMerging(Range{i, i + 1}, i+valueOffset) { - t.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i+valueOffset) } order := rand.Perm(testSize) var nrRemovals int @@ -264,9 +247,7 @@ func TestMaxGapRemoveRandom(t *testing.T) { func TestMaxGapRemoveHalfRandom(t *testing.T) { var s gapSet for i := 0; i < testSize; i++ { - if !s.AddWithoutMerging(Range{intervalLength * i, intervalLength*i + rand.Intn(intervalLength-1) + 1}, intervalLength*i+valueOffset) { - t.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{intervalLength * i, intervalLength*i + rand.Intn(intervalLength-1) + 1}, intervalLength*i+valueOffset) } order := randIntervalPermutation(testSize) order = order[:testSize/2] @@ -299,29 +280,15 @@ func TestMaxGapRemoveHalfRandom(t *testing.T) { } } -func TestMaxGapAddRandomRemoveRandomHalfWithMerge(t *testing.T) { +func TestMaxGapRemoveHalfRandomWithMerge(t *testing.T) { var s gapSet - order := randIntervalPermutation(testSize * 2) - order = order[:testSize] - for i, j := range order { - if !s.Add(Range{j, j + intervalLength}, j+valueOffset) { - t.Errorf("Iteration %d: failed to insert segment with key %d", i, j) - break - } - if err := checkSetMaxGap(&s); err != nil { - t.Errorf("When inserting %d: %v", j, err) - break - } - } - shuffle(order) + s.InsertRange(Range{0, intervalLength * testSize}, 0) + order := randIntervalPermutation(testSize) + order = order[:testSize/2] var nrRemovals int for _, j := range order { - seg := s.FindSegment(j) - if !seg.Ok() { - continue - } - temprange := seg.Range() - s.Remove(seg) + temprange := Range{j, j + intervalLength} + s.RemoveFullRange(temprange) nrRemovals++ if err := checkSetMaxGap(&s); err != nil { t.Errorf("When removing %v: %v", temprange, err) @@ -339,11 +306,8 @@ func TestNextLargeEnoughGap(t *testing.T) { var s gapSet order := randIntervalPermutation(testSize * 2) order = order[:testSize] - for i, j := range order { - if !s.Add(Range{j, j + rand.Intn(intervalLength-1) + 1}, j+valueOffset) { - t.Errorf("Iteration %d: failed to insert segment with key %d", i, j) - break - } + for _, j := range order { + s.InsertRange(Range{j, j + rand.Intn(intervalLength-1) + 1}, j+valueOffset) if err := checkSetMaxGap(&s); err != nil { t.Errorf("When inserting %d: %v", j, err) break @@ -392,11 +356,8 @@ func TestPrevLargeEnoughGap(t *testing.T) { var s gapSet order := randIntervalPermutation(testSize * 2) order = order[:testSize] - for i, j := range order { - if !s.Add(Range{j, j + rand.Intn(intervalLength-1) + 1}, j+valueOffset) { - t.Errorf("Iteration %d: failed to insert segment with key %d", i, j) - break - } + for _, j := range order { + s.InsertRange(Range{j, j + 
rand.Intn(intervalLength-1) + 1}, j+valueOffset) if err := checkSetMaxGap(&s); err != nil { t.Errorf("When inserting %d: %v", j, err) break @@ -445,9 +406,7 @@ func TestAddSequentialAdjacent(t *testing.T) { var s Set var nrInsertions int for i := 0; i < testSize; i++ { - if !s.AddWithoutMerging(Range{i, i + 1}, i+valueOffset) { - t.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i+valueOffset) nrInsertions++ if err := s.segmentTestCheck(nrInsertions, validate); err != nil { t.Errorf("Iteration %d: %v", i, err) @@ -499,9 +458,7 @@ func TestAddSequentialNonAdjacent(t *testing.T) { for i := 0; i < testSize; i++ { // The range here differs from TestAddSequentialAdjacent so that // consecutive segments are not adjacent. - if !s.AddWithoutMerging(Range{2 * i, 2*i + 1}, 2*i+valueOffset) { - t.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{2 * i, 2*i + 1}, 2*i+valueOffset) nrInsertions++ if err := s.segmentTestCheck(nrInsertions, validate); err != nil { t.Errorf("Iteration %d: %v", i, err) @@ -527,7 +484,7 @@ func TestAddSequentialNonAdjacent(t *testing.T) { } } -func TestMergeSplit(t *testing.T) { +func TestMerge(t *testing.T) { tests := []struct { name string initial []Range @@ -536,60 +493,26 @@ func TestMergeSplit(t *testing.T) { final []Range }{ { - name: "Add merges after existing segment", + name: "InsertRange merges after existing segment", initial: []Range{{1000, 1100}, {1100, 1200}}, final: []Range{{1000, 1200}}, }, { - name: "Add merges before existing segment", + name: "InsertRange merges before existing segment", initial: []Range{{1100, 1200}, {1000, 1100}}, final: []Range{{1000, 1200}}, }, { - name: "Add merges between existing segments", + name: "InsertRange merges between existing segments", initial: []Range{{1000, 1100}, {1200, 1300}, {1100, 1200}}, final: []Range{{1000, 1300}}, }, - { - name: "SplitAt does nothing at a free address", - initial: []Range{{100, 200}}, - split: true, - splitAddr: 300, - final: []Range{{100, 200}}, - }, - { - name: "SplitAt does nothing at the beginning of a segment", - initial: []Range{{100, 200}}, - split: true, - splitAddr: 100, - final: []Range{{100, 200}}, - }, - { - name: "SplitAt does nothing at the end of a segment", - initial: []Range{{100, 200}}, - split: true, - splitAddr: 200, - final: []Range{{100, 200}}, - }, - { - name: "SplitAt splits in the middle of a segment", - initial: []Range{{100, 200}}, - split: true, - splitAddr: 150, - final: []Range{{100, 150}, {150, 200}}, - }, } Tests: for _, test := range tests { var s Set for _, r := range test.initial { - if !s.Add(r, 0) { - t.Errorf("%s: Add(%v) failed; set contents:\n%v", test.name, r, &s) - continue Tests - } - } - if test.split { - s.SplitAt(test.splitAddr) + s.InsertRange(r, 0) } var i int for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { @@ -667,13 +590,76 @@ Tests: } } +func TestMutateRange(t *testing.T) { + tests := []struct { + name string + initial []FlatSegment + increment Range + final []FlatSegment + }{ + { + name: "MutateRange no-op in empty set", + increment: Range{100, 200}, + }, + { + name: "MutateRange modifies existing segment", + initial: []FlatSegment{ + {100, 200, 0}, + }, + increment: Range{100, 200}, + final: []FlatSegment{ + {100, 200, 1}, + }, + }, + { + name: "MutateRange splits segments", + initial: []FlatSegment{ + {50, 150, 0}, + {150, 250, 2}, + }, + increment: Range{100, 200}, + final: []FlatSegment{ + {50, 100, 0}, + {100, 150, 1}, + {150, 200, 3}, + {200, 250, 
2}, + }, + }, + { + name: "MutateRange merges compatible segments", + initial: []FlatSegment{ + {0, 100, 1}, + {100, 200, 0}, + {200, 300, 1}, + }, + increment: Range{100, 200}, + final: []FlatSegment{ + {0, 300, 1}, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var s Set + if err := s.ImportSlice(test.initial); err != nil { + t.Fatalf("Failed to import initial set: %v", err) + } + s.MutateRange(test.increment, func(seg Iterator) bool { + (*seg.ValuePtr())++ + return true + }) + if got := s.ExportSlice(); !reflect.DeepEqual(got, test.final) { + t.Errorf("Set mismatch after mutation: got %v, wanted %v", got, test.final) + } + }) + } +} + func benchmarkAddSequential(b *testing.B, size int) { for n := 0; n < b.N; n++ { var s Set for i := 0; i < size; i++ { - if !s.AddWithoutMerging(Range{i, i + 1}, i) { - b.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i) } } } @@ -685,9 +671,7 @@ func benchmarkAddRandom(b *testing.B, size int) { for n := 0; n < b.N; n++ { var s Set for _, i := range order { - if !s.AddWithoutMerging(Range{i, i + 1}, i) { - b.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i) } } } @@ -695,9 +679,7 @@ func benchmarkAddRandom(b *testing.B, size int) { func benchmarkFindSequential(b *testing.B, size int) { var s Set for i := 0; i < size; i++ { - if !s.AddWithoutMerging(Range{i, i + 1}, i) { - b.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i) } b.ResetTimer() @@ -713,9 +695,7 @@ func benchmarkFindSequential(b *testing.B, size int) { func benchmarkFindRandom(b *testing.B, size int) { var s Set for i := 0; i < size; i++ { - if !s.AddWithoutMerging(Range{i, i + 1}, i) { - b.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i) } order := rand.Perm(size) @@ -732,9 +712,7 @@ func benchmarkFindRandom(b *testing.B, size int) { func benchmarkIteration(b *testing.B, size int) { var s Set for i := 0; i < size; i++ { - if !s.AddWithoutMerging(Range{i, i + 1}, i) { - b.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i) } b.ResetTimer() @@ -753,9 +731,7 @@ func benchmarkAddFindRemoveSequential(b *testing.B, size int) { for n := 0; n < b.N; n++ { var s Set for i := 0; i < size; i++ { - if !s.AddWithoutMerging(Range{i, i + 1}, i) { - b.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i) } for i := 0; i < size; i++ { seg := s.FindSegment(i) @@ -777,9 +753,7 @@ func benchmarkAddFindRemoveRandom(b *testing.B, size int) { for n := 0; n < b.N; n++ { var s Set for _, i := range order { - if !s.AddWithoutMerging(Range{i, i + 1}, i) { - b.Fatalf("Failed to insert segment %d", i) - } + s.InsertWithoutMergingRange(Range{i, i + 1}, i) } for _, i := range order { seg := s.FindSegment(i) diff --git a/pkg/segment/test/set_functions.go b/pkg/segment/test/set_functions.go index 652c010daa..89effd3792 100644 --- a/pkg/segment/test/set_functions.go +++ b/pkg/segment/test/set_functions.go @@ -29,8 +29,8 @@ func (setFunctions) MaxKey() int { func (setFunctions) ClearValue(*int) {} -func (setFunctions) Merge(_ Range, val1 int, _ Range, _ int) (int, bool) { - return val1, true +func (setFunctions) Merge(_ Range, val1 int, _ Range, val2 int) (int, bool) { + return val1, val1 == val2 } func (setFunctions) Split(_ Range, val int, _ int) (int, int) { diff --git a/pkg/sentry/devices/accel/gasket.go 
b/pkg/sentry/devices/accel/gasket.go index a916739bef..04fa4c927f 100644 --- a/pkg/sentry/devices/accel/gasket.go +++ b/pkg/sentry/devices/accel/gasket.go @@ -15,8 +15,6 @@ package accel import ( - "fmt" - "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/gasket" "gvisor.dev/gvisor/pkg/abi/linux" @@ -117,12 +115,10 @@ func gasketMapBufferIoctl(ctx context.Context, t *kernel.Task, hostFd int32, fd defer fd.device.mu.Unlock() for _, pr := range prs { rlen := uint64(pr.Source.Length()) - if !fd.device.devAddrSet.Add(DevAddrRange{ + fd.device.devAddrSet.InsertRange(DevAddrRange{ devAddr, devAddr + rlen, - }, pinnedAccelMem{pinnedRange: pr, pageTableIndex: userIoctlParams.PageTableIndex}) { - panic(fmt.Sprintf("unexpected overlap of devaddr range [%#x-%#x)", devAddr, devAddr+rlen)) - } + }, pinnedAccelMem{pinnedRange: pr, pageTableIndex: userIoctlParams.PageTableIndex}) devAddr += rlen } return n, nil diff --git a/pkg/sentry/fsutil/dirty_set.go b/pkg/sentry/fsutil/dirty_set.go index 38383e7302..ce550cd753 100644 --- a/pkg/sentry/fsutil/dirty_set.go +++ b/pkg/sentry/fsutil/dirty_set.go @@ -73,57 +73,57 @@ func (dirtySetFunctions) Split(_ memmap.MappableRange, val DirtyInfo, _ uint64) // MarkClean marks all offsets in mr as not dirty, except for those to which // KeepDirty has been applied. -func (ds *DirtySet) MarkClean(mr memmap.MappableRange) { - seg := ds.LowerBoundSegment(mr.Start) +func (s *DirtySet) MarkClean(mr memmap.MappableRange) { + seg := s.LowerBoundSegment(mr.Start) for seg.Ok() && seg.Start() < mr.End { if seg.Value().Keep { seg = seg.NextSegment() continue } - seg = ds.Isolate(seg, mr) - seg = ds.Remove(seg).NextSegment() + seg = s.Isolate(seg, mr) + seg = s.Remove(seg).NextSegment() } } // KeepClean marks all offsets in mr as not dirty, even those that were // previously kept dirty by KeepDirty. -func (ds *DirtySet) KeepClean(mr memmap.MappableRange) { - ds.RemoveRange(mr) +func (s *DirtySet) KeepClean(mr memmap.MappableRange) { + s.RemoveRange(mr) } // MarkDirty marks all offsets in mr as dirty. -func (ds *DirtySet) MarkDirty(mr memmap.MappableRange) { - ds.setDirty(mr, false) +func (s *DirtySet) MarkDirty(mr memmap.MappableRange) { + s.setDirty(mr, false) } // KeepDirty marks all offsets in mr as dirty and prevents them from being // marked as clean by MarkClean. -func (ds *DirtySet) KeepDirty(mr memmap.MappableRange) { - ds.setDirty(mr, true) +func (s *DirtySet) KeepDirty(mr memmap.MappableRange) { + s.setDirty(mr, true) } -func (ds *DirtySet) setDirty(mr memmap.MappableRange, keep bool) { +func (s *DirtySet) setDirty(mr memmap.MappableRange, keep bool) { var changedAny bool defer func() { if changedAny { // Merge segments split by Isolate to reduce cost of iteration. - ds.MergeRange(mr) + s.MergeInsideRange(mr) } }() - seg, gap := ds.Find(mr.Start) + seg, gap := s.Find(mr.Start) for { switch { case seg.Ok() && seg.Start() < mr.End: if keep && !seg.Value().Keep { changedAny = true - seg = ds.Isolate(seg, mr) + seg = s.Isolate(seg, mr) seg.ValuePtr().Keep = true } seg, gap = seg.NextNonEmpty() case gap.Ok() && gap.Start() < mr.End: changedAny = true - seg = ds.Insert(gap, gap.Range().Intersect(mr), DirtyInfo{keep}) + seg = s.Insert(gap, gap.Range().Intersect(mr), DirtyInfo{keep}) seg, gap = seg.NextNonEmpty() default: @@ -135,18 +135,18 @@ func (ds *DirtySet) setDirty(mr memmap.MappableRange, keep bool) { // AllowClean allows MarkClean to mark offsets in mr as not dirty, ending the // effect of a previous call to KeepDirty. 
(It does not itself mark those // offsets as not dirty.) -func (ds *DirtySet) AllowClean(mr memmap.MappableRange) { +func (s *DirtySet) AllowClean(mr memmap.MappableRange) { var changedAny bool defer func() { if changedAny { // Merge segments split by Isolate to reduce cost of iteration. - ds.MergeRange(mr) + s.MergeInsideRange(mr) } }() - for seg := ds.LowerBoundSegment(mr.Start); seg.Ok() && seg.Start() < mr.End; seg = seg.NextSegment() { + for seg := s.LowerBoundSegment(mr.Start); seg.Ok() && seg.Start() < mr.End; seg = seg.NextSegment() { if seg.Value().Keep { changedAny = true - seg = ds.Isolate(seg, mr) + seg = s.Isolate(seg, mr) seg.ValuePtr().Keep = false } } @@ -163,7 +163,7 @@ func SyncDirty(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet defer func() { if changedDirty { // Merge segments split by Isolate to reduce cost of iteration. - dirty.MergeRange(mr) + dirty.MergeInsideRange(mr) } }() dseg := dirty.LowerBoundSegment(mr.Start) diff --git a/pkg/sentry/fsutil/dirty_set_test.go b/pkg/sentry/fsutil/dirty_set_test.go index 48448c97c3..1a1ebe8806 100644 --- a/pkg/sentry/fsutil/dirty_set_test.go +++ b/pkg/sentry/fsutil/dirty_set_test.go @@ -27,12 +27,10 @@ func TestDirtySet(t *testing.T) { set.MarkDirty(memmap.MappableRange{0, 2 * hostarch.PageSize}) set.KeepDirty(memmap.MappableRange{hostarch.PageSize, 2 * hostarch.PageSize}) set.MarkClean(memmap.MappableRange{0, 2 * hostarch.PageSize}) - want := &DirtySegmentDataSlices{ - Start: []uint64{hostarch.PageSize}, - End: []uint64{2 * hostarch.PageSize}, - Values: []DirtyInfo{{Keep: true}}, + want := []DirtyFlatSegment{ + {hostarch.PageSize, 2 * hostarch.PageSize, DirtyInfo{Keep: true}}, } - if got := set.ExportSortedSlices(); !reflect.DeepEqual(got, want) { + if got := set.ExportSlice(); !reflect.DeepEqual(got, want) { t.Errorf("set:\n\tgot %v,\n\twant %v", got, want) } } diff --git a/pkg/sentry/fsutil/file_range_set.go b/pkg/sentry/fsutil/file_range_set.go index f693ddaa4d..f6a8f1e622 100644 --- a/pkg/sentry/fsutil/file_range_set.go +++ b/pkg/sentry/fsutil/file_range_set.go @@ -80,9 +80,9 @@ func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) memmap.FileRan // PagesToFill returns the number of pages that that Fill() will allocate // for the given required and optional parameters. -func (frs *FileRangeSet) PagesToFill(required, optional memmap.MappableRange) uint64 { +func (s *FileRangeSet) PagesToFill(required, optional memmap.MappableRange) uint64 { var numPages uint64 - gap := frs.LowerBoundGap(required.Start) + gap := s.LowerBoundGap(required.Start) for gap.Ok() && gap.Start() < required.End { gr := gap.Range().Intersect(optional) numPages += gr.Length() / hostarch.PageSize @@ -111,8 +111,8 @@ func (frs *FileRangeSet) PagesToFill(required, optional memmap.MappableRange) ui // - required.Length() > 0. // - optional.IsSupersetOf(required). // - required and optional must be page-aligned. 
-func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, fileSize uint64, mf *pgalloc.MemoryFile, kind usage.MemoryKind, allocMode pgalloc.AllocationMode, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) (uint64, error) { - gap := frs.LowerBoundGap(required.Start) +func (s *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, fileSize uint64, mf *pgalloc.MemoryFile, kind usage.MemoryKind, allocMode pgalloc.AllocationMode, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) (uint64, error) { + gap := s.LowerBoundGap(required.Start) var pagesAlloced uint64 memCgID := pgalloc.MemoryCgroupIDFromContext(ctx) for gap.Ok() && gap.Start() < required.End { @@ -175,7 +175,7 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map if done := fr.Length(); done != 0 { gr.End = gr.Start + done pagesAlloced += gr.Length() / hostarch.PageSize - gap = frs.Insert(gap, gr, fr.Start).NextGap() + gap = s.Insert(gap, gr, fr.Start).NextGap() } if err != nil { @@ -189,43 +189,42 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map // corresponding memmap.FileRanges. // // Preconditions: mr must be page-aligned. -func (frs *FileRangeSet) Drop(mr memmap.MappableRange, mf *pgalloc.MemoryFile) { - seg := frs.LowerBoundSegment(mr.Start) +func (s *FileRangeSet) Drop(mr memmap.MappableRange, mf *pgalloc.MemoryFile) { + seg := s.LowerBoundSegment(mr.Start) for seg.Ok() && seg.Start() < mr.End { - seg = frs.Isolate(seg, mr) + seg = s.Isolate(seg, mr) mf.DecRef(seg.FileRange()) - seg = frs.Remove(seg).NextSegment() + seg = s.Remove(seg).NextSegment() } } // DropAll removes all segments in mr, freeing the corresponding // memmap.FileRanges. It returns the number of pages freed. -func (frs *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) uint64 { +func (s *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) uint64 { var pagesFreed uint64 - for seg := frs.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { mf.DecRef(seg.FileRange()) pagesFreed += seg.Range().Length() / hostarch.PageSize } - frs.RemoveAll() + s.RemoveAll() return pagesFreed } -// Truncate updates frs to reflect Mappable truncation to the given length: +// Truncate updates s to reflect Mappable truncation to the given length: // bytes after the new EOF on the same page are zeroed, and pages after the new // EOF are freed. It returns the number of pages freed. -func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) uint64 { +func (s *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) uint64 { var pagesFreed uint64 pgendaddr, ok := hostarch.Addr(end).RoundUp() if ok { pgend := uint64(pgendaddr) // Free truncated pages. - frs.SplitAt(pgend) - seg := frs.LowerBoundSegment(pgend) + seg := s.LowerBoundSegmentSplitBefore(pgend) for seg.Ok() { mf.DecRef(seg.FileRange()) pagesFreed += seg.Range().Length() / hostarch.PageSize - seg = frs.Remove(seg).NextSegment() + seg = s.Remove(seg).NextSegment() } if end == pgend { @@ -236,7 +235,7 @@ func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) uint64 { // Here we know end < end.RoundUp(). If the new EOF lands in the // middle of a page that we have, zero out its contents beyond the new // length. 
- seg := frs.FindSegment(end) + seg := s.FindSegment(end) if seg.Ok() { fr := seg.FileRange() fr.Start += end - seg.Start() diff --git a/pkg/sentry/fsutil/frame_ref_set.go b/pkg/sentry/fsutil/frame_ref_set.go index 6f95cffcb9..217090cad4 100644 --- a/pkg/sentry/fsutil/frame_ref_set.go +++ b/pkg/sentry/fsutil/frame_ref_set.go @@ -64,21 +64,21 @@ func (FrameRefSetFunctions) Split(_ memmap.FileRange, val FrameRefSegInfo, _ uin // are accounted as host page cache memory mappings. The new segments will be // associated with the memCgID, if the segment already exists then the memCgID // will not be changed. -func (frSet *FrameRefSet) IncRefAndAccount(fr memmap.FileRange, memCgID uint32) { - seg, gap := frSet.Find(fr.Start) +func (s *FrameRefSet) IncRefAndAccount(fr memmap.FileRange, memCgID uint32) { + seg, gap := s.Find(fr.Start) for { switch { case seg.Ok() && seg.Start() < fr.End: - seg = frSet.Isolate(seg, fr) + seg = s.Isolate(seg, fr) seg.ValuePtr().refs++ seg, gap = seg.NextNonEmpty() case gap.Ok() && gap.Start() < fr.End: newRange := gap.Range().Intersect(fr) usage.MemoryAccounting.Inc(newRange.Length(), usage.Mapped, memCgID) frInfo := FrameRefSegInfo{refs: 1, memCgID: memCgID} - seg, gap = frSet.InsertWithoutMerging(gap, newRange, frInfo).NextNonEmpty() + seg, gap = s.InsertWithoutMerging(gap, newRange, frInfo).NextNonEmpty() default: - frSet.MergeAdjacent(fr) + s.MergeOutsideRange(fr) return } } @@ -86,18 +86,18 @@ func (frSet *FrameRefSet) IncRefAndAccount(fr memmap.FileRange, memCgID uint32) // DecRefAndAccount removes a reference on the range fr and untracks segments // that are removed from memory accounting. -func (frSet *FrameRefSet) DecRefAndAccount(fr memmap.FileRange) { - seg := frSet.FindSegment(fr.Start) +func (s *FrameRefSet) DecRefAndAccount(fr memmap.FileRange) { + seg := s.FindSegment(fr.Start) for seg.Ok() && seg.Start() < fr.End { - seg = frSet.Isolate(seg, fr) + seg = s.Isolate(seg, fr) if old := seg.ValuePtr().refs; old == 1 { usage.MemoryAccounting.Dec(seg.Range().Length(), usage.Mapped, seg.ValuePtr().memCgID) - seg = frSet.Remove(seg).NextSegment() + seg = s.Remove(seg).NextSegment() } else { seg.ValuePtr().refs-- seg = seg.NextSegment() } } - frSet.MergeAdjacent(fr) + s.MergeOutsideRange(fr) } diff --git a/pkg/sentry/kernel/auth/id_map.go b/pkg/sentry/kernel/auth/id_map.go index 640a857dae..900c4c6e2f 100644 --- a/pkg/sentry/kernel/auth/id_map.go +++ b/pkg/sentry/kernel/auth/id_map.go @@ -185,10 +185,10 @@ func (ns *UserNamespace) trySetUIDMap(entries []IDMapEntry) error { return linuxerr.EPERM } // If either of these Adds fail, we have an overlapping range. 
-		if !ns.uidMapFromParent.Add(idMapRange{e.FirstParentID, lastParentID}, e.FirstID) {
+		if !ns.uidMapFromParent.TryInsertRange(idMapRange{e.FirstParentID, lastParentID}, e.FirstID).Ok() {
 			return linuxerr.EINVAL
 		}
-		if !ns.uidMapToParent.Add(idMapRange{e.FirstID, lastID}, e.FirstParentID) {
+		if !ns.uidMapToParent.TryInsertRange(idMapRange{e.FirstID, lastID}, e.FirstParentID).Ok() {
 			return linuxerr.EINVAL
 		}
 	}
@@ -248,10 +248,10 @@ func (ns *UserNamespace) trySetGIDMap(entries []IDMapEntry) error {
 		if !ns.parent.allIDsMapped(&ns.parent.gidMapToParent, e.FirstParentID, lastParentID) {
 			return linuxerr.EPERM
 		}
-		if !ns.gidMapFromParent.Add(idMapRange{e.FirstParentID, lastParentID}, e.FirstID) {
+		if !ns.gidMapFromParent.TryInsertRange(idMapRange{e.FirstParentID, lastParentID}, e.FirstID).Ok() {
 			return linuxerr.EINVAL
 		}
-		if !ns.gidMapToParent.Add(idMapRange{e.FirstID, lastID}, e.FirstParentID) {
+		if !ns.gidMapToParent.TryInsertRange(idMapRange{e.FirstID, lastID}, e.FirstParentID).Ok() {
 			return linuxerr.EINVAL
 		}
 	}
diff --git a/pkg/sentry/kernel/auth/user_namespace.go b/pkg/sentry/kernel/auth/user_namespace.go
index 52b0cdf731..fb4b203b6a 100644
--- a/pkg/sentry/kernel/auth/user_namespace.go
+++ b/pkg/sentry/kernel/auth/user_namespace.go
@@ -76,9 +76,8 @@ func NewRootUserNamespace() *UserNamespace {
 		&ns.gidMapFromParent,
 		&ns.gidMapToParent,
 	} {
-		if !m.Add(idMapRange{0, math.MaxUint32}, 0) {
-			panic("Failed to insert into empty ID map")
-		}
+		// Insertion into an empty map shouldn't fail.
+		m.InsertRange(idMapRange{0, math.MaxUint32}, 0)
 	}
 	return &ns
 }
diff --git a/pkg/sentry/memmap/mapping_set.go b/pkg/sentry/memmap/mapping_set.go
index 32863bb5e6..3a40a16b60 100644
--- a/pkg/sentry/memmap/mapping_set.go
+++ b/pkg/sentry/memmap/mapping_set.go
@@ -228,7 +228,7 @@ func (s *MappingSet) RemoveMapping(ms MappingSpace, ar hostarch.AddrRange, offse
 			seg = seg.NextSegment()
 		}
 	}
-	s.MergeAdjacent(mr)
+	s.MergeOutsideRange(mr)
 	return unmapped
 }
diff --git a/pkg/sentry/mm/lifecycle.go b/pkg/sentry/mm/lifecycle.go
index 9ded62e010..ae0d2d282b 100644
--- a/pkg/sentry/mm/lifecycle.go
+++ b/pkg/sentry/mm/lifecycle.go
@@ -144,7 +144,7 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {
 	mm2.activeMu.NestedLock(activeLockForked)
 	defer mm2.activeMu.NestedUnlock(activeLockForked)
 	if dontforks {
-		defer mm.pmas.MergeRange(mm.applicationAddrRange())
+		defer mm.pmas.MergeInsideRange(mm.applicationAddrRange())
 	}
 	srcvseg := mm.vmas.FirstSegment()
 	dstpgap := mm2.pmas.FirstGap()
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index 232dc02032..f33e0a567d 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -657,10 +657,10 @@ func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms h
 	mm.activeMu.Lock()
 	defer mm.activeMu.Unlock()
 	defer func() {
-		mm.vmas.MergeRange(ar)
-		mm.vmas.MergeAdjacent(ar)
-		mm.pmas.MergeRange(ar)
-		mm.pmas.MergeAdjacent(ar)
+		mm.vmas.MergeInsideRange(ar)
+		mm.vmas.MergeOutsideRange(ar)
+		mm.pmas.MergeInsideRange(ar)
+		mm.pmas.MergeOutsideRange(ar)
 	}()
 	pseg := mm.pmas.LowerBoundSegment(ar.Start)
 	var didUnmapAS bool
@@ -869,8 +869,8 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length u
 		}
 		vseg, _ = vseg.NextNonEmpty()
 	}
-	mm.vmas.MergeRange(ar)
-	mm.vmas.MergeAdjacent(ar)
+	mm.vmas.MergeInsideRange(ar)
+	mm.vmas.MergeOutsideRange(ar)
 	if unmapped {
 		mm.mappingMu.Unlock()
 		return linuxerr.ENOMEM
@@ -1034,8 +1034,8 @@ func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy
 	mm.mappingMu.Lock()
 	defer mm.mappingMu.Unlock()
 	defer func() {
-		mm.vmas.MergeRange(ar)
-		mm.vmas.MergeAdjacent(ar)
+		mm.vmas.MergeInsideRange(ar)
+		mm.vmas.MergeOutsideRange(ar)
 	}()
 	vseg := mm.vmas.LowerBoundSegment(ar.Start)
 	lastEnd := ar.Start
@@ -1067,8 +1067,8 @@ func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork
 	mm.mappingMu.Lock()
 	defer mm.mappingMu.Unlock()
 	defer func() {
-		mm.vmas.MergeRange(ar)
-		mm.vmas.MergeAdjacent(ar)
+		mm.vmas.MergeInsideRange(ar)
+		mm.vmas.MergeOutsideRange(ar)
 	}()

 	for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index bb67fe788d..074ebced34 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -576,13 +576,11 @@ func (f *MemoryFile) allocate(length uint64, opts *AllocOpts) (memmap.FileRange,
 		}
 	}

 	// Mark selected pages as in use.
-	if !f.usage.Add(fr, usageInfo{
+	f.usage.InsertRange(fr, usageInfo{
 		kind:    opts.Kind,
 		refs:    1,
 		memCgID: opts.MemCgID,
-	}) {
-		panic(fmt.Sprintf("allocating %v: failed to insert into usage set:\n%v", fr, &f.usage))
-	}
+	})
 	return fr, nil
 }

@@ -849,7 +847,7 @@ func (f *MemoryFile) markDecommitted(fr memmap.FileRange) {
 	defer f.mu.Unlock()
 	// Since we're changing the knownCommitted attribute, we need to merge
 	// across the entire range to ensure that the usage tree is minimal.
-	gap := f.usage.ApplyContiguous(fr, func(seg usageIterator) {
+	f.usage.MutateFullRange(fr, func(seg usageIterator) bool {
 		val := seg.ValuePtr()
 		if val.knownCommitted {
 			// Drop the usageExpected appropriately.
@@ -859,11 +857,8 @@ func (f *MemoryFile) markDecommitted(fr memmap.FileRange) {
 			val.knownCommitted = false
 		}
 		val.memCgID = 0
+		return true
 	})
-	if gap.Ok() {
-		panic(fmt.Sprintf("Decommit(%v): attempted to decommit unallocated pages %v:\n%v", fr, gap.Range(), &f.usage))
-	}
-	f.usage.MergeRange(fr)
 }

 // HasUniqueRef returns true if all pages in the given range have exactly one
@@ -875,16 +870,15 @@ func (f *MemoryFile) markDecommitted(fr memmap.FileRange) {
 func (f *MemoryFile) HasUniqueRef(fr memmap.FileRange) bool {
 	f.mu.Lock()
 	defer f.mu.Unlock()
-	seg := f.usage.FindSegment(fr.Start)
-	for {
+	hasUniqueRef := true
+	f.usage.VisitFullRange(fr, func(seg usageIterator) bool {
 		if seg.ValuePtr().refs != 1 {
+			hasUniqueRef = false
 			return false
 		}
-		seg = seg.NextSegment()
-		if !seg.Ok() || fr.End <= seg.Start() {
-			return true
-		}
-	}
+		return true
+	})
+	return hasUniqueRef
 }

 // IncRef implements memmap.File.IncRef.
@@ -896,14 +890,10 @@ func (f *MemoryFile) IncRef(fr memmap.FileRange, memCgID uint32) {
 	f.mu.Lock()
 	defer f.mu.Unlock()

-	gap := f.usage.ApplyContiguous(fr, func(seg usageIterator) {
+	f.usage.MutateFullRange(fr, func(seg usageIterator) bool {
 		seg.ValuePtr().refs++
+		return true
 	})
-	if gap.Ok() {
-		panic(fmt.Sprintf("IncRef(%v): attempted to IncRef on unallocated pages %v:\n%v", fr, gap.Range(), &f.usage))
-	}
-
-	f.usage.MergeAdjacent(fr)
 }

 // DecRef implements memmap.File.DecRef.
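Throughout the pgalloc.go hunks above and below, hand-rolled iteration (FindSegment/Isolate loops, ApplyContiguous with an explicit gap check, and a trailing merge) is replaced by the new MutateFullRange and VisitFullRange helpers, whose callback returns a bool meaning "keep going". The toy program below is only a sketch of that callback contract over a plain slice of flat segments, not gvisor's generated segment-set API; FlatSeg, visitFull, and the field names are invented for illustration.

```go
package main

import "fmt"

// FlatSeg is a stand-in for one flattened segment: the half-open range
// [Start, End) plus a reference count. (Invented for this sketch; gvisor's
// generated sets use their own iterator and value types.)
type FlatSeg struct {
	Start, End uint64
	Refs       uint64
}

// visitFull calls f on every segment overlapping [start, end), stopping early
// if f returns false; this mirrors the callback shape used by VisitFullRange
// and MutateFullRange in the hunks above. The real helpers presumably also
// verify that [start, end) is fully covered (the deleted code panicked on
// gaps); that check is omitted here.
func visitFull(segs []FlatSeg, start, end uint64, f func(*FlatSeg) bool) {
	for i := range segs {
		s := &segs[i]
		if s.End <= start || s.Start >= end {
			continue
		}
		if !f(s) {
			return
		}
	}
}

func main() {
	segs := []FlatSeg{{0, 4096, 1}, {4096, 8192, 2}}

	// HasUniqueRef-style query: stop as soon as a segment with Refs != 1 shows up.
	unique := true
	visitFull(segs, 0, 8192, func(s *FlatSeg) bool {
		if s.Refs != 1 {
			unique = false
			return false
		}
		return true
	})
	fmt.Println("unique:", unique) // false: the second segment has Refs == 2

	// IncRef-style mutation: bump every overlapping segment and keep going.
	visitFull(segs, 0, 8192, func(s *FlatSeg) bool {
		s.Refs++
		return true
	})
	fmt.Println(segs) // [{0 4096 2} {4096 8192 3}]
}
```

The stop-on-false convention is what lets HasUniqueRef above drop its manual iterator loop while keeping its short-circuit behavior, and the "full range" contract is presumably why the explicit gap-check panics could be deleted at the call sites.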
@@ -917,15 +907,14 @@ func (f *MemoryFile) DecRef(fr memmap.FileRange) {
 	f.mu.Lock()
 	defer f.mu.Unlock()

-	for seg := f.usage.FindSegment(fr.Start); seg.Ok() && seg.Start() < fr.End; seg = seg.NextSegment() {
-		seg = f.usage.Isolate(seg, fr)
+	f.usage.MutateFullRange(fr, func(seg usageIterator) bool {
 		val := seg.ValuePtr()
 		if val.refs == 0 {
 			panic(fmt.Sprintf("DecRef(%v): 0 existing references on %v:\n%v", fr, seg.Range(), &f.usage))
 		}
 		val.refs--
 		if val.refs == 0 {
-			f.reclaim.Add(seg.Range(), reclaimSetValue{})
+			f.reclaim.InsertRange(seg.Range(), reclaimSetValue{})
 			freed = true
 			// Reclassify memory as System, until it's freed by the reclaim
 			// goroutine.
@@ -934,8 +923,8 @@ func (f *MemoryFile) DecRef(fr memmap.FileRange) {
 			}
 			val.kind = usage.System
 		}
-	}
-	f.usage.MergeAdjacent(fr)
+		return true
+	})

 	if freed {
 		f.reclaimable = true
diff --git a/pkg/sentry/pgalloc/pgalloc_test.go b/pkg/sentry/pgalloc/pgalloc_test.go
index 56d0285e8c..0cd88e27c9 100644
--- a/pkg/sentry/pgalloc/pgalloc_test.go
+++ b/pkg/sentry/pgalloc/pgalloc_test.go
@@ -30,7 +30,7 @@ const (
 func TestFindUnallocatedRange(t *testing.T) {
 	for _, test := range []struct {
 		name      string
-		usage     *usageSegmentDataSlices
+		usage     []usageFlatSegment
 		fileSize  int64
 		length    uint64
 		alignment uint64
@@ -40,7 +40,6 @@ func TestFindUnallocatedRange(t *testing.T) {
 	}{
 		{
 			name:      "Initial allocation succeeds",
-			usage:     &usageSegmentDataSlices{},
 			length:    page,
 			alignment: page,
 			direction: BottomUp,
@@ -48,7 +47,6 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name:      "Initial allocation succeeds",
-			usage:     &usageSegmentDataSlices{},
 			length:    page,
 			alignment: page,
 			direction: TopDown,
@@ -56,10 +54,8 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Allocation begins at start of file",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page},
-				End:    []uint64{2 * page},
-				Values: []usageInfo{{refs: 1}},
+			usage: []usageFlatSegment{
+				{page, 2 * page, usageInfo{refs: 1}},
 			},
 			length:    page,
 			alignment: page,
@@ -68,10 +64,8 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Allocation finds empty space at start of file",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page},
-				End:    []uint64{2 * page},
-				Values: []usageInfo{{refs: 1}},
+			usage: []usageFlatSegment{
+				{page, 2 * page, usageInfo{refs: 1}},
 			},
 			fileSize:  2 * page,
 			length:    page,
@@ -80,10 +74,8 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Allocation finds empty space at end of file",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0},
-				End:    []uint64{page},
-				Values: []usageInfo{{refs: 1}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
 			},
 			fileSize:  2 * page,
 			length:    page,
@@ -93,10 +85,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "In-use frames are not allocatable",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, page},
-				End:    []uint64{page, 2 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 2}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
+				{page, 2 * page, usageInfo{refs: 2}},
 			},
 			length:    page,
 			alignment: page,
@@ -105,10 +96,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "In-use frames are not allocatable",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, page},
-				End:    []uint64{page, 2 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 2}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
+				{page, 2 * page, usageInfo{refs: 2}},
 			},
 			fileSize:  2 * page,
 			length:    page,
@@ -118,10 +108,10 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Reclaimable frames are not allocatable",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, page, 2 * page},
-				End:    []uint64{page, 2 * page, 3 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 0}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
+				{page, 2 * page, usageInfo{refs: 0}},
+				{2 * page, 3 * page, usageInfo{refs: 1}},
 			},
 			length:    page,
 			alignment: page,
@@ -130,10 +120,10 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Reclaimable frames are not allocatable",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, page, 2 * page},
-				End:    []uint64{page, 2 * page, 3 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 0}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
+				{page, 2 * page, usageInfo{refs: 0}},
+				{2 * page, 3 * page, usageInfo{refs: 1}},
 			},
 			fileSize:  3 * page,
 			length:    page,
@@ -143,10 +133,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Gaps between in-use frames are allocatable",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, 2 * page},
-				End:    []uint64{page, 3 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
+				{2 * page, 3 * page, usageInfo{refs: 1}},
 			},
 			length:    page,
 			alignment: page,
@@ -155,10 +144,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Gaps between in-use frames are allocatable",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, 2 * page},
-				End:    []uint64{page, 3 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
+				{2 * page, 3 * page, usageInfo{refs: 1}},
 			},
 			fileSize:  3 * page,
 			length:    page,
@@ -168,10 +156,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Inadequately-sized gaps are rejected",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, 2 * page},
-				End:    []uint64{page, 3 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
+				{2 * page, 3 * page, usageInfo{refs: 1}},
 			},
 			length:    2 * page,
 			alignment: page,
@@ -180,10 +167,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Inadequately-sized gaps are rejected",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, 2 * page},
-				End:    []uint64{page, 3 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
+				{2 * page, 3 * page, usageInfo{refs: 1}},
 			},
 			fileSize:  3 * page,
 			length:    2 * page,
@@ -193,12 +179,11 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Alignment is honored at end of file",
-			usage: &usageSegmentDataSlices{
-				Start: []uint64{0, hugepage + page},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
 				// Hugepage-sized gap here that shouldn't be allocated from
 				// since it's incorrectly aligned.
-				End:    []uint64{page, hugepage + 2*page},
-				Values: []usageInfo{{refs: 1}, {refs: 1}},
+				{hugepage + page, hugepage + 2*page, usageInfo{refs: 1}},
 			},
 			length:    hugepage,
 			alignment: hugepage,
@@ -207,12 +192,11 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Alignment is honored at end of file",
-			usage: &usageSegmentDataSlices{
-				Start: []uint64{0, hugepage + page},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
 				// Hugepage-sized gap here that shouldn't be allocated from
 				// since it's incorrectly aligned.
-				End:    []uint64{page, hugepage + 2*page},
-				Values: []usageInfo{{refs: 1}, {refs: 1}},
+				{hugepage + page, hugepage + 2*page, usageInfo{refs: 1}},
 			},
 			fileSize:  hugepage + 2*page,
 			length:    hugepage,
@@ -222,11 +206,10 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Alignment is honored before end of file",
-			usage: &usageSegmentDataSlices{
-				Start: []uint64{0, 2*hugepage + page},
+			usage: []usageFlatSegment{
+				{0, page, usageInfo{refs: 1}},
 				// Page will need to be shifted down from top.
-				End:    []uint64{page, 2*hugepage + 2*page},
-				Values: []usageInfo{{refs: 1}, {refs: 1}},
+				{2*hugepage + page, 2*hugepage + 2*page, usageInfo{refs: 1}},
 			},
 			fileSize:  2*hugepage + 2*page,
 			length:    hugepage,
@@ -236,7 +219,6 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name:      "Allocation doubles file size more than once if necessary",
-			usage:     &usageSegmentDataSlices{},
 			fileSize:  page,
 			length:    4 * page,
 			alignment: page,
@@ -245,7 +227,6 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name:      "Allocation doubles file size more than once if necessary",
-			usage:     &usageSegmentDataSlices{},
 			fileSize:  page,
 			length:    4 * page,
 			alignment: page,
@@ -254,10 +235,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Allocations are compact if possible",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page, 3 * page},
-				End:    []uint64{2 * page, 4 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 2}},
+			usage: []usageFlatSegment{
+				{page, 2 * page, usageInfo{refs: 1}},
+				{3 * page, 4 * page, usageInfo{refs: 2}},
 			},
 			fileSize:  4 * page,
 			length:    page,
@@ -267,10 +247,10 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Top-down allocation within one gap",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page, 4 * page, 7 * page},
-				End:    []uint64{2 * page, 5 * page, 8 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 2}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{page, 2 * page, usageInfo{refs: 1}},
+				{4 * page, 5 * page, usageInfo{refs: 2}},
+				{7 * page, 8 * page, usageInfo{refs: 1}},
 			},
 			fileSize:  8 * page,
 			length:    page,
@@ -280,10 +260,10 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Top-down allocation between multiple gaps",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page, 3 * page, 5 * page},
-				End:    []uint64{2 * page, 4 * page, 6 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 2}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{page, 2 * page, usageInfo{refs: 1}},
+				{3 * page, 4 * page, usageInfo{refs: 2}},
+				{5 * page, 6 * page, usageInfo{refs: 1}},
 			},
 			fileSize:  6 * page,
 			length:    page,
@@ -293,10 +273,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Top-down allocation with large top gap",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page, 3 * page},
-				End:    []uint64{2 * page, 4 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 2}},
+			usage: []usageFlatSegment{
+				{page, 2 * page, usageInfo{refs: 1}},
+				{3 * page, 4 * page, usageInfo{refs: 2}},
 			},
 			fileSize:  8 * page,
 			length:    page,
@@ -306,10 +285,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Gaps found with possible overflow",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page, topPage - page},
-				End:    []uint64{2 * page, topPage},
-				Values: []usageInfo{{refs: 1}, {refs: 1}},
+			usage: []usageFlatSegment{
+				{page, 2 * page, usageInfo{refs: 1}},
+				{topPage - page, topPage, usageInfo{refs: 1}},
 			},
 			fileSize:  topPage,
 			length:    page,
@@ -319,10 +297,8 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Overflow detected",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page},
-				End:    []uint64{topPage},
-				Values: []usageInfo{{refs: 1}},
+			usage: []usageFlatSegment{
+				{page, topPage, usageInfo{refs: 1}},
 			},
 			fileSize:  topPage,
 			length:    2 * page,
@@ -332,10 +308,8 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "Overflow detected",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{page},
-				End:    []uint64{topPage},
-				Values: []usageInfo{{refs: 1}},
+			usage: []usageFlatSegment{
+				{page, topPage, usageInfo{refs: 1}},
 			},
 			fileSize:  topPage,
 			length:    2 * page,
@@ -345,10 +319,9 @@ func TestFindUnallocatedRange(t *testing.T) {
 		},
 		{
 			name: "start may be in the middle of segment",
-			usage: &usageSegmentDataSlices{
-				Start:  []uint64{0, 3 * page},
-				End:    []uint64{2 * page, 4 * page},
-				Values: []usageInfo{{refs: 1}, {refs: 2}},
+			usage: []usageFlatSegment{
+				{0, 2 * page, usageInfo{refs: 1}},
+				{3 * page, 4 * page, usageInfo{refs: 2}},
 			},
 			length:    page,
 			alignment: page,
@@ -359,7 +332,7 @@ func TestFindUnallocatedRange(t *testing.T) {
 		name := fmt.Sprintf("%s (%v)", test.name, test.direction)
 		t.Run(name, func(t *testing.T) {
 			f := MemoryFile{fileSize: test.fileSize}
-			if err := f.usage.ImportSortedSlices(test.usage); err != nil {
+			if err := f.usage.ImportSlice(test.usage); err != nil {
 				t.Fatalf("Failed to initialize usage from %v: %v", test.usage, err)
 			}
 			if fr, ok := f.findAvailableRange(test.length, test.alignment, test.direction); ok {
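The test-table conversion above replaces the parallel-slice fixture (*usageSegmentDataSlices with Start/End/Values) with one []usageFlatSegment literal per segment, loaded via ImportSlice. The snippet below is a self-contained illustration of why the flat form is easier to read and to validate; segment, fromParallel, and the checks are invented for this sketch and are not pgalloc or generated-set types.

```go
package main

import (
	"fmt"
	"sort"
)

// segment mirrors the shape of one entry in the new-style fixture: a
// half-open range plus its value. (Names invented for this sketch.)
type segment struct {
	start, end uint64
	refs       uint64
}

// fromParallel rebuilds flat segments from old-style parallel slices,
// checking the invariants the flat form makes obvious at a glance:
// equal lengths, non-empty ranges, sorted and non-overlapping.
func fromParallel(start, end, refs []uint64) ([]segment, error) {
	if len(start) != len(end) || len(start) != len(refs) {
		return nil, fmt.Errorf("mismatched slice lengths: %d/%d/%d", len(start), len(end), len(refs))
	}
	segs := make([]segment, len(start))
	for i := range start {
		if start[i] >= end[i] {
			return nil, fmt.Errorf("empty or inverted range [%d, %d)", start[i], end[i])
		}
		segs[i] = segment{start[i], end[i], refs[i]}
	}
	if !sort.SliceIsSorted(segs, func(i, j int) bool { return segs[i].start < segs[j].start }) {
		return nil, fmt.Errorf("segments not sorted by start")
	}
	for i := 1; i < len(segs); i++ {
		if segs[i].start < segs[i-1].end {
			return nil, fmt.Errorf("segments %d and %d overlap", i-1, i)
		}
	}
	return segs, nil
}

func main() {
	const page = 4096
	segs, err := fromParallel(
		[]uint64{page, 3 * page},
		[]uint64{2 * page, 4 * page},
		[]uint64{1, 2},
	)
	fmt.Println(segs, err) // [{4096 8192 1} {12288 16384 2}] <nil>
}
```

Reading the old fixtures means lining up the i-th element of three slices to recover one segment; the flat form keeps a segment's start, end, and value on a single line, which is exactly how the updated test cases read.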