diff --git a/go.mod b/go.mod index b263b561..708b40f9 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.17 require ( github.com/go-redis/redis/v8 v8.11.5 + github.com/gogo/protobuf v1.3.2 github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.8.0 github.com/stretchr/testify v1.7.1 diff --git a/go.sum b/go.sum index a42faae6..ee27fc69 100644 --- a/go.sum +++ b/go.sum @@ -10,10 +10,14 @@ github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -28,12 +32,39 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/resource-management/cmds/service-api/app/server.go b/resource-management/cmds/service-api/app/server.go index 5ca91480..aeac81f9 100644 --- 
a/resource-management/cmds/service-api/app/server.go +++ b/resource-management/cmds/service-api/app/server.go @@ -11,7 +11,7 @@ import ( "global-resource-service/resource-management/pkg/aggregrator" localMetrics "global-resource-service/resource-management/pkg/common-lib/metrics" - "global-resource-service/resource-management/pkg/common-lib/types/event" + "global-resource-service/resource-management/pkg/common-lib/types" "global-resource-service/resource-management/pkg/distributor" "global-resource-service/resource-management/pkg/service-api/endpoints" "global-resource-service/resource-management/pkg/store/redis" @@ -88,7 +88,7 @@ func Run(c *Config) error { defer wg.Done() for { time.Sleep(c.EventMetricsDumpFrequency) - event.PrintLatencyReport() + types.PrintLatencyReport() } }() } diff --git a/resource-management/pkg/aggregrator/aggregator.go b/resource-management/pkg/aggregrator/aggregator.go index f95a023c..96cc5dfb 100644 --- a/resource-management/pkg/aggregrator/aggregator.go +++ b/resource-management/pkg/aggregrator/aggregator.go @@ -11,13 +11,15 @@ import ( distributor "global-resource-service/resource-management/pkg/common-lib/interfaces/distributor" "global-resource-service/resource-management/pkg/common-lib/metrics" + "global-resource-service/resource-management/pkg/common-lib/serializer" + "global-resource-service/resource-management/pkg/common-lib/serializer/protobuf" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" ) type Aggregator struct { urls []string EventProcessor distributor.Interface + serializer serializer.Serializer } // To be client of Resource Region Manager @@ -28,14 +30,6 @@ type ClientOfRRM struct { HTTPClient *http.Client } -// RRM: Resource Region Manager -// -type ResponseFromRRM struct { - RegionNodeEvents [][]*event.NodeEvent - RvMap types.TransitResourceVersionMap - Length uint64 -} - // RRM: Resource Region Manager // type 
PullDataFromRRM struct { @@ -55,6 +49,7 @@ func NewAggregator(urls []string, EventProcessor distributor.Interface) *Aggrega return &Aggregator{ urls: urls, EventProcessor: EventProcessor, + serializer: protobuf.NewSerializer("foo"), } } @@ -77,7 +72,7 @@ func (a *Aggregator) Run() (err error) { }() var crv types.TransitResourceVersionMap - var regionNodeEvents [][]*event.NodeEvent + var regionNodeEvents []types.RpNodeEvents var length uint64 var eventProcess bool @@ -86,20 +81,20 @@ func (a *Aggregator) Run() (err error) { klog.V(3).Infof("Starting loop pulling nodes from region: %v", a.urls[i]) for { - time.Sleep(100 * time.Millisecond) - // Call the Pull methods // when composite RV is nil, the method initPull is called; // otherwise the method subsequentPull is called. // To simplify the codes, we use one method initPullOrSubsequentPull instead + pullStarts := time.Now() regionNodeEvents, length = a.initPullOrSubsequentPull(c, DefaultBatchLength, crv) if length != 0 { - klog.V(4).Infof("Total (%v) region node events are pulled successfully in (%v) RPs", length, len(regionNodeEvents)) + pullEnds := time.Now() + klog.V(4).Infof("Total (%v) region node events are pulled successfully in (%v) RPs. pull duration %v", length, len(regionNodeEvents), pullEnds.Sub(pullStarts)) // Convert 2D array to 1D array - minRecordNodeEvents := make([]*event.NodeEvent, 0, length) + minRecordNodeEvents := make([]*types.NodeEvent, 0, length) for j := 0; j < len(regionNodeEvents); j++ { - minRecordNodeEvents = append(minRecordNodeEvents, regionNodeEvents[j]...) + minRecordNodeEvents = append(minRecordNodeEvents, regionNodeEvents[j].NodeEvents...) 
} klog.V(6).Infof("Total (%v) mini node events are converted successfully with length (%v)", len(minRecordNodeEvents), length) @@ -118,6 +113,9 @@ func (a *Aggregator) Run() (err error) { if eventProcess { a.postCRV(c, crv) } + } else { + // only wait for empty pulls + time.Sleep(100 * time.Millisecond) } } }(i) @@ -142,7 +140,7 @@ func (a *Aggregator) createClient(url string) *ClientOfRRM { // or // Call the resource region manager's SubsequentPull method {url}/resources/subsequentpull when crv is not nil // -func (a *Aggregator) initPullOrSubsequentPull(c *ClientOfRRM, batchLength uint64, crv types.TransitResourceVersionMap) ([][]*event.NodeEvent, uint64) { +func (a *Aggregator) initPullOrSubsequentPull(c *ClientOfRRM, batchLength uint64, crv types.TransitResourceVersionMap) ([]types.RpNodeEvents, uint64) { var path string if len(crv) == 0 { @@ -152,7 +150,7 @@ func (a *Aggregator) initPullOrSubsequentPull(c *ClientOfRRM, batchLength uint64 } bytes, _ := json.Marshal(PullDataFromRRM{BatchLength: batchLength, CRV: crv.Copy()}) - req, err := http.NewRequest(http.MethodGet, path, strings.NewReader((string(bytes)))) + req, err := http.NewRequest(http.MethodGet, path, strings.NewReader(string(bytes))) if err != nil { klog.Errorf(err.Error()) } @@ -172,21 +170,21 @@ func (a *Aggregator) initPullOrSubsequentPull(c *ClientOfRRM, batchLength uint64 return nil, 0 } - var ResponseObject ResponseFromRRM - err = json.Unmarshal(bodyBytes, &ResponseObject) - if err != nil { - klog.Errorf("Error from JSON Unmarshal:", err) + var ResponseObject types.ResponseFromRRM + _, err1 := a.serializer.Decode(bodyBytes, &ResponseObject) + if err1 != nil { + klog.Errorf("Error decode response body:", err) return nil, 0 } // log out node ids for debugging some prolonged node transitions if klog.V(9).Enabled() { for rp, rpNodes := range ResponseObject.RegionNodeEvents { - if len(rpNodes) == 0 { + if len(rpNodes.NodeEvents) == 0 { continue } - buf := make([]string, len(rpNodes)) - for i, node 
:= range rpNodes { + buf := make([]string, len(rpNodes.NodeEvents)) + for i, node := range rpNodes.NodeEvents { buf[i] = node.Node.Id } @@ -196,9 +194,9 @@ func (a *Aggregator) initPullOrSubsequentPull(c *ClientOfRRM, batchLength uint64 if metrics.ResourceManagementMeasurement_Enabled { for i := 0; i < len(ResponseObject.RegionNodeEvents); i++ { - for j := 0; j < len(ResponseObject.RegionNodeEvents[i]); j++ { - if ResponseObject.RegionNodeEvents[i][j] != nil { - ResponseObject.RegionNodeEvents[i][j].SetCheckpoint(metrics.Aggregator_Received) + for j := 0; j < len(ResponseObject.RegionNodeEvents[i].NodeEvents); j++ { + if ResponseObject.RegionNodeEvents[i].NodeEvents[j] != nil { + ResponseObject.RegionNodeEvents[i].NodeEvents[j].SetCheckpoint(metrics.Aggregator_Received) } } } diff --git a/resource-management/pkg/clientSdk/rest/watch/decoder.go b/resource-management/pkg/clientSdk/rest/watch/decoder.go index c7465dca..f4ade289 100644 --- a/resource-management/pkg/clientSdk/rest/watch/decoder.go +++ b/resource-management/pkg/clientSdk/rest/watch/decoder.go @@ -21,7 +21,6 @@ import ( "fmt" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" ) // Decoder implements the watch.Decoder interface for io.ReadClosers that @@ -41,15 +40,15 @@ func NewDecoder(decoder *json.Decoder) *Decoder { // Decode blocks until it can return the next object in the reader. Returns an error // if the reader is closed or an object can't be decoded. 
-func (d *Decoder) Decode() (event.EventType, *types.LogicalNode, error) { - var got event.NodeEvent +func (d *Decoder) Decode() (types.EventType, *types.LogicalNode, error) { + var got types.NodeEvent err := d.decoder.Decode(&got) if err != nil { return "", nil, err } switch got.Type { - case event.Added, event.Modified, event.Deleted, event.Error, event.Bookmark: + case types.Added, types.Modified, types.Deleted, types.Error, types.Bookmark: default: return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) } diff --git a/resource-management/pkg/clientSdk/rmsclient/rmsClient.go b/resource-management/pkg/clientSdk/rmsclient/rmsClient.go index 1feaa998..33bed673 100644 --- a/resource-management/pkg/clientSdk/rmsclient/rmsClient.go +++ b/resource-management/pkg/clientSdk/rmsclient/rmsClient.go @@ -134,7 +134,7 @@ func (c *rmsClient) List(clientId string, opts ListOptions) ([]*types.LogicalNod req = req.Name(c.Id) req = req.Timeout(c.config.RequestTimeout) req = req.Param("limit", strconv.Itoa(opts.Limit)) - + respRet, err := req.DoRaw() if err != nil { return nil, nil, err diff --git a/resource-management/pkg/clientSdk/watch/streamwatcher.go b/resource-management/pkg/clientSdk/watch/streamwatcher.go index 99228bff..5ec51267 100644 --- a/resource-management/pkg/clientSdk/watch/streamwatcher.go +++ b/resource-management/pkg/clientSdk/watch/streamwatcher.go @@ -19,7 +19,6 @@ package watch import ( "fmt" - "global-resource-service/resource-management/pkg/common-lib/types/event" "io" "k8s.io/klog/v2" "sync" @@ -34,7 +33,7 @@ type Decoder interface { // Decode should return the type of event, the decoded object, or an error. // An error will cause StreamWatcher to call Close(). Decode should block until // it has data or an error occurs. 
- Decode() (action event.EventType, object *types.LogicalNode, err error) + Decode() (action types.EventType, object *types.LogicalNode, err error) // Close should close the underlying io.Reader, signalling to the source of // the stream that it is no longer being watched. Close() must cause any @@ -55,7 +54,7 @@ type StreamWatcher struct { sync.Mutex source Decoder reporter Reporter - result chan event.NodeEvent + result chan types.NodeEvent stopped bool } @@ -67,14 +66,14 @@ func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { // It's easy for a consumer to add buffering via an extra // goroutine/channel, but impossible for them to remove it, // so nonbuffered is better. - result: make(chan event.NodeEvent), + result: make(chan types.NodeEvent), } go sw.receive() return sw } // ResultChan implements Interface. -func (sw *StreamWatcher) ResultChan() <-chan event.NodeEvent { +func (sw *StreamWatcher) ResultChan() <-chan types.NodeEvent { return sw.result } @@ -117,15 +116,15 @@ func (sw *StreamWatcher) receive() { if net.IsProbableEOF(err) || net.IsTimeout(err) { klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) } else { - sw.result <- event.NodeEvent{ - Type: event.Error, + sw.result <- types.NodeEvent{ + Type: types.Error, Node: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)), } } } return } - sw.result <- event.NodeEvent{ + sw.result <- types.NodeEvent{ Type: action, Node: obj, } diff --git a/resource-management/pkg/clientSdk/watch/watch.go b/resource-management/pkg/clientSdk/watch/watch.go index 44f325e8..aa2ae273 100644 --- a/resource-management/pkg/clientSdk/watch/watch.go +++ b/resource-management/pkg/clientSdk/watch/watch.go @@ -1,7 +1,7 @@ package watch import ( - "global-resource-service/resource-management/pkg/common-lib/types/event" + "global-resource-service/resource-management/pkg/common-lib/types" ) // Interface can be implemented by anything that knows how to watch and 
report changes. @@ -13,16 +13,16 @@ type Interface interface { // ResultChan returns a chan which will receive all the events. If an error occurs // or Stop() is called, the implementation will close this channel and // release any resources used by the watch. - ResultChan() <-chan event.NodeEvent + ResultChan() <-chan types.NodeEvent } -type emptyWatch chan event.NodeEvent +type emptyWatch chan types.NodeEvent // NewEmptyWatch returns a watch interface that returns no results and is closed. // May be used in certain error conditions where no information is available but // an error is not warranted. func NewEmptyWatch() Interface { - ch := make(chan event.NodeEvent) + ch := make(chan types.NodeEvent) close(ch) return emptyWatch(ch) } @@ -32,6 +32,6 @@ func (w emptyWatch) Stop() { } // ResultChan implements Interface -func (w emptyWatch) ResultChan() <-chan event.NodeEvent { - return chan event.NodeEvent(w) +func (w emptyWatch) ResultChan() <-chan types.NodeEvent { + return chan types.NodeEvent(w) } diff --git a/resource-management/pkg/common-lib/framer/framer.go b/resource-management/pkg/common-lib/framer/framer.go new file mode 100644 index 00000000..24c7f300 --- /dev/null +++ b/resource-management/pkg/common-lib/framer/framer.go @@ -0,0 +1,171 @@ +/* +Copyright 2015 The Kubernetes Authors. +Copyright 2022 Authors of Arktos - file modified. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package framer implements simple frame decoding techniques for an io.ReadCloser +package framer + +import ( + "encoding/binary" + "encoding/json" + "io" +) + +type lengthDelimitedFrameWriter struct { + w io.Writer + h [4]byte +} + +func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { + return &lengthDelimitedFrameWriter{w: w} +} + +// Write writes a single frame to the nested writer, prepending it with the length in +// in bytes of data (as a 4 byte, bigendian uint32). +func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { + binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) + n, err := w.w.Write(w.h[:]) + if err != nil { + return 0, err + } + if n != len(w.h) { + return 0, io.ErrShortWrite + } + return w.w.Write(data) +} + +type lengthDelimitedFrameReader struct { + r io.ReadCloser + remaining int +} + +// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed +// frames off of a stream. +// +// The protocol is: +// +// stream: message ... +// message: prefix body +// prefix: 4 byte uint32 in BigEndian order, denotes length of body +// body: bytes (0..prefix) +// +// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortRead +// will be returned along with the number of bytes read. +func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser { + return &lengthDelimitedFrameReader{r: r} +} + +// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer +// is returned and subsequent calls will attempt to read the last frame. A frame is complete when +// err is nil. 
+func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) { + if r.remaining <= 0 { + header := [4]byte{} + n, err := io.ReadAtLeast(r.r, header[:4], 4) + if err != nil { + return 0, err + } + if n != 4 { + return 0, io.ErrUnexpectedEOF + } + frameLength := int(binary.BigEndian.Uint32(header[:])) + r.remaining = frameLength + } + + expect := r.remaining + max := expect + if max > len(data) { + max = len(data) + } + n, err := io.ReadAtLeast(r.r, data[:max], int(max)) + r.remaining -= n + if err == io.ErrShortBuffer || r.remaining > 0 { + return n, io.ErrShortBuffer + } + if err != nil { + return n, err + } + if n != expect { + return n, io.ErrUnexpectedEOF + } + + return n, nil +} + +func (r *lengthDelimitedFrameReader) Close() error { + return r.r.Close() +} + +type jsonFrameReader struct { + r io.ReadCloser + decoder *json.Decoder + remaining []byte +} + +// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off +// of a wire. +// +// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate +// the read. +func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser { + return &jsonFrameReader{ + r: r, + decoder: json.NewDecoder(r), + } +} + +// ReadFrame decodes the next JSON object in the stream, or returns an error. The returned +// byte slice will be modified the next time ReadFrame is invoked and should not be altered. +func (r *jsonFrameReader) Read(data []byte) (int, error) { + // Return whatever remaining data exists from an in progress frame + if n := len(r.remaining); n > 0 { + if n <= len(data) { + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. + data = append(data[0:0], r.remaining...) + r.remaining = nil + return n, nil + } + + n = len(data) + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. + data = append(data[0:0], r.remaining[:n]...) 
+ r.remaining = r.remaining[n:] + return n, io.ErrShortBuffer + } + + // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see + // data written to data, or be larger than data and a different array. + n := len(data) + m := json.RawMessage(data[:0]) + if err := r.decoder.Decode(&m); err != nil { + return 0, err + } + + // If capacity of data is less than length of the message, decoder will allocate a new slice + // and set m to it, which means we need to copy the partial result back into data and preserve + // the remaining result for subsequent reads. + if len(m) > n { + //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. + data = append(data[0:0], m[:n]...) + r.remaining = m[n:] + return n, io.ErrShortBuffer + } + return len(m), nil +} + +func (r *jsonFrameReader) Close() error { + return r.r.Close() +} diff --git a/resource-management/pkg/common-lib/framer/framer_test.go b/resource-management/pkg/common-lib/framer/framer_test.go new file mode 100644 index 00000000..bf42c15f --- /dev/null +++ b/resource-management/pkg/common-lib/framer/framer_test.go @@ -0,0 +1,177 @@ +/* +Copyright 2016 The Kubernetes Authors. +Copyright 2022 Authors of Arktos - file modified. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framer + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +func TestRead(t *testing.T) { + data := []byte{ + 0x00, 0x00, 0x00, 0x04, + 0x01, 0x02, 0x03, 0x04, + 0x00, 0x00, 0x00, 0x03, + 0x05, 0x06, 0x07, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, + } + b := bytes.NewBuffer(data) + r := NewLengthDelimitedFrameReader(ioutil.NopCloser(b)) + buf := make([]byte, 1) + if n, err := r.Read(buf); err != io.ErrShortBuffer && n != 1 && bytes.Equal(buf, []byte{0x01}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + if n, err := r.Read(buf); err != io.ErrShortBuffer && n != 1 && bytes.Equal(buf, []byte{0x02}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + // read the remaining frame + buf = make([]byte, 2) + if n, err := r.Read(buf); err != nil && n != 2 && bytes.Equal(buf, []byte{0x03, 0x04}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + // read with buffer equal to frame + buf = make([]byte, 3) + if n, err := r.Read(buf); err != nil && n != 3 && bytes.Equal(buf, []byte{0x05, 0x06, 0x07}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + // read empty frame + buf = make([]byte, 3) + if n, err := r.Read(buf); err != nil && n != 0 && bytes.Equal(buf, []byte{}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + // read with larger buffer than frame + buf = make([]byte, 3) + if n, err := r.Read(buf); err != nil && n != 1 && bytes.Equal(buf, []byte{0x08}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + // read EOF + if n, err := r.Read(buf); err != io.EOF && n != 0 { + t.Fatalf("unexpected: %v %d", err, n) + } +} + +func TestReadLarge(t *testing.T) { + data := []byte{ + 0x00, 0x00, 0x00, 0x04, + 0x01, 0x02, 0x03, 0x04, + 0x00, 0x00, 0x00, 0x03, + 0x05, 0x06, 0x07, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, + } + b := bytes.NewBuffer(data) + r := NewLengthDelimitedFrameReader(ioutil.NopCloser(b)) + buf := make([]byte, 40) + if n, err := r.Read(buf); err != nil && n != 4 && 
bytes.Equal(buf, []byte{0x01, 0x02, 0x03, 0x04}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + if n, err := r.Read(buf); err != nil && n != 3 && bytes.Equal(buf, []byte{0x05, 0x06, 0x7}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + if n, err := r.Read(buf); err != nil && n != 0 && bytes.Equal(buf, []byte{}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + if n, err := r.Read(buf); err != nil && n != 1 && bytes.Equal(buf, []byte{0x08}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + // read EOF + if n, err := r.Read(buf); err != io.EOF && n != 0 { + t.Fatalf("unexpected: %v %d", err, n) + } +} +func TestReadInvalidFrame(t *testing.T) { + data := []byte{ + 0x00, 0x00, 0x00, 0x04, + 0x01, 0x02, + } + b := bytes.NewBuffer(data) + r := NewLengthDelimitedFrameReader(ioutil.NopCloser(b)) + buf := make([]byte, 1) + if n, err := r.Read(buf); err != io.ErrShortBuffer && n != 1 && bytes.Equal(buf, []byte{0x01}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + // read the remaining frame + buf = make([]byte, 3) + if n, err := r.Read(buf); err != io.ErrUnexpectedEOF && n != 1 && bytes.Equal(buf, []byte{0x02}) { + t.Fatalf("unexpected: %v %d %v", err, n, buf) + } + // read EOF + if n, err := r.Read(buf); err != io.EOF && n != 0 { + t.Fatalf("unexpected: %v %d", err, n) + } +} + +func TestJSONFrameReader(t *testing.T) { + b := bytes.NewBufferString("{\"test\":true}\n1\n[\"a\"]") + r := NewJSONFramedReader(ioutil.NopCloser(b)) + buf := make([]byte, 20) + if n, err := r.Read(buf); err != nil || n != 13 || string(buf[:n]) != `{"test":true}` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + if n, err := r.Read(buf); err != nil || n != 1 || string(buf[:n]) != `1` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + if n, err := r.Read(buf); err != nil || n != 5 || string(buf[:n]) != `["a"]` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + if n, err := r.Read(buf); err != io.EOF || n != 0 { + t.Fatalf("unexpected: %v %d %q", 
err, n, buf) + } +} + +func TestJSONFrameReaderShortBuffer(t *testing.T) { + b := bytes.NewBufferString("{\"test\":true}\n1\n[\"a\"]") + r := NewJSONFramedReader(ioutil.NopCloser(b)) + buf := make([]byte, 3) + + if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `{"t` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `est` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `":t` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `rue` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + if n, err := r.Read(buf); err != nil || n != 1 || string(buf[:n]) != `}` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + + if n, err := r.Read(buf); err != nil || n != 1 || string(buf[:n]) != `1` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + + if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `["a` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + if n, err := r.Read(buf); err != nil || n != 2 || string(buf[:n]) != `"]` { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } + + if n, err := r.Read(buf); err != io.EOF || n != 0 { + t.Fatalf("unexpected: %v %d %q", err, n, buf) + } +} diff --git a/resource-management/pkg/common-lib/interfaces/distributor/interfaces.go b/resource-management/pkg/common-lib/interfaces/distributor/interfaces.go index d0527404..2d25a3b6 100644 --- a/resource-management/pkg/common-lib/interfaces/distributor/interfaces.go +++ b/resource-management/pkg/common-lib/interfaces/distributor/interfaces.go @@ -2,13 +2,12 @@ package distributor import ( "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" ) type Interface 
interface { RegisterClient(*types.Client) error ListNodesForClient(clientId string) ([]*types.LogicalNode, types.TransitResourceVersionMap, error) - Watch(clientId string, rvs types.TransitResourceVersionMap, watchChan chan *event.NodeEvent, stopCh chan struct{}) error - ProcessEvents(events []*event.NodeEvent) (bool, types.TransitResourceVersionMap) + Watch(clientId string, rvs types.TransitResourceVersionMap, watchChan chan *types.NodeEvent, stopCh chan struct{}) error + ProcessEvents(events []*types.NodeEvent) (bool, types.TransitResourceVersionMap) } diff --git a/resource-management/pkg/common-lib/interfaces/store/interface.go b/resource-management/pkg/common-lib/interfaces/store/interface.go index c3efc109..b28ba5e5 100644 --- a/resource-management/pkg/common-lib/interfaces/store/interface.go +++ b/resource-management/pkg/common-lib/interfaces/store/interface.go @@ -2,7 +2,6 @@ package store import ( "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/location" ) const ( @@ -60,5 +59,5 @@ func (assignment *VirtualNodeAssignment) GetKey() string { type VirtualNodeConfig struct { Lowerbound float64 Upperbound float64 - Location location.Location + Location types.Location } diff --git a/resource-management/pkg/common-lib/serializer/interfaces.go b/resource-management/pkg/common-lib/serializer/interfaces.go new file mode 100644 index 00000000..aa8b61d5 --- /dev/null +++ b/resource-management/pkg/common-lib/serializer/interfaces.go @@ -0,0 +1,74 @@ +/* +Copyright 2014 The Kubernetes Authors. +Copyright 2022 Authors of Arktos - file modified. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "io" +) + +// MemoryAllocator is responsible for allocating memory. +// By encapsulating memory allocation into its own interface, we can reuse the memory +// across many operations in places we know it can significantly improve the performance. +type MemoryAllocator interface { + // Allocate reserves memory for n bytes. + // Note that implementations of this method are not required to zero the returned array. + // It is the caller's responsibility to clean the memory if needed. + Allocate(n uint64) []byte +} + +// SimpleAllocator a wrapper around make([]byte) +// conforms to the MemoryAllocator interface +type SimpleAllocator struct{} + +var _ MemoryAllocator = &SimpleAllocator{} + +func (sa *SimpleAllocator) Allocate(n uint64) []byte { + return make([]byte, n, n) +} + +type ObjectTyper string + +// Identifier represents an identifier. +// Identitier of two different objects should be equal if and only if for every +// input the output they produce is exactly the same. +type Identifier string + +// Encoder writes objects to a serialized form +type Encoder interface { + Encode(interface{}, io.Writer) error + Identifier() Identifier +} + +// Decoder attempts to load an object from data. +type Decoder interface { + Decode([]byte, interface{}) (interface{}, error) +} + +// Serializer is the core interface for transforming objects into a serialized format and back. +// Implementations may choose to perform conversion of the object, but no assumptions should be made. 
+type Serializer interface { + Encoder + Decoder + Marshal(interface{}) ([]byte, error) +} + +// Framer is a factory for creating readers and writers that obey a particular framing pattern. +type Framer interface { + NewFrameReader(r io.ReadCloser) io.ReadCloser + NewFrameWriter(w io.Writer) io.Writer +} diff --git a/resource-management/pkg/common-lib/serializer/json/json.go b/resource-management/pkg/common-lib/serializer/json/json.go new file mode 100644 index 00000000..29af2c7c --- /dev/null +++ b/resource-management/pkg/common-lib/serializer/json/json.go @@ -0,0 +1,141 @@ +/* +Copyright 2015 The Kubernetes Authors. +Copyright 2022 Authors of Arktos - file modified. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "io" + "strconv" + + "k8s.io/klog/v2" + + "global-resource-service/resource-management/pkg/common-lib/serializer" +) + +func NewSerializer(typer serializer.ObjectTyper, pretty bool) *Serializer { + return NewSerializerWithOptions(typer, SerializerOptions{false, false}) +} + +// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML +// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer +// and are immutable. 
+func NewSerializerWithOptions(typer serializer.ObjectTyper, options SerializerOptions) *Serializer { + return &Serializer{ + typer: typer, + options: options, + identifier: identifier(options), + } +} + +// identifier computes Identifier of Encoder based on the given options. +func identifier(options SerializerOptions) serializer.Identifier { + result := map[string]string{ + "name": "json", + "pretty": strconv.FormatBool(options.Pretty), + "strict": strconv.FormatBool(options.Strict), + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err) + } + return serializer.Identifier(identifier) +} + +type SerializerOptions struct { + // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output. + Pretty bool + + // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. + // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. + Strict bool +} + +// Serializer handles encoding versioned objects into the proper JSON form +type Serializer struct { + options SerializerOptions + typer serializer.ObjectTyper + identifier serializer.Identifier +} + +func (s *Serializer) Decode(data []byte, into interface{}) (interface{}, error) { + + err := s.Unmarshal(data, &into) + + return into, err +} + +// Encode serializes the provided object to the given writer. 
+func (s *Serializer) Encode(obj interface{}, w io.Writer) error { + return s.doEncode(obj, w) +} + +func (s *Serializer) doEncode(obj interface{}, w io.Writer) error { + if s.options.Pretty { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + encoder := json.NewEncoder(w) + return encoder.Encode(obj) +} + +// IsStrict indicates whether the serializer +// uses strict decoding or not +func (s *Serializer) IsStrict() bool { + return s.options.Strict +} + +func (s *Serializer) Unmarshal(data []byte, into *interface{}) (err error) { + return json.Unmarshal(data, into) +} + +func (s *Serializer) Marshal(obj interface{}) (b []byte, err error) { + b, err = json.Marshal(obj) + + if err != nil { + klog.Errorf("failed to marshal object, error %v", err) + return nil, err + } + + return b, nil +} + +// Identifier implements serializer.Encoder interface. +func (s *Serializer) Identifier() serializer.Identifier { + return s.identifier +} + +//// Framer is the default JSON framing behavior, with newlines delimiting individual objects. 
+//var Framer = jsonFramer{} +// +//type jsonFramer struct{} +// +//// NewFrameWriter implements stream framing for this serializer +//func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer { +// // we can write JSON objects directly to the writer, because they are self-framing +// return w +//} +// +//// NewFrameReader implements stream framing for this serializer +//func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { +// // we need to extract the JSON chunks of data to pass to Decode() +// return framer.NewJSONFramedReader(r) +//} diff --git a/resource-management/pkg/common-lib/serializer/protobuf/protobuf.go b/resource-management/pkg/common-lib/serializer/protobuf/protobuf.go new file mode 100644 index 00000000..6e5c71fa --- /dev/null +++ b/resource-management/pkg/common-lib/serializer/protobuf/protobuf.go @@ -0,0 +1,113 @@ +/* +Copyright 2015 The Kubernetes Authors. +Copyright 2022 Authors of Arktos - file modified. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protobuf + +import ( + "fmt" + "io" + "reflect" + + "github.com/gogo/protobuf/proto" + "k8s.io/klog/v2" + + "global-resource-service/resource-management/pkg/common-lib/serializer" +) + +type errNotMarshalable struct { + t reflect.Type +} + +func (e errNotMarshalable) Error() string { + return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t) +} + +// IsNotMarshalable checks the type of error, returns a boolean true if error is not nil and not marshalable false otherwise +func IsNotMarshalable(err error) bool { + _, ok := err.(errNotMarshalable) + return err != nil && ok +} + +// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer +// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written +// as-is (any type info passed with the object will be used). +func NewSerializer(typer serializer.ObjectTyper) *Serializer { + return &Serializer{ + typer: typer, + } +} + +// Serializer handles encoding versioned objects into the proper wire form +type Serializer struct { + prefix []byte + typer serializer.ObjectTyper +} + +var _ serializer.Serializer = &Serializer{} + +const serializerIdentifier serializer.Identifier = "protobuf" + +// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default +// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *serializer.Unknown, +// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will +// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). 
If into is provided and the original data is +// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most +// errors, the method will return the calculated schema kind. +func (s *Serializer) Decode(data []byte, into interface{}) (interface{}, error) { + + err := proto.Unmarshal(data, into.(proto.Message)) + + if err != nil { + return nil, fmt.Errorf("failed to unmarshal data") + } + + return into, nil +} + +// Encode serializes the provided object to the given writer. +func (s *Serializer) Encode(obj interface{}, w io.Writer) error { + b, err := proto.Marshal(obj.(proto.Message)) + + if err != nil { + klog.Errorf("failed to marshal object, error %v", err) + return err + } + + _, err = w.Write(b) + if err != nil { + klog.Errorf("failed to write response, error %v", err) + return err + } + + return nil +} + +func (s *Serializer) Marshal(obj interface{}) (b []byte, err error) { + b, err = proto.Marshal(obj.(proto.Message)) + + if err != nil { + klog.Errorf("failed to marshal object, error %v", err) + return nil, err + } + + return b, nil +} + +// Identifier implements serializer.Encoder interface. 
+func (s *Serializer) Identifier() serializer.Identifier { + return serializerIdentifier +} diff --git a/resource-management/pkg/common-lib/types/compositeresourceversion.go b/resource-management/pkg/common-lib/types/compositeresourceversion.go index b9acce4c..eadef0b9 100644 --- a/resource-management/pkg/common-lib/types/compositeresourceversion.go +++ b/resource-management/pkg/common-lib/types/compositeresourceversion.go @@ -2,8 +2,6 @@ package types import ( "encoding/json" - - "global-resource-service/resource-management/pkg/common-lib/types/location" ) type CompositeResourceVersion struct { @@ -12,9 +10,12 @@ type CompositeResourceVersion struct { ResourceVersion uint64 } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +//RvLocation is used in rv map for rest apis type RvLocation struct { - Region location.Region - Partition location.ResourcePartition + Region Region + Partition ResourcePartition } func (loc RvLocation) MarshalText() (text []byte, err error) { @@ -27,18 +28,20 @@ func (loc *RvLocation) UnmarshalText(text []byte) error { return json.Unmarshal(text, (*l)(loc)) } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // Map from (regionId, ResourcePartitionId) to resourceVersion // used in REST API calls type TransitResourceVersionMap map[RvLocation]uint64 // internally used in the eventqueue used in WATCH of nodes -type InternalResourceVersionMap map[location.Location]uint64 +type InternalResourceVersionMap map[Location]uint64 func ConvertToInternalResourceVersionMap(rvs TransitResourceVersionMap) InternalResourceVersionMap { internalMap := make(InternalResourceVersionMap) for k, v := range rvs { - internalMap[*location.NewLocation(k.Region, k.Partition)] = v + internalMap[*NewLocation(k.Region, k.Partition)] = v } return internalMap diff --git a/resource-management/pkg/common-lib/types/compositeresourceversion_test.go b/resource-management/pkg/common-lib/types/compositeresourceversion_test.go index 
3b6d25cc..5ef329b6 100644 --- a/resource-management/pkg/common-lib/types/compositeresourceversion_test.go +++ b/resource-management/pkg/common-lib/types/compositeresourceversion_test.go @@ -3,13 +3,12 @@ package types import ( "encoding/json" "github.com/stretchr/testify/assert" - "global-resource-service/resource-management/pkg/common-lib/types/location" "testing" ) func TestResourceVersionMap_Marshall_UnMarshall(t *testing.T) { rvs := make(TransitResourceVersionMap) - loc := RvLocation{Region: location.Beijing, Partition: location.ResourcePartition1} + loc := RvLocation{Region: Beijing, Partition: ResourcePartition1} rvs[loc] = 100 // marshall diff --git a/resource-management/pkg/common-lib/types/errors.go b/resource-management/pkg/common-lib/types/errors.go index 93a9411d..8836d286 100644 --- a/resource-management/pkg/common-lib/types/errors.go +++ b/resource-management/pkg/common-lib/types/errors.go @@ -22,4 +22,4 @@ var Error_ClientIdExisted = errors.New(ErrMsg_ClientIdExisted) var Error_FailedToProcessBookmarkEvent = errors.New(ErrMsg_FailedToProcessBookmarkEvent) -var Error_EndOfEventQueue = errors.New(ErrMsg_EndOfEventQueue) \ No newline at end of file +var Error_EndOfEventQueue = errors.New(ErrMsg_EndOfEventQueue) diff --git a/resource-management/pkg/common-lib/types/event/event.go b/resource-management/pkg/common-lib/types/event.go similarity index 69% rename from resource-management/pkg/common-lib/types/event/event.go rename to resource-management/pkg/common-lib/types/event.go index b8c9981c..52ef7182 100644 --- a/resource-management/pkg/common-lib/types/event/event.go +++ b/resource-management/pkg/common-lib/types/event.go @@ -1,15 +1,11 @@ -package event +package types import ( "time" "global-resource-service/resource-management/pkg/common-lib/metrics" - "global-resource-service/resource-management/pkg/common-lib/types" ) -// EventType defines the possible types of events. 
-type EventType string - const ( Added EventType = "ADDED" Modified EventType = "MODIFIED" @@ -18,13 +14,7 @@ const ( Error EventType = "ERROR" ) -type NodeEvent struct { - Type EventType - Node *types.LogicalNode - checkpoints []time.Time -} - -func NewNodeEvent(node *types.LogicalNode, eventType EventType) *NodeEvent { +func NewNodeEvent(node *LogicalNode, eventType EventType) *NodeEvent { return &NodeEvent{ Type: eventType, Node: node, diff --git a/resource-management/pkg/common-lib/types/event/event_metrics.go b/resource-management/pkg/common-lib/types/event_metrics.go similarity index 99% rename from resource-management/pkg/common-lib/types/event/event_metrics.go rename to resource-management/pkg/common-lib/types/event_metrics.go index f9498aee..a6efa95d 100644 --- a/resource-management/pkg/common-lib/types/event/event_metrics.go +++ b/resource-management/pkg/common-lib/types/event_metrics.go @@ -1,4 +1,4 @@ -package event +package types import ( "k8s.io/klog/v2" @@ -40,7 +40,7 @@ func AddLatencyMetricsAllCheckpoints(e *NodeEvent) { if checkpointsPerEvent == nil { klog.Errorf("Event (%v, Id %s, RV %s) does not have checkpoint stamped", e.Type, e.Node.Id, e.Node.ResourceVersion) } - lastUpdatedTime := e.Node.LastUpdatedTime + lastUpdatedTime := e.Node.LastUpdatedTime.Time agg_received_time := checkpointsPerEvent[metrics.Aggregator_Received] dis_received_time := checkpointsPerEvent[metrics.Distributor_Received] diff --git a/resource-management/pkg/common-lib/types/event/event_metrics_test.go b/resource-management/pkg/common-lib/types/event_metrics_test.go similarity index 77% rename from resource-management/pkg/common-lib/types/event/event_metrics_test.go rename to resource-management/pkg/common-lib/types/event_metrics_test.go index 3a576884..2b6ce22a 100644 --- a/resource-management/pkg/common-lib/types/event/event_metrics_test.go +++ b/resource-management/pkg/common-lib/types/event_metrics_test.go @@ -1,4 +1,4 @@ -package event +package types import ( 
"github.com/google/uuid" @@ -8,11 +8,9 @@ import ( "time" "global-resource-service/resource-management/pkg/common-lib/metrics" - "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/location" ) -var defaultLocBeijing_RP1 = location.NewLocation(location.Beijing, location.ResourcePartition1) +var defaultLocBeijing_RP1 = NewLocation(Beijing, ResourcePartition1) var rvToGenerate = 0 func Test_PrintLatencyReport(t *testing.T) { @@ -29,16 +27,16 @@ func Test_PrintLatencyReport(t *testing.T) { PrintLatencyReport() } -func createRandomNode(rv int, loc *location.Location) *types.LogicalNode { +func createRandomNode(rv int, loc *Location) *LogicalNode { id := uuid.New() - return &types.LogicalNode{ + return &LogicalNode{ Id: id.String(), ResourceVersion: strconv.Itoa(rv), - GeoInfo: types.NodeGeoInfo{ - Region: types.RegionName(loc.GetRegion()), - ResourcePartition: types.ResourcePartitionName(loc.GetResourcePartition()), + GeoInfo: NodeGeoInfo{ + Region: RegionName(loc.GetRegion()), + ResourcePartition: ResourcePartitionName(loc.GetResourcePartition()), }, - LastUpdatedTime: time.Now().UTC(), + LastUpdatedTime: NewTime(time.Now().UTC()), } } diff --git a/resource-management/pkg/common-lib/types/generated.pb.go b/resource-management/pkg/common-lib/types/generated.pb.go new file mode 100644 index 00000000..3dc4aba1 --- /dev/null +++ b/resource-management/pkg/common-lib/types/generated.pb.go @@ -0,0 +1,2580 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/global-resource-service/resource-management/pkg/common-lib/types/generated.proto + +package types + +import ( + fmt "fmt" + + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *LogicalNode) Reset() { *m = LogicalNode{} } +func (*LogicalNode) ProtoMessage() {} +func (*LogicalNode) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{0} +} +func (m *LogicalNode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LogicalNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LogicalNode) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogicalNode.Merge(m, src) +} +func (m *LogicalNode) XXX_Size() int { + return m.Size() +} +func (m *LogicalNode) XXX_DiscardUnknown() { + xxx_messageInfo_LogicalNode.DiscardUnknown(m) +} + +var xxx_messageInfo_LogicalNode proto.InternalMessageInfo + +func (m *NodeEvent) Reset() { *m = NodeEvent{} } +func (*NodeEvent) ProtoMessage() {} +func (*NodeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{1} 
+} +func (m *NodeEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeEvent.Merge(m, src) +} +func (m *NodeEvent) XXX_Size() int { + return m.Size() +} +func (m *NodeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_NodeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeEvent proto.InternalMessageInfo + +func (m *NodeGeoInfo) Reset() { *m = NodeGeoInfo{} } +func (*NodeGeoInfo) ProtoMessage() {} +func (*NodeGeoInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{2} +} +func (m *NodeGeoInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeGeoInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeGeoInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGeoInfo.Merge(m, src) +} +func (m *NodeGeoInfo) XXX_Size() int { + return m.Size() +} +func (m *NodeGeoInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGeoInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGeoInfo proto.InternalMessageInfo + +func (m *NodeResource) Reset() { *m = NodeResource{} } +func (*NodeResource) ProtoMessage() {} +func (*NodeResource) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{3} +} +func (m *NodeResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeResource.Merge(m, src) +} +func (m 
*NodeResource) XXX_Size() int { + return m.Size() +} +func (m *NodeResource) XXX_DiscardUnknown() { + xxx_messageInfo_NodeResource.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeResource proto.InternalMessageInfo + +func (m *NodeSpecialHardWareTypeInfo) Reset() { *m = NodeSpecialHardWareTypeInfo{} } +func (*NodeSpecialHardWareTypeInfo) ProtoMessage() {} +func (*NodeSpecialHardWareTypeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{4} +} +func (m *NodeSpecialHardWareTypeInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSpecialHardWareTypeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSpecialHardWareTypeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSpecialHardWareTypeInfo.Merge(m, src) +} +func (m *NodeSpecialHardWareTypeInfo) XXX_Size() int { + return m.Size() +} +func (m *NodeSpecialHardWareTypeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSpecialHardWareTypeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeSpecialHardWareTypeInfo proto.InternalMessageInfo + +func (m *NodeTaints) Reset() { *m = NodeTaints{} } +func (*NodeTaints) ProtoMessage() {} +func (*NodeTaints) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{5} +} +func (m *NodeTaints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeTaints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeTaints) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeTaints.Merge(m, src) +} +func (m *NodeTaints) XXX_Size() int { + return m.Size() +} +func (m *NodeTaints) XXX_DiscardUnknown() { + xxx_messageInfo_NodeTaints.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeTaints 
proto.InternalMessageInfo + +func (m *ResponseFromRRM) Reset() { *m = ResponseFromRRM{} } +func (*ResponseFromRRM) ProtoMessage() {} +func (*ResponseFromRRM) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{6} +} +func (m *ResponseFromRRM) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseFromRRM) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResponseFromRRM) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseFromRRM.Merge(m, src) +} +func (m *ResponseFromRRM) XXX_Size() int { + return m.Size() +} +func (m *ResponseFromRRM) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseFromRRM.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseFromRRM proto.InternalMessageInfo + +func (m *RpNodeEvents) Reset() { *m = RpNodeEvents{} } +func (*RpNodeEvents) ProtoMessage() {} +func (*RpNodeEvents) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{7} +} +func (m *RpNodeEvents) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RpNodeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RpNodeEvents) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpNodeEvents.Merge(m, src) +} +func (m *RpNodeEvents) XXX_Size() int { + return m.Size() +} +func (m *RpNodeEvents) XXX_DiscardUnknown() { + xxx_messageInfo_RpNodeEvents.DiscardUnknown(m) +} + +var xxx_messageInfo_RpNodeEvents proto.InternalMessageInfo + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{8} +} +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} + +var xxx_messageInfo_Time proto.InternalMessageInfo + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_0e6ff26d2a95106f, []int{9} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func init() { + proto.RegisterType((*LogicalNode)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.LogicalNode") + proto.RegisterType((*NodeEvent)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.NodeEvent") + proto.RegisterType((*NodeGeoInfo)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.NodeGeoInfo") + proto.RegisterType((*NodeResource)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.NodeResource") + proto.RegisterMapType((map[ResourceName]int64)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.NodeResource.ScalarResourcesEntry") + proto.RegisterType((*NodeSpecialHardWareTypeInfo)(nil), 
"k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.NodeSpecialHardWareTypeInfo") + proto.RegisterType((*NodeTaints)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.NodeTaints") + proto.RegisterType((*ResponseFromRRM)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.ResponseFromRRM") + proto.RegisterType((*RpNodeEvents)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.RpNodeEvents") + proto.RegisterType((*Time)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.Time") + proto.RegisterType((*Timestamp)(nil), "k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types.Timestamp") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/global-resource-service/resource-management/pkg/common-lib/types/generated.proto", fileDescriptor_0e6ff26d2a95106f) +} + +var fileDescriptor_0e6ff26d2a95106f = []byte{ + // 1317 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xda, 0x71, 0x62, 0x8f, 0xf3, 0xad, 0x9d, 0xa9, 0xbf, 0xdf, 0xaf, 0x49, 0x91, 0x1d, + 0x19, 0x54, 0x52, 0x20, 0xeb, 0x36, 0x54, 0xa8, 0xea, 0x89, 0x6c, 0x9a, 0xb4, 0x15, 0x4d, 0x14, + 0x4d, 0xfa, 0x03, 0xca, 0x61, 0x35, 0xde, 0x9d, 0xac, 0x57, 0xd9, 0x9d, 0x59, 0xcd, 0x8c, 0x5d, + 0xdc, 0x0b, 0x15, 0x42, 0x82, 0x63, 0x8f, 0x3d, 0x36, 0x47, 0x2e, 0x5c, 0xb9, 0x70, 0xe2, 0xd4, + 0x63, 0x8f, 0x3d, 0x40, 0xa0, 0xe1, 0xbf, 0x88, 0x04, 0x42, 0x33, 0x3b, 0x5e, 0xbb, 0x6e, 0xc5, + 0xc9, 0xdc, 0x66, 0x3e, 0x9f, 0x37, 0xef, 0x7d, 0xf6, 0xcd, 0x7b, 0x6f, 0x16, 0xc8, 0xc3, 0x2b, + 0xc2, 0x0e, 0x59, 0xe7, 0xb0, 0xdf, 0x25, 0x9c, 0x12, 0x49, 0x44, 0x67, 0x40, 0xa8, 0xcf, 0x78, + 0xc7, 0x10, 0x38, 0x09, 0x3b, 0x41, 0xc4, 0xba, 0x38, 0x5a, 0xe3, 0x44, 0xb0, 0x3e, 0xf7, 0xc8, + 0x9a, 0x20, 0x7c, 0x10, 0x7a, 0xa4, 0x93, 0x01, 0x31, 
0xa6, 0x38, 0x20, 0x31, 0xa1, 0xb2, 0x93, + 0x1c, 0x06, 0x1d, 0x8f, 0xc5, 0x31, 0xa3, 0x6b, 0x51, 0xd8, 0xed, 0xc8, 0x61, 0x42, 0x44, 0x27, + 0x20, 0x94, 0x70, 0x2c, 0x89, 0x6f, 0x27, 0x9c, 0x49, 0x06, 0x3f, 0x4d, 0x9d, 0xdb, 0x38, 0x09, + 0xed, 0xd4, 0xb9, 0x3b, 0xf2, 0xe5, 0x1a, 0xe7, 0x76, 0x06, 0x8c, 0x9d, 0xdb, 0xc9, 0x61, 0x60, + 0xa7, 0xce, 0xdd, 0x28, 0xec, 0xda, 0xda, 0xf9, 0xf2, 0x5a, 0x10, 0xca, 0x5e, 0xbf, 0xab, 0x88, + 0x4e, 0xc0, 0x02, 0xd6, 0xd1, 0x31, 0xba, 0xfd, 0x03, 0xbd, 0xd3, 0x1b, 0xbd, 0x4a, 0x63, 0x2f, + 0x5f, 0x1e, 0x7f, 0x58, 0x8c, 0xbd, 0x5e, 0x48, 0x09, 0x1f, 0x6a, 0xd5, 0x38, 0x09, 0x45, 0x27, + 0x26, 0x12, 0x77, 0x06, 0x97, 0xa6, 0x15, 0xb7, 0xff, 0x5a, 0x00, 0x95, 0x5b, 0x2c, 0x08, 0x3d, + 0x1c, 0xed, 0x32, 0x9f, 0xc0, 0x65, 0x90, 0x0f, 0xfd, 0x86, 0xb5, 0x62, 0xad, 0x96, 0x1d, 0xf0, + 0xec, 0xb8, 0x95, 0x3b, 0x39, 0x6e, 0xe5, 0x6f, 0xfa, 0x28, 0x1f, 0xfa, 0xd0, 0x01, 0xb5, 0x4c, + 0xfc, 0x80, 0x70, 0x11, 0x32, 0xda, 0xc8, 0x6b, 0xcb, 0xff, 0x1b, 0xcb, 0x2a, 0x32, 0xfc, 0xdd, + 0x94, 0x46, 0x55, 0xfe, 0x2a, 0x00, 0xbf, 0xb1, 0x40, 0x29, 0x20, 0xcc, 0x0d, 0xe9, 0x01, 0x6b, + 0x14, 0x56, 0xac, 0xd5, 0xca, 0xfa, 0x67, 0xf6, 0x0c, 0xb3, 0x66, 0xab, 0xaf, 0xb8, 0x4e, 0xd8, + 0x4d, 0x7a, 0xc0, 0x9c, 0xaa, 0x91, 0xb5, 0x60, 0x00, 0xb4, 0x10, 0xa4, 0x0b, 0xf8, 0x15, 0x98, + 0x97, 0x38, 0xa4, 0x52, 0x34, 0xe6, 0xb4, 0x86, 0x7b, 0x33, 0xd7, 0x70, 0x5b, 0xbb, 0x77, 0xce, + 0x18, 0x09, 0xf3, 0xe9, 0x1e, 0x99, 0xb0, 0xf0, 0x27, 0x0b, 0xfc, 0x4f, 0x24, 0xc4, 0x0b, 0x71, + 0xe4, 0xf6, 0x30, 0xf7, 0x1f, 0x60, 0x4e, 0x5c, 0x7d, 0xba, 0x51, 0xd4, 0x8a, 0x7a, 0x33, 0x57, + 0xb4, 0x9f, 0x86, 0xbb, 0x81, 0xb9, 0x7f, 0x0f, 0x73, 0x72, 0x7b, 0x98, 0x10, 0x9d, 0xa5, 0xb7, + 0x8d, 0xc4, 0xfa, 0x84, 0xc1, 0x03, 0x63, 0x20, 0x50, 0x5d, 0xbc, 0x01, 0x85, 0xdf, 0x5b, 0xa0, + 0x8e, 0xa3, 0x88, 0x79, 0x58, 0xe2, 0x6e, 0x44, 0x32, 0x65, 0x8d, 0x79, 0x2d, 0xfe, 0xf3, 0x99, + 0x8b, 0x1f, 0x15, 0x96, 0x73, 0xce, 0xa8, 0x3d, 0xbb, 0x31, 0x0e, 0x3f, 0x22, 0xd1, 0x59, 
0xfc, + 0x3a, 0x08, 0xd7, 0x01, 0xf0, 0x18, 0xf5, 0x43, 0x19, 0x32, 0x2a, 0x1a, 0x0b, 0x2b, 0xd6, 0x6a, + 0xd1, 0x81, 0xc6, 0x0b, 0xd8, 0xcc, 0x18, 0x34, 0x61, 0x05, 0x3f, 0x04, 0x25, 0x4e, 0x94, 0x50, + 0xe2, 0x37, 0x4a, 0x2b, 0xd6, 0x6a, 0xc9, 0xa9, 0x99, 0x13, 0x25, 0x64, 0x70, 0x94, 0x59, 0xc0, + 0x9b, 0x60, 0xd1, 0xb4, 0x9c, 0xbe, 0xc2, 0x46, 0x59, 0x37, 0xc5, 0x79, 0x73, 0xa2, 0xb2, 0x93, + 0x72, 0x2a, 0x73, 0xa7, 0xc7, 0xad, 0xaa, 0xfa, 0x9c, 0x09, 0x08, 0x55, 0xe2, 0xf1, 0x06, 0x32, + 0xb0, 0x14, 0x61, 0x21, 0xdd, 0x7e, 0xe2, 0xab, 0x2e, 0x75, 0x65, 0x18, 0x93, 0x06, 0xd0, 0x49, + 0x7d, 0x7f, 0x22, 0xa9, 0x59, 0x87, 0xeb, 0x8c, 0xa9, 0x0e, 0xb7, 0x55, 0x87, 0xdb, 0x83, 0x4b, + 0xf6, 0xed, 0x30, 0x26, 0xe3, 0x86, 0xbc, 0x85, 0x85, 0xbc, 0x93, 0xfa, 0x52, 0x04, 0xaa, 0x46, + 0xaf, 0x02, 0xed, 0x1f, 0x2c, 0x50, 0x56, 0x8a, 0xb6, 0x06, 0x84, 0x4a, 0x78, 0x61, 0xa2, 0xfd, + 0xdf, 0x32, 0x3e, 0xe6, 0x8c, 0xf0, 0xb2, 0x36, 0xd1, 0x92, 0xd5, 0x34, 0x18, 0x80, 0x39, 0xca, + 0x7c, 0xa2, 0x27, 0xc0, 0xac, 0x9b, 0x78, 0x62, 0x22, 0x39, 0x25, 0x25, 0x41, 0x97, 0x80, 0x8e, + 0xd7, 0xfe, 0x31, 0x0f, 0x2a, 0x13, 0x4d, 0x0e, 0x2f, 0x83, 0x79, 0x4e, 0x02, 0x35, 0x8b, 0x94, + 0xec, 0x42, 0x56, 0xce, 0xf3, 0x48, 0xa3, 0xa7, 0xc7, 0x2d, 0x90, 0xae, 0x76, 0x71, 0x4c, 0x90, + 0xb1, 0x85, 0x0e, 0xc8, 0xf3, 0x44, 0x6b, 0x2f, 0x38, 0xeb, 0xe6, 0xc4, 0xd2, 0xa8, 0x64, 0xf6, + 0x30, 0x97, 0xba, 0x10, 0x4e, 0x8f, 0x5b, 0xff, 0x7d, 0x0d, 0xd4, 0x7e, 0xf2, 0x3c, 0x81, 0x97, + 0x41, 0xde, 0xf7, 0xf4, 0x10, 0x2b, 0x3b, 0xef, 0x8e, 0x0a, 0xea, 0x1a, 0x96, 0x78, 0x93, 0x50, + 0x49, 0xf8, 0xe9, 0x71, 0xeb, 0xcc, 0x78, 0x97, 0x9e, 0xf2, 0x3d, 0xf8, 0x09, 0xc8, 0xe3, 0x87, + 0x7a, 0xec, 0x94, 0x9d, 0x8b, 0xe6, 0x54, 0x6d, 0x63, 0x80, 0xc3, 0x08, 0x77, 0xc3, 0x28, 0x94, + 0xc3, 0xfb, 0x8c, 0xaa, 0x74, 0xd7, 0xa7, 0xb1, 0xd4, 0x03, 0x7e, 0x08, 0x3f, 0x06, 0xf9, 0x03, + 0x5f, 0x8f, 0x89, 0x89, 0x22, 0xdb, 0xc6, 0xfd, 0x48, 0x5e, 0x63, 0x31, 0x0e, 0x95, 0xea, 0xea, + 0xc4, 0x36, 0x3d, 0x77, 0xe0, 
0xb7, 0xff, 0x2c, 0x80, 0xc5, 0xc9, 0x5e, 0x82, 0x6b, 0xa0, 0x1c, + 0x87, 0x51, 0x14, 0xba, 0x5e, 0xd2, 0x37, 0xd9, 0xcb, 0xca, 0x7c, 0x47, 0x11, 0x9b, 0x7b, 0x77, + 0x50, 0x49, 0x9b, 0x6c, 0x26, 0x7d, 0x78, 0x1e, 0xcc, 0xc7, 0x24, 0x66, 0x7c, 0x68, 0xf2, 0x96, + 0xcd, 0xb6, 0x1d, 0x8d, 0x22, 0xc3, 0xc2, 0x2d, 0xb0, 0x44, 0x92, 0x1e, 0x89, 0x09, 0xc7, 0x91, + 0x2b, 0x24, 0xe3, 0x38, 0x20, 0x3a, 0x4d, 0x05, 0xa7, 0x31, 0xfa, 0xe0, 0xad, 0x91, 0xc1, 0x7e, + 0xca, 0xa3, 0x1a, 0x99, 0x42, 0xe0, 0x36, 0x80, 0xaa, 0x9d, 0x1f, 0x10, 0xdf, 0x4d, 0x98, 0xef, + 0xd2, 0x7e, 0xdc, 0x25, 0x5c, 0x27, 0xae, 0x38, 0xf6, 0xb3, 0x91, 0x5a, 0xec, 0x31, 0x7f, 0x57, + 0xf3, 0xa8, 0x86, 0xa7, 0x10, 0xf8, 0xb3, 0x05, 0x6a, 0xc2, 0xc3, 0x11, 0xe6, 0x59, 0x45, 0xaa, + 0x21, 0x5b, 0x58, 0xad, 0xac, 0xd3, 0x7f, 0x6d, 0x4e, 0xd9, 0xfb, 0x3a, 0xe2, 0x68, 0x2b, 0xb6, + 0xa8, 0xe4, 0x43, 0xe7, 0xbd, 0x51, 0x5b, 0x4e, 0xb1, 0x5f, 0xff, 0xd6, 0x5a, 0x1c, 0x6d, 0xf4, + 0x75, 0x55, 0xc5, 0xab, 0x06, 0xcb, 0x0e, 0xa8, 0xbf, 0xc9, 0x23, 0xac, 0x81, 0xc2, 0x21, 0x19, + 0xa6, 0x1d, 0x8b, 0xd4, 0x12, 0xd6, 0x41, 0x71, 0x80, 0xa3, 0x7e, 0xda, 0x98, 0x05, 0x94, 0x6e, + 0xae, 0xe6, 0xaf, 0x58, 0xed, 0x04, 0x9c, 0xfb, 0x87, 0x77, 0x40, 0x5d, 0x6f, 0x0f, 0x8b, 0xc0, + 0x94, 0x42, 0x69, 0x7c, 0xbd, 0x37, 0xb0, 0xb8, 0x9e, 0xf4, 0x91, 0x61, 0xe1, 0x05, 0xb0, 0xd0, + 0xc3, 0xe2, 0x20, 0x09, 0xb0, 0x0e, 0x51, 0x1a, 0x3f, 0xb3, 0x37, 0xb0, 0xd8, 0xde, 0xbb, 0xbe, + 0x81, 0x46, 0x7c, 0x5b, 0x00, 0x30, 0x7e, 0x0b, 0xe1, 0x47, 0xa0, 0x42, 0x99, 0x2b, 0xbc, 0x1e, + 0xf1, 0xfb, 0x11, 0x31, 0x51, 0xb2, 0x49, 0xbc, 0xcb, 0xf6, 0x0d, 0x83, 0x00, 0xcd, 0xd6, 0xf0, + 0x22, 0x00, 0x94, 0xb9, 0xe4, 0x4b, 0xe2, 0xf5, 0x25, 0x31, 0x01, 0x97, 0xcc, 0x99, 0xf2, 0x2e, + 0xdb, 0x4a, 0x09, 0x54, 0xa6, 0xa3, 0x65, 0xfb, 0x57, 0x0b, 0xa8, 0xff, 0x90, 0x84, 0x51, 0x41, + 0xb6, 0x39, 0x8b, 0x11, 0xda, 0x81, 0x47, 0x16, 0x80, 0x69, 0xe7, 0xbb, 0x6a, 0x88, 0xb8, 0x44, + 0x8d, 0x32, 0xd1, 0xb0, 0x74, 0x15, 0xcc, 0xf6, 0xb5, 0x42, 0x49, 
0x36, 0x4e, 0xc5, 0xb8, 0x4e, + 0xcd, 0x08, 0xca, 0x18, 0x54, 0xe3, 0x53, 0x88, 0xca, 0x7f, 0x44, 0x68, 0x20, 0x7b, 0xba, 0x57, + 0xe6, 0xc6, 0xf9, 0xbf, 0xa5, 0x51, 0x64, 0xd8, 0xf6, 0x13, 0x0b, 0x2c, 0x4e, 0x06, 0x81, 0xdf, + 0x5a, 0x2a, 0xb1, 0xd3, 0x5f, 0x75, 0x77, 0xe6, 0xb5, 0xad, 0xc3, 0x39, 0x67, 0xd2, 0xcb, 0xca, + 0x3e, 0x04, 0xd0, 0x6c, 0xdd, 0x8e, 0xc0, 0x9c, 0x7a, 0x54, 0x54, 0x89, 0x08, 0xa2, 0x9e, 0x53, + 0x61, 0xc6, 0x4a, 0x56, 0x22, 0xfb, 0x29, 0x8c, 0x46, 0x3c, 0x7c, 0x07, 0x14, 0x29, 0xa6, 0x4c, + 0xe8, 0xab, 0x2d, 0x3a, 0xff, 0x31, 0x86, 0xc5, 0x5d, 0x05, 0xa2, 0x94, 0xbb, 0x5a, 0x7f, 0xf2, + 0xb4, 0x95, 0xfb, 0xee, 0xa8, 0x95, 0x7b, 0x7c, 0xd4, 0xca, 0x3d, 0x3d, 0x6a, 0xe5, 0x1e, 0xfd, + 0xb2, 0x92, 0x6b, 0x7f, 0x01, 0xca, 0x2a, 0x9a, 0x90, 0x38, 0x4e, 0x66, 0x1d, 0xd2, 0xf9, 0xe0, + 0xd9, 0xcb, 0x66, 0xee, 0xf9, 0xcb, 0x66, 0xee, 0xc5, 0xcb, 0x66, 0xee, 0xd1, 0x49, 0xd3, 0x7a, + 0x76, 0xd2, 0xb4, 0x9e, 0x9f, 0x34, 0xad, 0x17, 0x27, 0x4d, 0xeb, 0xf7, 0x93, 0xa6, 0xf5, 0xf8, + 0x8f, 0x66, 0xee, 0x7e, 0x51, 0x67, 0xe7, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x4e, 0x6e, + 0xcd, 0x6e, 0x0c, 0x00, 0x00, +} + +func (m *LogicalNode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogicalNode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LogicalNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastUpdatedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i -= len(m.MachineType) + copy(dAtA[i:], m.MachineType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineType))) + i-- + dAtA[i] = 0x4a + i-- + if m.Reserved { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.Conditions)) + i-- + dAtA[i] = 0x38 + { + size, err := m.AllocatableResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.SpecialHardwareTypes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.Taints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.GeoInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Node != nil { + { + size, err := m.Node.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeGeoInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeGeoInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeGeoInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.FaultDomain) + copy(dAtA[i:], m.FaultDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FaultDomain))) + i-- + dAtA[i] = 0x2a + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x22 + i -= len(m.DataCenter) + copy(dAtA[i:], m.DataCenter) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataCenter))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourcePartition)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Region)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NodeResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ScalarResources) > 0 { + keysForScalarResources := make([]string, 0, len(m.ScalarResources)) + for k := range m.ScalarResources { + keysForScalarResources = append(keysForScalarResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForScalarResources) + for iNdEx := len(keysForScalarResources) - 1; iNdEx >= 0; iNdEx-- { + v := 
m.ScalarResources[ResourceName(keysForScalarResources[iNdEx])] + baseI := i + i = encodeVarintGenerated(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(keysForScalarResources[iNdEx]) + copy(dAtA[i:], keysForScalarResources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForScalarResources[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.AllowedPodNumber)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.EphemeralStorage)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Memory)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.MilliCPU)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NodeSpecialHardWareTypeInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSpecialHardWareTypeInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSpecialHardWareTypeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.HasFPGA { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.HasGpu { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NodeTaints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeTaints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeTaints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if 
m.NoExecute { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.NoSchedule { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResponseFromRRM) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFromRRM) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseFromRRM) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x18 + if len(m.RegionNodeEvents) > 0 { + for iNdEx := len(m.RegionNodeEvents) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RegionNodeEvents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RpNodeEvents) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RpNodeEvents) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RpNodeEvents) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NodeEvents) > 0 { + for iNdEx := len(m.NodeEvents) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NodeEvents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LogicalNode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = m.GeoInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Taints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SpecialHardwareTypes.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.AllocatableResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Conditions)) + n += 2 + l = len(m.MachineType) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdatedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NodeGeoInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Region)) + n += 1 + sovGenerated(uint64(m.ResourcePartition)) + l = len(m.DataCenter) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.AvailabilityZone) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FaultDomain) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.MilliCPU)) + n += 1 + sovGenerated(uint64(m.Memory)) + n += 1 + sovGenerated(uint64(m.EphemeralStorage)) + n += 1 + sovGenerated(uint64(m.AllowedPodNumber)) + if len(m.ScalarResources) > 0 { + for k, v := range m.ScalarResources { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + sovGenerated(uint64(v)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeSpecialHardWareTypeInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *NodeTaints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *ResponseFromRRM) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RegionNodeEvents) > 0 { + for _, e := range m.RegionNodeEvents { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.Length)) + return n +} + +func (m *RpNodeEvents) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeEvents) > 0 { + for _, e := range m.NodeEvents { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + n += 1 + sovGenerated(uint64(m.Nanos)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LogicalNode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogicalNode{`, + `Id:` + 
fmt.Sprintf("%v", this.Id) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `GeoInfo:` + strings.Replace(strings.Replace(this.GeoInfo.String(), "NodeGeoInfo", "NodeGeoInfo", 1), `&`, ``, 1) + `,`, + `Taints:` + strings.Replace(strings.Replace(this.Taints.String(), "NodeTaints", "NodeTaints", 1), `&`, ``, 1) + `,`, + `SpecialHardwareTypes:` + strings.Replace(strings.Replace(this.SpecialHardwareTypes.String(), "NodeSpecialHardWareTypeInfo", "NodeSpecialHardWareTypeInfo", 1), `&`, ``, 1) + `,`, + `AllocatableResource:` + strings.Replace(strings.Replace(this.AllocatableResource.String(), "NodeResource", "NodeResource", 1), `&`, ``, 1) + `,`, + `Conditions:` + fmt.Sprintf("%v", this.Conditions) + `,`, + `Reserved:` + fmt.Sprintf("%v", this.Reserved) + `,`, + `MachineType:` + fmt.Sprintf("%v", this.MachineType) + `,`, + `LastUpdatedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdatedTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Node:` + strings.Replace(this.Node.String(), "LogicalNode", "LogicalNode", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeGeoInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeGeoInfo{`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `ResourcePartition:` + fmt.Sprintf("%v", this.ResourcePartition) + `,`, + `DataCenter:` + fmt.Sprintf("%v", this.DataCenter) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `FaultDomain:` + fmt.Sprintf("%v", this.FaultDomain) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResource) String() string { + if this == nil { + return "nil" + } + keysForScalarResources := make([]string, 0, len(this.ScalarResources)) + for k := range this.ScalarResources { + 
keysForScalarResources = append(keysForScalarResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForScalarResources) + mapStringForScalarResources := "map[ResourceName]int64{" + for _, k := range keysForScalarResources { + mapStringForScalarResources += fmt.Sprintf("%v: %v,", k, this.ScalarResources[ResourceName(k)]) + } + mapStringForScalarResources += "}" + s := strings.Join([]string{`&NodeResource{`, + `MilliCPU:` + fmt.Sprintf("%v", this.MilliCPU) + `,`, + `Memory:` + fmt.Sprintf("%v", this.Memory) + `,`, + `EphemeralStorage:` + fmt.Sprintf("%v", this.EphemeralStorage) + `,`, + `AllowedPodNumber:` + fmt.Sprintf("%v", this.AllowedPodNumber) + `,`, + `ScalarResources:` + mapStringForScalarResources + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpecialHardWareTypeInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSpecialHardWareTypeInfo{`, + `HasGpu:` + fmt.Sprintf("%v", this.HasGpu) + `,`, + `HasFPGA:` + fmt.Sprintf("%v", this.HasFPGA) + `,`, + `}`, + }, "") + return s +} +func (this *NodeTaints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeTaints{`, + `NoSchedule:` + fmt.Sprintf("%v", this.NoSchedule) + `,`, + `NoExecute:` + fmt.Sprintf("%v", this.NoExecute) + `,`, + `}`, + }, "") + return s +} +func (this *ResponseFromRRM) String() string { + if this == nil { + return "nil" + } + repeatedStringForRegionNodeEvents := "[]RpNodeEvents{" + for _, f := range this.RegionNodeEvents { + repeatedStringForRegionNodeEvents += strings.Replace(strings.Replace(f.String(), "RpNodeEvents", "RpNodeEvents", 1), `&`, ``, 1) + "," + } + repeatedStringForRegionNodeEvents += "}" + s := strings.Join([]string{`&ResponseFromRRM{`, + `RegionNodeEvents:` + repeatedStringForRegionNodeEvents + `,`, + `Length:` + fmt.Sprintf("%v", this.Length) + `,`, + `}`, + }, "") + return s +} +func (this *RpNodeEvents) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForNodeEvents := "[]*NodeEvent{" + for _, f := range this.NodeEvents { + repeatedStringForNodeEvents += strings.Replace(f.String(), "NodeEvent", "NodeEvent", 1) + "," + } + repeatedStringForNodeEvents += "}" + s := strings.Join([]string{`&RpNodeEvents{`, + `NodeEvents:` + repeatedStringForNodeEvents + `,`, + `}`, + }, "") + return s +} +func (this *Timestamp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Timestamp{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LogicalNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogicalNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogicalNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GeoInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GeoInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Taints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecialHardwareTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpecialHardwareTypes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatableResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AllocatableResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + m.Conditions = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Conditions |= int32(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Reserved = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MachineType = NodeMachineType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdatedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdatedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = EventType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &LogicalNode{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeGeoInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeGeoInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeGeoInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + m.Region = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Region |= RegionName(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourcePartition", wireType) + } + m.ResourcePartition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResourcePartition |= 
ResourcePartitionName(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataCenter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataCenter = DataCenterName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = AvailabilityZoneName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FaultDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FaultDomain = FaultDomainName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MilliCPU", wireType) + } + m.MilliCPU = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MilliCPU |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + m.Memory = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Memory |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralStorage", wireType) + } + m.EphemeralStorage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EphemeralStorage |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedPodNumber", wireType) + } + m.AllowedPodNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AllowedPodNumber |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScalarResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScalarResources == nil { + m.ScalarResources = make(map[ResourceName]int64) + } + var mapkey ResourceName + var mapvalue int64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ScalarResources[ResourceName(mapkey)] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSpecialHardWareTypeInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: NodeSpecialHardWareTypeInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpecialHardWareTypeInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasGpu", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasGpu = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasFPGA", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasFPGA = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeTaints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeTaints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeTaints: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoSchedule", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoSchedule = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoExecute", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoExecute = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseFromRRM) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseFromRRM: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseFromRRM: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionNodeEvents", wireType) + } + var msglen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegionNodeEvents = append(m.RegionNodeEvents, RpNodeEvents{}) + if err := m.RegionNodeEvents[len(m.RegionNodeEvents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RpNodeEvents) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RpNodeEvents: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RpNodeEvents: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeEvents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeEvents = append(m.NodeEvents, &NodeEvent{}) + if err := m.NodeEvents[len(m.NodeEvents)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 
0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/resource-management/pkg/common-lib/types/generated.proto b/resource-management/pkg/common-lib/types/generated.proto new file mode 100644 index 00000000..6430b9b6 --- /dev/null +++ b/resource-management/pkg/common-lib/types/generated.proto @@ -0,0 +1,170 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.global_resource_service.resource_management.pkg.common_lib.types; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "types"; + +// LogicalNode is the abstraction of the node definition in the resource clusters +// LogicalNode is a minimum set of information the scheduler needs to place the workload to a node in the region-less platform +// +// Initial set of fields as shown below. 
+// +// TODO: add the annotation for serialization +message LogicalNode { + // Node UUID from each resource partition cluster + optional string id = 1; + + // ResourceVersion is the RV from each resource partition cluster + optional string resource_version = 2; + + // GeoInfo defines the node location info such as region, DC, RP cluster etc. for application placement + optional NodeGeoInfo geo_info = 3; + + // Taints defines scheduling or other control action for a node + optional NodeTaints taints = 4; + + // SpecialHardwareTypes defines if the node has special hardware such as GPU or FPGA etc + optional NodeSpecialHardWareTypeInfo special_hardware_types = 5; + + // AllocatableReesource defines the resources on the node that can be used by schedulers + optional NodeResource allocatable_resource = 6; + + // Conditions is a short version of the node condition array from Arktos, each bits in the byte defines one node condition + optional int32 conditions = 7; + + // Reserved defines if the node is reserved at the resource partition cluster level + // TBD Node reservation model for post 630 + optional bool reserved = 8; + + // MachineType defines the type of category of the node, such as # of CPUs of the node, where the category can be + // defined as highend, lowend, medium as an example + // TBD for post 630 + optional string machine_type = 9; + + // LastUpdatedTime defines the time when node status was updated in resource partition + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time last_updated_time = 10; +} + +// NodeEvent is a event of nodes +message NodeEvent { + optional string id = 1; + + optional LogicalNode node = 2; +} + +message NodeGeoInfo { + // Region and RsourcePartition are required + optional int64 region = 1; + + optional int64 rp = 2; + + // Optional fields for fine-tuned resource management and application placements + optional string dc = 3; + + optional string az = 4; + + optional string fd = 5; +} + +// struct definition from Arktos 
node_info.go +message NodeResource { + optional int64 milli_cpu = 1; + + optional int64 memory = 2; + + optional int64 ephemeral_storage = 3; + + // We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value()) + // explicitly as int, to avoid conversions and improve performance. + optional int32 allowed_pod_number = 4; + + // ScalarResources such as GPU or FPGA etc. + map scalar_resources = 5; +} + +// TODO: consider refine for GPU types, such as NVIDIA and AMD etc. +message NodeSpecialHardWareTypeInfo { + optional bool hasgpu = 1; + + optional bool hasfpga = 2; +} + +message NodeTaints { + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // Enforced by the scheduler. + optional bool no_schedule = 1; + + // Evict any already-running pods that do not tolerate the taint + optional bool no_execute = 2; +} + +// RRM: Resource Region Manager +message ResponseFromRRM { + repeated RpNodeEvents region_node_events = 1; + + optional uint64 length = 3; +} + +// RpNodeEvents is a struct for node events from each RP +message RpNodeEvents { + repeated NodeEvent node_events = 1; +} + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Time { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. 
+ optional int32 nanos = 2; +} + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + diff --git a/resource-management/pkg/common-lib/types/location/location.go b/resource-management/pkg/common-lib/types/location.go similarity index 99% rename from resource-management/pkg/common-lib/types/location/location.go rename to resource-management/pkg/common-lib/types/location.go index 41e05246..51f482b8 100644 --- a/resource-management/pkg/common-lib/types/location/location.go +++ b/resource-management/pkg/common-lib/types/location.go @@ -1,4 +1,4 @@ -package location +package types import ( "fmt" diff --git a/resource-management/pkg/common-lib/types/location/location_test.go b/resource-management/pkg/common-lib/types/location_test.go similarity index 99% rename from resource-management/pkg/common-lib/types/location/location_test.go rename to resource-management/pkg/common-lib/types/location_test.go index e1ed5c03..6016808b 100644 --- a/resource-management/pkg/common-lib/types/location/location_test.go +++ b/resource-management/pkg/common-lib/types/location_test.go @@ -1,4 +1,4 @@ -package location +package types import ( "fmt" diff --git a/resource-management/pkg/common-lib/types/logicalNode.go b/resource-management/pkg/common-lib/types/logicalNode.go index d95e93cd..2f038c33 100644 --- 
a/resource-management/pkg/common-lib/types/logicalNode.go +++ b/resource-management/pkg/common-lib/types/logicalNode.go @@ -2,108 +2,14 @@ package types import ( "fmt" - "strconv" - "time" - "k8s.io/klog/v2" + "strconv" ) const ( PreserveNode_KeyPrefix = "MinNode" ) -// for now, simply define those as string -// RegionName and ResourcePartitionName are updated to int per initial performance test of distributor ProcessEvents -// Later the data type might be changed back to string due to further performance evaluation result -type RegionName int -type ResourcePartitionName int -type DataCenterName string -type AvailabilityZoneName string -type FaultDomainName string - -type NodeGeoInfo struct { - // Region and RsourcePartition are required - Region RegionName - ResourcePartition ResourcePartitionName - - // Optional fields for fine-tuned resource management and application placements - DataCenter DataCenterName - AvailabilityZone AvailabilityZoneName - FaultDomain FaultDomainName -} - -type NodeTaints struct { - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // Enforced by the scheduler. - NoSchedule bool - // Evict any already-running pods that do not tolerate the taint - NoExecute bool -} - -// TODO: consider refine for GPU types, such as NVIDIA and AMD etc. -type NodeSpecialHardWareTypeInfo struct { - HasGpu bool - HasFPGA bool -} - -// struct definition from Arktos node_info.go -type NodeResource struct { - MilliCPU int64 - Memory int64 - EphemeralStorage int64 - // We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value()) - // explicitly as int, to avoid conversions and improve performance. - AllowedPodNumber int - // ScalarResources such as GPU or FPGA etc. 
- ScalarResources map[ResourceName]int64 -} - -// TODO: from the Node definition in resource cluster, to the logicalNode struct, to the scheduler node_info structure -// the ResourceName need to be set and aligned -type ResourceName string - -// LogicalNode is the abstraction of the node definition in the resource clusters -// LogicalNode is a minimum set of information the scheduler needs to place the workload to a node in the region-less platform -// -// Initial set of fields as shown below. -// -// TODO: add the annotation for serialization -// -type LogicalNode struct { - // Node UUID from each resource partition cluster - Id string - - // ResourceVersion is the RV from each resource partition cluster - ResourceVersion string - - // GeoInfo defines the node location info such as region, DC, RP cluster etc. for application placement - GeoInfo NodeGeoInfo - - // Taints defines scheduling or other control action for a node - Taints NodeTaints - - // SpecialHardwareTypes defines if the node has special hardware such as GPU or FPGA etc - SpecialHardwareTypes NodeSpecialHardWareTypeInfo - - // AllocatableReesource defines the resources on the node that can be used by schedulers - AllocatableResource NodeResource - - // Conditions is a short version of the node condition array from Arktos, each bits in the byte defines one node condition - Conditions byte - - // Reserved defines if the node is reserved at the resource partition cluster level - // TBD Node reservation model for post 630 - Reserved bool - - // MachineType defines the type of category of the node, such as # of CPUs of the node, where the category can be - // defined as highend, lowend, medium as an example - // TBD for post 630 - MachineType NodeMachineType - - // LastUpdatedTime defines the time when node status was updated in resource partition - LastUpdatedTime time.Time -} - func (n *LogicalNode) Copy() *LogicalNode { return &LogicalNode{ Id: n.Id, @@ -134,5 +40,3 @@ func (n *LogicalNode) GetKey() 
string { } return "" } - -type NodeMachineType string diff --git a/resource-management/pkg/common-lib/types/time.go b/resource-management/pkg/common-lib/types/time.go new file mode 100644 index 00000000..e0258112 --- /dev/null +++ b/resource-management/pkg/common-lib/types/time.go @@ -0,0 +1,180 @@ +/* +Copyright 2015 The Kubernetes Authors. +Copyright 2022 Authors of Arktos - file modified. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "time" +) + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Time struct { + time.Time `protobuf:"-"` +} + +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// NewTime returns a wrapped instance of the provided time +func NewTime(time time.Time) Time { + return Time{time} +} + +// Date returns the Time corresponding to the supplied parameters +// by wrapping time.Date. 
+func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { + return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// Now returns the current local time. +func Now() Time { + return Time{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *Time) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t *Time) Before(u *Time) bool { + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false +} + +// Equal reports whether the time instant t is equal to u. +func (t *Time) Equal(u *Time) bool { + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false +} + +// Unix returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func Unix(sec int64, nsec int64) Time { + return Time{time.Unix(sec, nsec)} +} + +// Rfc3339Copy returns a copy of the Time at second-level precision. +func (t Time) Rfc3339Copy() Time { + copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339)) + return Time{copied} +} + +// UnmarshalJSON implements the json.Unmarshaller interface. 
+func (t *Time) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *Time) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + buf := make([]byte, 0, len(time.RFC3339)+2) + buf = append(buf, '"') + // time cannot contain non escapable JSON characters + buf = t.UTC().AppendFormat(buf, time.RFC3339) + buf = append(buf, '"') + return buf, nil +} + +// ToUnstructured implements the value.UnstructuredConverter interface. +func (t Time) ToUnstructured() interface{} { + if t.IsZero() { + return nil + } + buf := make([]byte, 0, len(time.RFC3339)) + buf = t.UTC().AppendFormat(buf, time.RFC3339) + return string(buf) +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+func (_ Time) OpenAPISchemaFormat() string { return "date-time" } + +// MarshalQueryParameter converts to a URL query parameter value +func (t Time) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(time.RFC3339), nil +} diff --git a/resource-management/pkg/common-lib/types/time_proto.go b/resource-management/pkg/common-lib/types/time_proto.go new file mode 100644 index 00000000..c64b6a16 --- /dev/null +++ b/resource-management/pkg/common-lib/types/time_proto.go @@ -0,0 +1,98 @@ +/* +Copyright 2015 The Kubernetes Authors. +Copyright 2022 Authors of Arktos - file modified. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "time" +) + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. 
+ Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"` +} + +// Timestamp returns the Time as a new Timestamp value. +func (m *Time) ProtoTime() *Timestamp { + if m == nil { + return &Timestamp{} + } + return &Timestamp{ + Seconds: m.Time.Unix(), + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. + // Nanos: int32(m.Time.Nanosecond()), + } +} + +// Size implements the protobuf marshalling interface. +func (m *Time) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoTime().Size() +} + +// Reset implements the protobuf marshalling interface. +func (m *Time) Unmarshal(data []byte) error { + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} + if err := p.Unmarshal(data); err != nil { + return err + } + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. + // m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + m.Time = time.Unix(p.Seconds, int64(0)).Local() + return nil +} + +// Marshal implements the protobuf marshaling interface. +func (m *Time) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } + return m.ProtoTime().Marshal() +} + +// MarshalTo implements the protobuf marshaling interface. +func (m *Time) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalTo(data) +} + +// MarshalToSizedBuffer implements the protobuf reverse marshaling interface. 
+func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalToSizedBuffer(data) +} diff --git a/resource-management/pkg/common-lib/types/typeDef.go b/resource-management/pkg/common-lib/types/typeDef.go new file mode 100644 index 00000000..d141e950 --- /dev/null +++ b/resource-management/pkg/common-lib/types/typeDef.go @@ -0,0 +1,126 @@ +package types + +import "time" + +// for now, simply define those as string +// RegionName and ResourcePartitionName are updated to int per initial performance test of distributor ProcessEvents +// Later the data type might be changed back to string due to further performance evaluation result +type RegionName int +type ResourcePartitionName int +type DataCenterName string +type AvailabilityZoneName string +type FaultDomainName string +type NodeMachineType string + +// TODO: from the Node definition in resource cluster, to the logicalNode struct, to the scheduler node_info structure +// the ResourceName need to be set and aligned +type ResourceName string + +// EventType defines the possible types of events. +type EventType string + +type NodeGeoInfo struct { + // Region and RsourcePartition are required + Region RegionName `json:"region" protobuf:"bytes,1,opt,name=region"` + ResourcePartition ResourcePartitionName `json:"rp" protobuf:"bytes,2,opt,name=rp"` + + // Optional fields for fine-tuned resource management and application placements + DataCenter DataCenterName `json:"dc" protobuf:"bytes,3,opt,name=dc"` + AvailabilityZone AvailabilityZoneName `json:"az" protobuf:"bytes,4,opt,name=az"` + FaultDomain FaultDomainName `json:"fd" protobuf:"bytes,5,opt,name=fd"` +} + +type NodeTaints struct { + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // Enforced by the scheduler. 
+ NoSchedule bool `json:"no_schedule" protobuf:"varint,1,opt,name=no_schedule"` + // Evict any already-running pods that do not tolerate the taint + NoExecute bool `json:"no_execute" protobuf:"varint,2,opt,name=no_execute"` +} + +// TODO: consider refine for GPU types, such as NVIDIA and AMD etc. +type NodeSpecialHardWareTypeInfo struct { + HasGpu bool `json:"hasgpu" protobuf:"varint,1,opt,name=hasgpu"` + HasFPGA bool `json:"hasfpga" protobuf:"varint,2,opt,name=hasfpga"` +} + +// struct definition from Arktos node_info.go +type NodeResource struct { + MilliCPU int64 `json:"milli_cpu" protobuf:"varint,1,opt,name=milli_cpu"` + Memory int64 `json:"memory" protobuf:"varint,2,opt,name=memory"` + EphemeralStorage int64 `json:"ephemeral_storage" protobuf:"varint,3,opt,name=ephemeral_storage"` + // We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value()) + // explicitly as int, to avoid conversions and improve performance. + AllowedPodNumber int32 `json:"allowed_pod_number" protobuf:"varint,4,opt,name=allowed_pod_number"` + // ScalarResources such as GPU or FPGA etc. + ScalarResources map[ResourceName]int64 `json:"scalar_resources" protobuf:"bytes,5,opt,name=scalar_resources"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LogicalNode is the abstraction of the node definition in the resource clusters +// LogicalNode is a minimum set of information the scheduler needs to place the workload to a node in the region-less platform +// +// Initial set of fields as shown below. +// +// TODO: add the annotation for serialization +// +type LogicalNode struct { + // Node UUID from each resource partition cluster + Id string `json:"id" protobuf:"bytes,1,opt,name=id"` + + // ResourceVersion is the RV from each resource partition cluster + ResourceVersion string `json:"resource_version" protobuf:"bytes,2,opt,name=resource_version"` + + // GeoInfo defines the node location info such as region, DC, RP cluster etc. 
for application placement + GeoInfo NodeGeoInfo `json:"geo_info" protobuf:"bytes,3,opt,name=geo_info"` + + // Taints defines scheduling or other control action for a node + Taints NodeTaints `json:"taints" protobuf:"bytes,4,opt,name=taints"` + + // SpecialHardwareTypes defines if the node has special hardware such as GPU or FPGA etc + SpecialHardwareTypes NodeSpecialHardWareTypeInfo `json:"special_hardware_types" protobuf:"bytes,5,opt,name=special_hardware_types"` + + // AllocatableReesource defines the resources on the node that can be used by schedulers + AllocatableResource NodeResource `json:"allocatable_resource" protobuf:"bytes,6,opt,name=allocatable_resource"` + + // Conditions is a short version of the node condition array from Arktos, each bits in the byte defines one node condition + Conditions int32 `json:"conditions" protobuf:"varint,7,opt,name=conditions"` + + // Reserved defines if the node is reserved at the resource partition cluster level + // TBD Node reservation model for post 630 + Reserved bool `json:"reserved" protobuf:"varint,8,opt,name=reserved"` + + // MachineType defines the type of category of the node, such as # of CPUs of the node, where the category can be + // defined as highend, lowend, medium as an example + // TBD for post 630 + MachineType NodeMachineType `json:"machine_type" protobuf:"bytes,9,opt,name=machine_type"` + + // LastUpdatedTime defines the time when node status was updated in resource partition + LastUpdatedTime Time `json:"last_updated_time" protobuf:"bytes,10,opt,name=last_updated_time"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RpNodeEvents is a struct for node events from each RP +type RpNodeEvents struct { + NodeEvents []*NodeEvent `json:"node_events" protobuf:"bytes,1,rep,name=node_events"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RRM: Resource Region Manager +type ResponseFromRRM struct { + RegionNodeEvents []RpNodeEvents 
`json:"region_node_events" protobuf:"bytes,1,rep,name=region_node_events"` + Length uint64 `json:"length" protobuf:"varint,3,opt,name=length"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeEvent is a event of nodes +type NodeEvent struct { + Type EventType `json:"id" protobuf:"bytes,1,opt,name=id"` + Node *LogicalNode `json:"node" protobuf:"bytes,2,opt,name=node"` + // +optional + checkpoints []time.Time `protobuf:"-"` //`json:"checkpoints" protobuf:"bytes,3,rep,name=checkpoints"` +} diff --git a/resource-management/pkg/distributor/cache/eventqueue.go b/resource-management/pkg/distributor/cache/eventqueue.go index f8f8655c..57cfc6e5 100644 --- a/resource-management/pkg/distributor/cache/eventqueue.go +++ b/resource-management/pkg/distributor/cache/eventqueue.go @@ -9,13 +9,11 @@ import ( "global-resource-service/resource-management/pkg/common-lib/metrics" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" - "global-resource-service/resource-management/pkg/common-lib/types/location" "global-resource-service/resource-management/pkg/distributor/node" ) // TODO - read from config -const LengthOfNodeEventQueue = 10000 +const LengthOfNodeEventQueue = 100000 type nodeEventQueueByLoc struct { circularEventQueue []*node.ManagedNodeEvent @@ -42,6 +40,7 @@ func (qloc *nodeEventQueueByLoc) enqueueEvent(e *node.ManagedNodeEvent) { if qloc.endPos == qloc.startPos+LengthOfNodeEventQueue { // cache is full - remove the oldest element + klog.Warningf("cache is full") qloc.startPos++ } @@ -49,7 +48,7 @@ func (qloc *nodeEventQueueByLoc) enqueueEvent(e *node.ManagedNodeEvent) { qloc.endPos++ } -func (qloc *nodeEventQueueByLoc) getEventsFromIndex(startIndex int) ([]*event.NodeEvent, error) { +func (qloc *nodeEventQueueByLoc) getEventsFromIndex(startIndex int) ([]*types.NodeEvent, error) { qloc.eqLock.RLock() defer qloc.eqLock.RUnlock() @@ -58,7 +57,7 @@ 
func (qloc *nodeEventQueueByLoc) getEventsFromIndex(startIndex int) ([]*event.No } length := qloc.endPos - startIndex - result := make([]*event.NodeEvent, length) + result := make([]*types.NodeEvent, length) for i := 0; i < length; i++ { result[i] = qloc.circularEventQueue[(startIndex+i)%LengthOfNodeEventQueue].GetNodeEvent() } @@ -95,19 +94,19 @@ func (qloc *nodeEventQueueByLoc) getEventIndexSinceResourceVersion(resourceVersi type NodeEventQueue struct { // corresponding client id clientId string - watchChan chan *event.NodeEvent + watchChan chan *types.NodeEvent // used to lock enqueue operation during snapshot enqueueLock sync.RWMutex - eventQueueByLoc map[location.Location]*nodeEventQueueByLoc + eventQueueByLoc map[types.Location]*nodeEventQueueByLoc locationLock sync.RWMutex } func NewNodeEventQueue(clientId string) *NodeEventQueue { queue := &NodeEventQueue{ clientId: clientId, - eventQueueByLoc: make(map[location.Location]*nodeEventQueueByLoc), + eventQueueByLoc: make(map[types.Location]*nodeEventQueueByLoc), } return queue @@ -140,7 +139,7 @@ func (eq *NodeEventQueue) EnqueueEvent(e *node.ManagedNodeEvent) { queueByLoc.enqueueEvent(e) } -func (eq *NodeEventQueue) Watch(rvs types.InternalResourceVersionMap, clientWatchChan chan *event.NodeEvent, stopCh chan struct{}) error { +func (eq *NodeEventQueue) Watch(rvs types.InternalResourceVersionMap, clientWatchChan chan *types.NodeEvent, stopCh chan struct{}) error { if eq.watchChan != nil { return errors.New("currently only support one watcher per node event queue") } @@ -151,9 +150,9 @@ func (eq *NodeEventQueue) Watch(rvs types.InternalResourceVersionMap, clientWatc return err } - eq.watchChan = make(chan *event.NodeEvent) + eq.watchChan = make(chan *types.NodeEvent, 30) // writing event to channel - go func(downstreamCh chan *event.NodeEvent, initEvents []*event.NodeEvent, stopCh chan struct{}, upstreamCh chan *event.NodeEvent) { + go func(downstreamCh chan *types.NodeEvent, initEvents []*types.NodeEvent, 
stopCh chan struct{}, upstreamCh chan *types.NodeEvent) { if downstreamCh == nil { return } @@ -186,8 +185,8 @@ func (eq *NodeEventQueue) Watch(rvs types.InternalResourceVersionMap, clientWatc return nil } -func (eq *NodeEventQueue) getAllEventsSinceResourceVersion(rvs types.InternalResourceVersionMap) ([]*event.NodeEvent, error) { - locStartPostitions := make(map[location.Location]int) +func (eq *NodeEventQueue) getAllEventsSinceResourceVersion(rvs types.InternalResourceVersionMap) ([]*types.NodeEvent, error) { + locStartPostitions := make(map[types.Location]int) for loc, rv := range rvs { qByLoc, isOK := eq.eventQueueByLoc[loc] @@ -203,10 +202,10 @@ func (eq *NodeEventQueue) getAllEventsSinceResourceVersion(rvs types.InternalRes } } - nodeEvents := make([]*event.NodeEvent, 0) + nodeEvents := make([]*types.NodeEvent, 0, 1000) for loc, qByLoc := range eq.eventQueueByLoc { startIndex, isOK := locStartPostitions[loc] - var events []*event.NodeEvent + var events []*types.NodeEvent var err error if isOK { events, err = qByLoc.getEventsFromIndex(startIndex) diff --git a/resource-management/pkg/distributor/cache/eventqueue_test.go b/resource-management/pkg/distributor/cache/eventqueue_test.go index 21f9c338..af99d120 100644 --- a/resource-management/pkg/distributor/cache/eventqueue_test.go +++ b/resource-management/pkg/distributor/cache/eventqueue_test.go @@ -4,8 +4,6 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/assert" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" - "global-resource-service/resource-management/pkg/common-lib/types/location" nodeutil "global-resource-service/resource-management/pkg/distributor/node" "strconv" "strings" @@ -13,7 +11,7 @@ import ( ) var rvToGenerate = 10 -var defaultLocBeijing_RP1 = location.NewLocation(location.Beijing, location.ResourcePartition1) +var defaultLocBeijing_RP1 = types.NewLocation(types.Beijing, 
types.ResourcePartition1) func Test_getEventIndexSinceResourceVersion_ByLoc(t *testing.T) { // initalize node event queue by loc @@ -90,14 +88,14 @@ func Test_getEventIndexSinceResourceVersion_ByLoc(t *testing.T) { assert.Equal(t, -1, index) } -func generateManagedNodeEvent(loc *location.Location) *nodeutil.ManagedNodeEvent { +func generateManagedNodeEvent(loc *types.Location) *nodeutil.ManagedNodeEvent { rvToGenerate += 1 node := createRandomNode(rvToGenerate, loc) - nodeEvent := event.NewNodeEvent(node, event.Added) + nodeEvent := types.NewNodeEvent(node, types.Added) return nodeutil.NewManagedNodeEvent(nodeEvent, loc) } -func createRandomNode(rv int, loc *location.Location) *types.LogicalNode { +func createRandomNode(rv int, loc *types.Location) *types.LogicalNode { id := uuid.New() return &types.LogicalNode{ Id: id.String(), diff --git a/resource-management/pkg/distributor/distributor.go b/resource-management/pkg/distributor/distributor.go index 6ac5f3bf..a7ebca49 100644 --- a/resource-management/pkg/distributor/distributor.go +++ b/resource-management/pkg/distributor/distributor.go @@ -9,8 +9,6 @@ import ( "global-resource-service/resource-management/pkg/common-lib/interfaces/store" "global-resource-service/resource-management/pkg/common-lib/metrics" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" - "global-resource-service/resource-management/pkg/common-lib/types/location" "global-resource-service/resource-management/pkg/distributor/cache" "global-resource-service/resource-management/pkg/distributor/node" "global-resource-service/resource-management/pkg/distributor/storage" @@ -55,7 +53,7 @@ func (dis *ResourceDistributor) SetPersistHelper(persistTool store.StoreInterfac // TODO - get virtual node number, region num, partition num from external func createNodeStore() *storage.NodeStore { - return storage.NewNodeStore(virutalStoreNumPerResourcePartition, 
location.GetRegionNum(), location.GetRPNum()) + return storage.NewNodeStore(virutalStoreNumPerResourcePartition, types.GetRegionNum(), types.GetRPNum()) } // TODO: post 630, allocate resources per request for different type of hardware and regions @@ -144,7 +142,7 @@ func (dis *ResourceDistributor) allocateNodesToClient(clientId string, requested } func (dis *ResourceDistributor) addBookmarkEvent(stores []*storage.VirtualNodeStore, eventQueue *cache.NodeEventQueue) { - locations := make(map[location.Location]bool) + locations := make(map[types.Location]bool) for _, store := range stores { loc := store.GetLocation() @@ -226,7 +224,7 @@ func (dis *ResourceDistributor) ListNodesForClient(clientId string) ([]*types.Lo return nodes, finalRVs, nil } -func (dis *ResourceDistributor) Watch(clientId string, rvs types.TransitResourceVersionMap, watchChan chan *event.NodeEvent, stopCh chan struct{}) error { +func (dis *ResourceDistributor) Watch(clientId string, rvs types.TransitResourceVersionMap, watchChan chan *types.NodeEvent, stopCh chan struct{}) error { var nodeEventQueue *cache.NodeEventQueue var isOK bool if nodeEventQueue, isOK = dis.nodeEventQueueMap[clientId]; !isOK || nodeEventQueue == nil { @@ -247,11 +245,11 @@ func (dis *ResourceDistributor) Watch(clientId string, rvs types.TransitResource return nodeEventQueue.Watch(internal_rvs, watchChan, stopCh) } -func (dis *ResourceDistributor) ProcessEvents(events []*event.NodeEvent) (bool, types.TransitResourceVersionMap) { +func (dis *ResourceDistributor) ProcessEvents(events []*types.NodeEvent) (bool, types.TransitResourceVersionMap) { eventsToProcess := make([]*node.ManagedNodeEvent, len(events)) for i := 0; i < len(events); i++ { if events[i] != nil { - loc := location.NewLocation(location.Region(events[i].Node.GeoInfo.Region), location.ResourcePartition(events[i].Node.GeoInfo.ResourcePartition)) + loc := types.NewLocation(types.Region(events[i].Node.GeoInfo.Region), 
types.ResourcePartition(events[i].Node.GeoInfo.ResourcePartition)) events[i].SetCheckpoint(metrics.Distributor_Received) if loc != nil { eventsToProcess[i] = node.NewManagedNodeEvent(events[i], loc) diff --git a/resource-management/pkg/distributor/distributor_concurrency_test.go b/resource-management/pkg/distributor/distributor_concurrency_test.go index 0ba95d0c..3073f6ba 100644 --- a/resource-management/pkg/distributor/distributor_concurrency_test.go +++ b/resource-management/pkg/distributor/distributor_concurrency_test.go @@ -9,8 +9,6 @@ import ( "time" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" - "global-resource-service/resource-management/pkg/common-lib/types/location" ) func TestSingleRPMutipleClients_Workflow(t *testing.T) { @@ -104,7 +102,7 @@ func TestSingleRPMutipleClients_Workflow(t *testing.T) { // check each node event nodeIds := make(map[string]bool) for _, node := range nodes { - nodeLoc := types.RvLocation{Region: location.Region(node.GeoInfo.Region), Partition: location.ResourcePartition(node.GeoInfo.ResourcePartition)} + nodeLoc := types.RvLocation{Region: types.Region(node.GeoInfo.Region), Partition: types.ResourcePartition(node.GeoInfo.ResourcePartition)} assert.NotNil(t, nodeLoc) assert.True(t, latestRVs[nodeLoc] >= node.GetResourceVersionInt64()) if _, isOK := nodeIds[node.Id]; isOK { @@ -121,7 +119,7 @@ func TestSingleRPMutipleClients_Workflow(t *testing.T) { allWaitGroup := new(sync.WaitGroup) start = time.Now() for i := 0; i < tt.clientNum; i++ { - watchCh := make(chan *event.NodeEvent) + watchCh := make(chan *types.NodeEvent) err := distributor.Watch(clientIds[i], latestRVsByClient[i], watchCh, stopCh) if err != nil { assert.Fail(t, "Encountered error while building watch connection.", "Encountered error while building watch connection. 
Error %v", err) @@ -129,11 +127,11 @@ func TestSingleRPMutipleClients_Workflow(t *testing.T) { } allWaitGroup.Add(1) - go func(expectedEventCount int, watchCh chan *event.NodeEvent, wg *sync.WaitGroup) { + go func(expectedEventCount int, watchCh chan *types.NodeEvent, wg *sync.WaitGroup) { eventCount := 0 for e := range watchCh { - assert.Equal(t, event.Modified, e.Type) + assert.Equal(t, types.Modified, e.Type) eventCount++ if eventCount >= expectedEventCount { @@ -148,13 +146,13 @@ func TestSingleRPMutipleClients_Workflow(t *testing.T) { for i := 0; i < tt.clientNum; i++ { go func(expectedEventCount int, nodes []*types.LogicalNode, clientId string) { for j := 0; j < expectedEventCount/len(nodes)+2; j++ { - updateNodeEvents := make([]*event.NodeEvent, len(nodes)) + updateNodeEvents := make([]*types.NodeEvent, len(nodes)) for k := 0; k < len(nodes); k++ { rvToGenerate += 1 newNode := nodes[k].Copy() newNode.ResourceVersion = strconv.Itoa(rvToGenerate) - updateNodeEvents[k] = event.NewNodeEvent(newNode, event.Modified) + updateNodeEvents[k] = types.NewNodeEvent(newNode, types.Modified) } result, rvMap := distributor.ProcessEvents(updateNodeEvents) assert.True(t, result) @@ -218,13 +216,13 @@ func TestMultipleRPsMutipleClients_Workflow(t *testing.T) { defer tearDown() // create nodes - eventsAdd := make([][][]*event.NodeEvent, tt.regionNum) + eventsAdd := make([][][]*types.NodeEvent, tt.regionNum) for i := 0; i < tt.regionNum; i++ { - regionName := location.Regions[i] - eventsAdd[i] = make([][]*event.NodeEvent, tt.rpNum) + regionName := types.Regions[i] + eventsAdd[i] = make([][]*types.NodeEvent, tt.rpNum) for j := 0; j < tt.rpNum; j++ { - rpName := location.ResourcePartitions[j] - loc := location.NewLocation(regionName, rpName) + rpName := types.ResourcePartitions[j] + loc := types.NewLocation(regionName, rpName) eventsAdd[i][j] = generateAddNodeEvent(tt.hostPerRP, loc) } @@ -236,7 +234,7 @@ func TestMultipleRPsMutipleClients_Workflow(t *testing.T) { start := 
time.Now() for i := 0; i < tt.regionNum; i++ { for j := 0; j < tt.rpNum; j++ { - go func(done *sync.WaitGroup, events []*event.NodeEvent) { + go func(done *sync.WaitGroup, events []*types.NodeEvent) { result, rvMap := distributor.ProcessEvents(events) done.Done() assert.True(t, result) @@ -289,7 +287,7 @@ func TestMultipleRPsMutipleClients_Workflow(t *testing.T) { // check each node event nodeIds := make(map[string]bool) for _, node := range nodes { - nodeLoc := types.RvLocation{Region: location.Region(node.GeoInfo.Region), Partition: location.ResourcePartition(node.GeoInfo.ResourcePartition)} + nodeLoc := types.RvLocation{Region: types.Region(node.GeoInfo.Region), Partition: types.ResourcePartition(node.GeoInfo.ResourcePartition)} assert.NotNil(t, nodeLoc) assert.True(t, latestRVs[nodeLoc] >= node.GetResourceVersionInt64()) if _, isOK := nodeIds[node.Id]; isOK { @@ -308,7 +306,7 @@ func TestMultipleRPsMutipleClients_Workflow(t *testing.T) { allWaitGroup := new(sync.WaitGroup) start = time.Now() for i := 0; i < tt.clientNum; i++ { - watchCh := make(chan *event.NodeEvent) + watchCh := make(chan *types.NodeEvent) stopCh := make(chan struct{}) err := distributor.Watch(clientIds[i], latestRVsByClient[i], watchCh, stopCh) if err != nil { @@ -317,11 +315,11 @@ func TestMultipleRPsMutipleClients_Workflow(t *testing.T) { } allWaitGroup.Add(1) - go func(expectedEventCount int, watchCh chan *event.NodeEvent, wg *sync.WaitGroup) { + go func(expectedEventCount int, watchCh chan *types.NodeEvent, wg *sync.WaitGroup) { eventCount := 0 for e := range watchCh { - assert.Equal(t, event.Modified, e.Type) + assert.Equal(t, types.Modified, e.Type) eventCount++ if eventCount >= expectedEventCount { @@ -341,12 +339,12 @@ func TestMultipleRPsMutipleClients_Workflow(t *testing.T) { eventCount := 0 for j := 0; j < expectedEventCount/len(nodes)+2; j++ { - updateNodeEvents := make([]*event.NodeEvent, len(nodes)) + updateNodeEvents := make([]*types.NodeEvent, len(nodes)) for k := 0; k < 
len(nodes); k++ { rvToGenerate += 1 newNode := nodes[k].Copy() newNode.ResourceVersion = strconv.Itoa(rvToGenerate) - updateNodeEvents[k] = event.NewNodeEvent(newNode, event.Modified) + updateNodeEvents[k] = types.NewNodeEvent(newNode, types.Modified) eventCount++ if eventCount >= expectedEventCount { @@ -460,7 +458,7 @@ func TestProcessEvents_TwoRPs_AddNodes_Sequential(t *testing.T) { // generate add node events for i := 0; i < len(nodeCounts); i++ { eventsAdd1 := generateAddNodeEvent(nodeCounts[i], defaultLocBeijing_RP1) - eventsAdd2 := generateAddNodeEvent(nodeCounts[i], location.NewLocation(location.Shanghai, location.ResourcePartition2)) + eventsAdd2 := generateAddNodeEvent(nodeCounts[i], types.NewLocation(types.Shanghai, types.ResourcePartition2)) start := time.Now() distributor.ProcessEvents(eventsAdd1) _, rvMap := distributor.ProcessEvents(eventsAdd2) @@ -559,18 +557,18 @@ func TestProcessEvents_TwoRPs_Concurrent(t *testing.T) { // generate add node events for i := 0; i < len(nodeCounts); i++ { eventsAdd1 := generateAddNodeEvent(nodeCounts[i], defaultLocBeijing_RP1) - eventsAdd2 := generateAddNodeEvent(nodeCounts[i], location.NewLocation(location.Shanghai, location.ResourcePartition2)) + eventsAdd2 := generateAddNodeEvent(nodeCounts[i], types.NewLocation(types.Shanghai, types.ResourcePartition2)) start := time.Now() wg := new(sync.WaitGroup) wg.Add(2) - go func(done *sync.WaitGroup, eventsToProcess []*event.NodeEvent) { + go func(done *sync.WaitGroup, eventsToProcess []*types.NodeEvent) { distributor.ProcessEvents(eventsToProcess) done.Done() }(wg, eventsAdd1) - go func(done *sync.WaitGroup, eventsToProcess []*event.NodeEvent) { + go func(done *sync.WaitGroup, eventsToProcess []*types.NodeEvent) { distributor.ProcessEvents(eventsToProcess) done.Done() }(wg, eventsAdd2) diff --git a/resource-management/pkg/distributor/distributor_test.go b/resource-management/pkg/distributor/distributor_test.go index 45f5e456..b32bc5ab 100644 --- 
a/resource-management/pkg/distributor/distributor_test.go +++ b/resource-management/pkg/distributor/distributor_test.go @@ -9,8 +9,6 @@ import ( "time" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" - "global-resource-service/resource-management/pkg/common-lib/types/location" "global-resource-service/resource-management/pkg/distributor/cache" "global-resource-service/resource-management/pkg/distributor/storage" ) @@ -20,9 +18,9 @@ var rvToGenerate = 0 var singleTestLock = sync.Mutex{} -var defaultLocBeijing_RP1 = location.NewLocation(location.Beijing, location.ResourcePartition1) -var defaultRegion = location.Beijing -var defaultPartition = location.ResourcePartition1 +var defaultLocBeijing_RP1 = types.NewLocation(types.Beijing, types.ResourcePartition1) +var defaultRegion = types.Beijing +var defaultPartition = types.ResourcePartition1 const defaultVirtualStoreNumPerRP = 200 // 10K per resource partition, 50 hosts per virtual node store @@ -74,7 +72,7 @@ func TestDistributorInit(t *testing.T) { assert.NotEqual(t, lowerBound, upperBound, "Expecting lower bound not equal to upper bound for virtual store %d. 
Got hash range (%f, %f]", i, lowerBound, upperBound) lower = upperBound if i == len(*defaultNodeStores)-1 { - assert.Equal(t, location.RingRange, upperBound, "Expecting last virtual store upper bound equals %f but got %f", location.RingRange, upperBound) + assert.Equal(t, types.RingRange, upperBound, "Expecting last virtual store upper bound equals %f but got %f", types.RingRange, upperBound) } loc := store.GetLocation() @@ -85,7 +83,7 @@ func TestDistributorInit(t *testing.T) { } } -func measureProcessEvent(t *testing.T, dis *ResourceDistributor, eventType string, events []*event.NodeEvent, previousNodeCount int) { +func measureProcessEvent(t *testing.T, dis *ResourceDistributor, eventType string, events []*types.NodeEvent, previousNodeCount int) { // get all node ids nodeIds := make(map[string]bool, len(events)) eventCount := 0 @@ -193,19 +191,19 @@ func TestAddNodes(t *testing.T) { } } -func generateAddNodeEvent(eventNum int, loc *location.Location) []*event.NodeEvent { - result := make([]*event.NodeEvent, eventNum) +func generateAddNodeEvent(eventNum int, loc *types.Location) []*types.NodeEvent { + result := make([]*types.NodeEvent, eventNum) for i := 0; i < eventNum; i++ { rvToGenerate += 1 node := createRandomNode(rvToGenerate, loc) - nodeEvent := event.NewNodeEvent(node, event.Added) + nodeEvent := types.NewNodeEvent(node, types.Added) result[i] = nodeEvent } return result } -func generateUpdateNodeEvents(originalEvents []*event.NodeEvent) []*event.NodeEvent { - result := make([]*event.NodeEvent, len(originalEvents)) +func generateUpdateNodeEvents(originalEvents []*types.NodeEvent) []*types.NodeEvent { + result := make([]*types.NodeEvent, len(originalEvents)) for i := 0; i < len(originalEvents); i++ { rvToGenerate += 1 @@ -218,25 +216,25 @@ func generateUpdateNodeEvents(originalEvents []*event.NodeEvent) []*event.NodeEv }, } - newEvent := event.NewNodeEvent(lNode, event.Modified) + newEvent := types.NewNodeEvent(lNode, types.Modified) result[i] = newEvent } 
return result } -func generatedUpdateNodeEventsFromNodeList(nodes []*types.LogicalNode) []*event.NodeEvent { - result := make([]*event.NodeEvent, len(nodes)) +func generatedUpdateNodeEventsFromNodeList(nodes []*types.LogicalNode) []*types.NodeEvent { + result := make([]*types.NodeEvent, len(nodes)) for i := 0; i < len(nodes); i++ { rvToGenerate += 1 node := nodes[i].Copy() node.ResourceVersion = strconv.Itoa(rvToGenerate) - newEvent := event.NewNodeEvent(node, event.Modified) + newEvent := types.NewNodeEvent(node, types.Modified) result[i] = newEvent } return result } -func createRandomNode(rv int, loc *location.Location) *types.LogicalNode { +func createRandomNode(rv int, loc *types.Location) *types.LogicalNode { id := uuid.New() return &types.LogicalNode{ Id: id.String(), @@ -374,7 +372,7 @@ func TestRegistrationWorkflow(t *testing.T) { // check each node event nodeIds := make(map[string]bool) for _, node := range nodes { - nodeLoc := types.RvLocation{Region: location.Region(node.GeoInfo.Region), Partition: location.ResourcePartition(node.GeoInfo.ResourcePartition)} + nodeLoc := types.RvLocation{Region: types.Region(node.GeoInfo.Region), Partition: types.ResourcePartition(node.GeoInfo.ResourcePartition)} assert.NotNil(t, nodeLoc) assert.True(t, latestRVs[nodeLoc] >= node.GetResourceVersionInt64()) if _, isOK := nodeIds[node.Id]; isOK { @@ -390,13 +388,13 @@ func TestRegistrationWorkflow(t *testing.T) { updateNodeEvents := generatedUpdateNodeEventsFromNodeList(nodes) result2, rvMap2 := distributor.ProcessEvents(updateNodeEvents) assert.True(t, result2, "Expecting update nodes successfully") - loc := types.RvLocation{Region: location.Region(nodes[0].GeoInfo.Region), Partition: location.ResourcePartition(nodes[0].GeoInfo.ResourcePartition)} + loc := types.RvLocation{Region: types.Region(nodes[0].GeoInfo.Region), Partition: types.ResourcePartition(nodes[0].GeoInfo.ResourcePartition)} assert.Equal(t, uint64(rvToGenerate), rvMap2[loc]) assert.Equal(t, oldNodeRV, 
nodes[0].GetResourceVersionInt64(), "Expecting listed nodes are snapshoted and cannot be affected by update") // client watch node update - watchCh := make(chan *event.NodeEvent) + watchCh := make(chan *types.NodeEvent) stopCh := make(chan struct{}) err = distributor.Watch(clientId, latestRVs, watchCh, stopCh) if err != nil { @@ -405,8 +403,8 @@ func TestRegistrationWorkflow(t *testing.T) { } watchedEventCount := 0 for e := range watchCh { - assert.Equal(t, event.Modified, e.Type) - nodeLoc := types.RvLocation{Region: location.Region(e.Node.GeoInfo.Region), Partition: location.ResourcePartition(e.Node.GeoInfo.ResourcePartition)} + assert.Equal(t, types.Modified, e.Type) + nodeLoc := types.RvLocation{Region: types.Region(e.Node.GeoInfo.Region), Partition: types.ResourcePartition(e.Node.GeoInfo.ResourcePartition)} assert.Equal(t, loc, nodeLoc) watchedEventCount++ @@ -449,7 +447,7 @@ func TestWatchRenewal(t *testing.T) { // check each node event nodeIds := make(map[string]bool) for _, node := range nodes { - nodeLoc := types.RvLocation{Region: location.Region(node.GeoInfo.Region), Partition: location.ResourcePartition(node.GeoInfo.ResourcePartition)} + nodeLoc := types.RvLocation{Region: types.Region(node.GeoInfo.Region), Partition: types.ResourcePartition(node.GeoInfo.ResourcePartition)} assert.NotNil(t, nodeLoc) assert.True(t, latestRVs[nodeLoc] >= node.GetResourceVersionInt64()) if _, isOK := nodeIds[node.Id]; isOK { @@ -459,10 +457,10 @@ func TestWatchRenewal(t *testing.T) { } } assert.Equal(t, len(nodes), len(nodeIds)) - loc := location.NewLocation(location.Region(nodes[0].GeoInfo.Region), location.ResourcePartition(nodes[0].GeoInfo.ResourcePartition)) + loc := types.NewLocation(types.Region(nodes[0].GeoInfo.Region), types.ResourcePartition(nodes[0].GeoInfo.ResourcePartition)) // client watch node update - watchCh := make(chan *event.NodeEvent) + watchCh := make(chan *types.NodeEvent) stopCh := make(chan struct{}) err = distributor.Watch(clientId, latestRVs, 
watchCh, stopCh) if err != nil { @@ -493,7 +491,7 @@ func TestWatchRenewal(t *testing.T) { t.Logf("Watch renewal .....................") close(stopCh) time.Sleep(100 * time.Millisecond) // note here sleep is necessary. otherwise previous watch channel was not successfully discarded - watchCh2 := make(chan *event.NodeEvent) + watchCh2 := make(chan *types.NodeEvent) stopCh2 := make(chan struct{}) err = distributor.Watch(clientId, rvMap2, watchCh2, stopCh2) if err != nil { @@ -521,12 +519,12 @@ func TestWatchRenewal(t *testing.T) { t.Logf("Latest rvs after updates: %v\n", rvMap3) } -func watch(t *testing.T, wg *sync.WaitGroup, lastRVResult *int, watchedEventCount *int, expectedEventCount int, watchCh chan *event.NodeEvent, loc *location.Location) { +func watch(t *testing.T, wg *sync.WaitGroup, lastRVResult *int, watchedEventCount *int, expectedEventCount int, watchCh chan *types.NodeEvent, loc *types.Location) { go func(wg *sync.WaitGroup, t *testing.T, lastRVResult *int, watchedEventCount *int, expectedEventCount int) { lastRV := int(0) for e := range watchCh { - assert.Equal(t, event.Modified, e.Type) - nodeLoc := location.NewLocation(location.Region(e.Node.GeoInfo.Region), location.ResourcePartition(e.Node.GeoInfo.ResourcePartition)) + assert.Equal(t, types.Modified, e.Type) + nodeLoc := types.NewLocation(types.Region(e.Node.GeoInfo.Region), types.ResourcePartition(e.Node.GeoInfo.ResourcePartition)) assert.Equal(t, loc, nodeLoc) *watchedEventCount++ diff --git a/resource-management/pkg/distributor/node/managedNodeEvent.go b/resource-management/pkg/distributor/node/managedNodeEvent.go index 92ecebc5..62cad335 100644 --- a/resource-management/pkg/distributor/node/managedNodeEvent.go +++ b/resource-management/pkg/distributor/node/managedNodeEvent.go @@ -5,17 +5,15 @@ import ( "strconv" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" - 
"global-resource-service/resource-management/pkg/common-lib/types/location" ) // TODO - add more fields for minimal node record type ManagedNodeEvent struct { - nodeEvent *event.NodeEvent - loc *location.Location + nodeEvent *types.NodeEvent + loc *types.Location } -func NewManagedNodeEvent(nodeEvent *event.NodeEvent, loc *location.Location) *ManagedNodeEvent { +func NewManagedNodeEvent(nodeEvent *types.NodeEvent, loc *types.Location) *ManagedNodeEvent { return &ManagedNodeEvent{ nodeEvent: nodeEvent, loc: loc, @@ -26,7 +24,7 @@ func (n *ManagedNodeEvent) GetId() string { return n.nodeEvent.Node.Id } -func (n *ManagedNodeEvent) GetLocation() *location.Location { +func (n *ManagedNodeEvent) GetLocation() *types.Location { return n.loc } @@ -44,11 +42,11 @@ func (n *ManagedNodeEvent) GetResourceVersion() uint64 { return rv } -func (n *ManagedNodeEvent) GetEventType() event.EventType { +func (n *ManagedNodeEvent) GetEventType() types.EventType { return n.nodeEvent.Type } -func (n *ManagedNodeEvent) GetNodeEvent() *event.NodeEvent { +func (n *ManagedNodeEvent) GetNodeEvent() *types.NodeEvent { return n.nodeEvent } diff --git a/resource-management/pkg/distributor/storage/nodestore.go b/resource-management/pkg/distributor/storage/nodestore.go index 1a28185c..85f3bbfe 100644 --- a/resource-management/pkg/distributor/storage/nodestore.go +++ b/resource-management/pkg/distributor/storage/nodestore.go @@ -8,8 +8,6 @@ import ( "global-resource-service/resource-management/pkg/common-lib/hash" "global-resource-service/resource-management/pkg/common-lib/interfaces/store" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" - "global-resource-service/resource-management/pkg/common-lib/types/location" "global-resource-service/resource-management/pkg/distributor/cache" "global-resource-service/resource-management/pkg/distributor/node" ) @@ -26,7 +24,7 @@ type VirtualNodeStore struct { 
upperbound float64 // one virtual store can only have nodes from one resource partition - location location.Location + location types.Location clientId string eventQueue *cache.NodeEventQueue @@ -38,7 +36,7 @@ func (vs *VirtualNodeStore) GetHostNum() int { return len(vs.nodeEventByHash) } -func (vs *VirtualNodeStore) GetLocation() location.Location { +func (vs *VirtualNodeStore) GetLocation() types.Location { return vs.location } @@ -98,7 +96,7 @@ func (vs *VirtualNodeStore) GenerateBookmarkEvent() *node.ManagedNodeEvent { for _, n := range vs.nodeEventByHash { logicalNode := n.CopyNode() - nodeEvent := event.NewNodeEvent(logicalNode, event.Bookmark) + nodeEvent := types.NewNodeEvent(logicalNode, types.Bookmark) return node.NewManagedNodeEvent(nodeEvent, n.GetLocation()) } return nil @@ -146,7 +144,7 @@ func NewNodeStore(vNodeNumPerRP int, regionNum int, partitionMaxNum int) *NodeSt ns := &NodeStore{ virtualNodeNum: totalVirtualNodeNum, vNodeStores: &virtualNodeStores, - granularOfRing: location.RingRange / (float64(totalVirtualNodeNum)), + granularOfRing: types.RingRange / (float64(totalVirtualNodeNum)), regionNum: regionNum, partitionMaxNum: partitionMaxNum, resourceSlots: regionNum * partitionMaxNum, @@ -166,7 +164,7 @@ func (ns *NodeStore) GetCurrentResourceVersions() types.TransitResourceVersionMa for i := 0; i < ns.regionNum; i++ { for j := 0; j < ns.partitionMaxNum; j++ { if ns.currentRVs[i][j] > 0 { - rvMap[types.RvLocation{Region: location.Regions[i], Partition: location.ResourcePartitions[j]}] = ns.currentRVs[i][j] + rvMap[types.RvLocation{Region: types.Regions[i], Partition: types.ResourcePartitions[j]}] = ns.currentRVs[i][j] } } } @@ -203,11 +201,11 @@ func (ns *NodeStore) generateVirtualNodeStores(vNodeNumPerRP int) { vNodeIndex := 0 for k := 0; k < ns.regionNum; k++ { - region := location.Regions[k] - rpsInRegion := location.GetRPsForRegion(region) + region := types.Regions[k] + rpsInRegion := types.GetRPsForRegion(region) for m := 0; m < 
ns.partitionMaxNum; m++ { - loc := location.NewLocation(region, rpsInRegion[m]) + loc := types.NewLocation(region, rpsInRegion[m]) lowerBound, upperBound := loc.GetArcRangeFromLocation() for i := 0; i < vNodeNumPerRP; i++ { @@ -228,7 +226,7 @@ func (ns *NodeStore) generateVirtualNodeStores(vNodeNumPerRP int) { } } - (*ns.vNodeStores)[ns.virtualNodeNum-1].upperbound = location.RingRange + (*ns.vNodeStores)[ns.virtualNodeNum-1].upperbound = types.RingRange } func (ns *NodeStore) CreateNode(nodeEvent *node.ManagedNodeEvent) { @@ -245,7 +243,7 @@ func (ns *NodeStore) UpdateNode(nodeEvent *node.ManagedNodeEvent) { } // TODO -func (ns NodeStore) DeleteNode(nodeEvent event.NodeEvent) { +func (ns NodeStore) DeleteNode(nodeEvent types.NodeEvent) { } func (ns *NodeStore) ProcessNodeEvents(nodeEvents []*node.ManagedNodeEvent, persistHelper *DistributorPersistHelper) (bool, types.TransitResourceVersionMap) { @@ -284,9 +282,9 @@ func (ns *NodeStore) ProcessNodeEvents(nodeEvents []*node.ManagedNodeEvent, pers func (ns *NodeStore) processNodeEvent(nodeEvent *node.ManagedNodeEvent) bool { switch nodeEvent.GetEventType() { - case event.Added: + case types.Added: ns.CreateNode(nodeEvent) - case event.Modified: + case types.Modified: ns.UpdateNode(nodeEvent) default: // TODO - action needs to take when non acceptable events happened diff --git a/resource-management/pkg/service-api/endpoints/installer.go b/resource-management/pkg/service-api/endpoints/installer.go index b96463f8..111d688d 100644 --- a/resource-management/pkg/service-api/endpoints/installer.go +++ b/resource-management/pkg/service-api/endpoints/installer.go @@ -14,7 +14,6 @@ import ( store "global-resource-service/resource-management/pkg/common-lib/interfaces/store" "global-resource-service/resource-management/pkg/common-lib/metrics" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" apiTypes 
"global-resource-service/resource-management/pkg/service-api/types" ) @@ -169,7 +168,7 @@ func (i *Installer) serverWatch(resp http.ResponseWriter, req *http.Request, cli klog.V(3).Infof("Serving watch for client: %s", clientId) // For 630 distributor impl, watchChannel and stopChannel passed into the Watch routine from API layer - watchCh := make(chan *event.NodeEvent, WatchChannelSize) + watchCh := make(chan *types.NodeEvent, WatchChannelSize) stopCh := make(chan struct{}) // Signal the distributor to stop the watch for this client on exit @@ -232,7 +231,7 @@ func (i *Installer) serverWatch(resp http.ResponseWriter, req *http.Request, cli flusher.Flush() } record.SetCheckpoint(metrics.Serializer_Sent) - event.AddLatencyMetricsAllCheckpoints(record) + types.AddLatencyMetricsAllCheckpoints(record) } } } diff --git a/resource-management/pkg/service-api/endpoints/installer_test.go b/resource-management/pkg/service-api/endpoints/installer_test.go index d7242fa6..c5b018f4 100644 --- a/resource-management/pkg/service-api/endpoints/installer_test.go +++ b/resource-management/pkg/service-api/endpoints/installer_test.go @@ -12,7 +12,6 @@ import ( "k8s.io/klog/v2" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" "global-resource-service/resource-management/pkg/distributor" "global-resource-service/resource-management/pkg/distributor/storage" apitypes "global-resource-service/resource-management/pkg/service-api/types" @@ -49,12 +48,12 @@ func createRandomNode(rv int) *types.LogicalNode { } } -func generateAddNodeEvent(eventNum int) []*event.NodeEvent { - result := make([]*event.NodeEvent, eventNum) +func generateAddNodeEvent(eventNum int) []*types.NodeEvent { + result := make([]*types.NodeEvent, eventNum) for i := 0; i < eventNum; i++ { rvToGenerate += 1 node := createRandomNode(rvToGenerate) - nodeEvent := event.NewNodeEvent(node, event.Added) + nodeEvent := types.NewNodeEvent(node, 
types.Added) result[i] = nodeEvent } return result diff --git a/resource-management/pkg/service-api/types/types.go b/resource-management/pkg/service-api/types/types.go index 2e37e6a9..c964104e 100644 --- a/resource-management/pkg/service-api/types/types.go +++ b/resource-management/pkg/service-api/types/types.go @@ -27,6 +27,6 @@ type ClientRegistrationResponse struct { // NodeList is the list of LogicalNodes returned from Distributor allocated for this client // ResourceVersions are the list of RVs from each RP type ListNodeResponse struct { - NodeList []*types.LogicalNode `json:"node_list",omitempty` + NodeList []*types.LogicalNode `json:"node_list,omitempty"` ResourceVersions types.TransitResourceVersionMap `json:"resource_version_map,omitempty"` } diff --git a/resource-management/pkg/store/redis/redis.go b/resource-management/pkg/store/redis/redis.go index c809f118..4940cde6 100644 --- a/resource-management/pkg/store/redis/redis.go +++ b/resource-management/pkg/store/redis/redis.go @@ -4,6 +4,8 @@ import ( "context" "encoding/json" "fmt" + "global-resource-service/resource-management/pkg/common-lib/serializer" + "global-resource-service/resource-management/pkg/common-lib/serializer/protobuf" "time" "k8s.io/klog/v2" @@ -15,8 +17,9 @@ import ( ) type Goredis struct { - client *redis.Client - ctx context.Context + client *redis.Client + ctx context.Context + serializer serializer.Serializer } // Initialize Redis Client @@ -36,8 +39,9 @@ func NewRedisClient() *Goredis { ctx := context.Background() return &Goredis{ - client: client, - ctx: ctx, + client: client, + ctx: ctx, + serializer: protobuf.NewSerializer("foo"), } } @@ -98,7 +102,7 @@ func (gr *Goredis) PersistNodes(logicalNodes []*types.LogicalNode) bool { for _, logicalNode := range logicalNodes { logicalNodeKey := logicalNode.GetKey() - logicalNodeBytes, err := json.Marshal(logicalNode) + logicalNodeBytes, err := gr.serializer.Marshal(logicalNode) if err != nil { klog.Errorf("Error from JSON Marshal for 
Logical Nodes. error %v", err) @@ -176,7 +180,7 @@ func (gr *Goredis) GetNodes() []*types.LogicalNode { } if err != redis.Nil { - err = json.Unmarshal(value, &logicalNode) + _, err = gr.serializer.Decode(value, &logicalNode) if err != nil { klog.Errorf("Error from JSON Unmarshal for LogicalNode. error %v", err) diff --git a/resource-management/pkg/store/redis/redis_test.go b/resource-management/pkg/store/redis/redis_test.go index 047d7448..585eb39a 100644 --- a/resource-management/pkg/store/redis/redis_test.go +++ b/resource-management/pkg/store/redis/redis_test.go @@ -6,7 +6,6 @@ import ( "global-resource-service/resource-management/pkg/common-lib/interfaces/store" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/location" ) var GR *Goredis @@ -85,7 +84,7 @@ func TestPersistNodes(t *testing.T) { // func TestPersistNodeStoreStatus(t *testing.T) { var CRV = make(types.TransitResourceVersionMap, 1) - testLocation := types.RvLocation{Region: location.Beijing, Partition: location.ResourcePartition1} + testLocation := types.RvLocation{Region: types.Beijing, Partition: types.ResourcePartition1} CRV[testLocation] = 1000 testCase0 := &store.NodeStoreStatus{ @@ -136,7 +135,7 @@ func TestPersistVirtualNodesAssignments(t *testing.T) { vNodeToSave := &store.VirtualNodeConfig{ Lowerbound: 1000.00, Upperbound: 2000.00, - Location: *location.NewLocation(location.Beijing, location.ResourcePartition1), + Location: *types.NewLocation(types.Beijing, types.ResourcePartition1), } vNodeConfigs[0] = vNodeToSave diff --git a/resource-management/test/e2e/singleClientTest.go b/resource-management/test/e2e/singleClientTest.go index ed3445a3..3064e559 100644 --- a/resource-management/test/e2e/singleClientTest.go +++ b/resource-management/test/e2e/singleClientTest.go @@ -13,7 +13,6 @@ import ( "global-resource-service/resource-management/pkg/clientSdk/tools/cache" utilruntime 
"global-resource-service/resource-management/pkg/clientSdk/util/runtime" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" "global-resource-service/resource-management/test/e2e/stats" ) @@ -184,17 +183,17 @@ func watchNodes(client rmsclient.RmsInterface, clientId string, crv types.Transi klog.Infof("End of results") return } - watchDelay := time.Now().UTC().Sub(record.Node.LastUpdatedTime) + watchDelay := time.Now().UTC().Sub(record.Node.LastUpdatedTime.Time) addWatchLatency(watchDelay, watchStats) logIfProlonged(&record, watchDelay, watchStats) switch record.Type { - case event.Added: + case types.Added: store.Add(*record.Node) watchStats.NumberOfAddedNodes++ - case event.Modified: + case types.Modified: store.Update(*record.Node) watchStats.NumberOfUpdatedNodes++ - case event.Deleted: + case types.Deleted: store.Delete(*record.Node) watchStats.NumberOfDeletedNodes++ @@ -220,7 +219,7 @@ func addWatchLatency(delay time.Duration, ws *stats.WatchStats) { //ws.WatchDelayLock.Unlock() } -func logIfProlonged(record *event.NodeEvent, delay time.Duration, ws *stats.WatchStats) { +func logIfProlonged(record *types.NodeEvent, delay time.Duration, ws *stats.WatchStats) { if delay > stats.LongWatchThreshold { klog.Warningf("Prolonged watch node from server: %v with time (%v)", record.Node.Id, delay) ws.NumberOfProlongedItems++ diff --git a/resource-management/test/resourceRegionMgrSimulator/data/regionNodeEvents.go b/resource-management/test/resourceRegionMgrSimulator/data/regionNodeEvents.go index d7440c62..ed5aabb4 100644 --- a/resource-management/test/resourceRegionMgrSimulator/data/regionNodeEvents.go +++ b/resource-management/test/resourceRegionMgrSimulator/data/regionNodeEvents.go @@ -9,9 +9,6 @@ import ( "k8s.io/klog/v2" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" - 
"global-resource-service/resource-management/pkg/common-lib/types/location" - simulatorTypes "global-resource-service/resource-management/test/resourceRegionMgrSimulator/types" ) @@ -36,7 +33,7 @@ const atEachMin10 = 10 // func Init(regionName string, rpNum, nodesPerRP int) { RegionNodeEventsList = generateAddedNodeEvents(regionName, rpNum, nodesPerRP) - RegionId = int(location.GetRegionFromRegionName(regionName)) + RegionId = int(types.GetRegionFromRegionName(regionName)) RpNum = rpNum NodesPerRP = nodesPerRP } @@ -94,21 +91,21 @@ func GetRegionNodeModifiedEventsCRV(rvs types.TransitResourceVersionMap) (simula var count uint64 = 0 for j := 0; j < RpNum; j++ { - pulledNodeListEventsPerRP := make([]*event.NodeEvent, NodesPerRP) + pulledNodeListEventsPerRP := types.RpNodeEvents{NodeEvents: make([]*types.NodeEvent, NodesPerRP)} indexPerRP := 0 for i := 0; i < NodesPerRP; i++ { - region := snapshotNodeListEvents[j][i].Node.GeoInfo.Region - rp := snapshotNodeListEvents[j][i].Node.GeoInfo.ResourcePartition - loc := types.RvLocation{Region: location.Region(region), Partition: location.ResourcePartition(rp)} + region := snapshotNodeListEvents[j].NodeEvents[i].Node.GeoInfo.Region + rp := snapshotNodeListEvents[j].NodeEvents[i].Node.GeoInfo.ResourcePartition + loc := types.RvLocation{Region: types.Region(region), Partition: types.ResourcePartition(rp)} - if snapshotNodeListEvents[j][i].Node.GetResourceVersionInt64() > rvs[loc] { + if snapshotNodeListEvents[j].NodeEvents[i].Node.GetResourceVersionInt64() > rvs[loc] { count += 1 - pulledNodeListEventsPerRP[indexPerRP] = snapshotNodeListEvents[j][i] + pulledNodeListEventsPerRP.NodeEvents[indexPerRP] = snapshotNodeListEvents[j].NodeEvents[i] indexPerRP += 1 } } - pulledNodeListEvents[j] = pulledNodeListEventsPerRP[:indexPerRP] + pulledNodeListEvents[j].NodeEvents = pulledNodeListEventsPerRP.NodeEvents[0:indexPerRP] } klog.V(9).Infof("Total (%v) Modified events are to be pulled", count) @@ -122,22 +119,22 @@ func 
GetRegionNodeModifiedEventsCRV(rvs types.TransitResourceVersionMap) (simula // This function is used to initialize the region node added event // func generateAddedNodeEvents(regionName string, rpNum, nodesPerRP int) simulatorTypes.RegionNodeEvents { - regionId := location.GetRegionFromRegionName(regionName) + regionId := types.GetRegionFromRegionName(regionName) eventsAdd := make(simulatorTypes.RegionNodeEvents, rpNum) for j := 0; j < rpNum; j++ { - rpName := location.ResourcePartitions[j] - loc := location.NewLocation(regionId, rpName) + rpName := types.ResourcePartitions[j] + loc := types.NewLocation(regionId, rpName) // Initialize the resource version starting from 0 for each RP var rvToGenerateRPs = 0 - eventsAdd[j] = make([]*event.NodeEvent, nodesPerRP) + eventsAdd[j] = types.RpNodeEvents{NodeEvents: make([]*types.NodeEvent, nodesPerRP)} for i := 0; i < nodesPerRP; i++ { rvToGenerateRPs += 1 node := createRandomNode(rvToGenerateRPs, loc) - nodeEvent := event.NewNodeEvent(node, event.Added) - eventsAdd[j][i] = nodeEvent + nodeEvent := types.NewNodeEvent(node, types.Added) + eventsAdd[j].NodeEvents[i] = nodeEvent } } @@ -153,9 +150,9 @@ func makeOneRPDown() { // Search the nodes in the RP to get the highestRV var highestRVForRP uint64 = 0 - length := len(eventsPerRP) + length := len(eventsPerRP.NodeEvents) for k := 0; k < length; k++ { - currentResourceVersion := eventsPerRP[k].Node.GetResourceVersionInt64() + currentResourceVersion := eventsPerRP.NodeEvents[k].Node.GetResourceVersionInt64() if highestRVForRP < currentResourceVersion { highestRVForRP = currentResourceVersion } @@ -166,14 +163,14 @@ func makeOneRPDown() { for i := 0; i < NodesPerRP; i++ { // reset the version of node with the current rvToGenerateRPs - node := eventsPerRP[i].Node + node := eventsPerRP.NodeEvents[i].Node node.ResourceVersion = strconv.FormatUint(rvToGenerateRPs, 10) // record the time to change resource version in resource partition - node.LastUpdatedTime = time.Now().UTC() + 
node.LastUpdatedTime = types.NewTime(time.Now().UTC()) - newEvent := event.NewNodeEvent(node, event.Modified) - RegionNodeEventsList[selectedRP][i] = newEvent + newEvent := types.NewNodeEvent(node, types.Modified) + RegionNodeEventsList[selectedRP].NodeEvents[i] = newEvent rvToGenerateRPs++ } @@ -194,9 +191,9 @@ func makeDataUpdate(changesThreshold int) { // Search the nodes in the RP to get the highestRV var highestRVForRP uint64 = 0 - length := len(eventsPerRP) + length := len(eventsPerRP.NodeEvents) for k := 0; k < length; k++ { - currentResourceVersion := eventsPerRP[k].Node.GetResourceVersionInt64() + currentResourceVersion := eventsPerRP.NodeEvents[k].Node.GetResourceVersionInt64() if highestRVForRP < currentResourceVersion { highestRVForRP = currentResourceVersion } @@ -208,7 +205,7 @@ func makeDataUpdate(changesThreshold int) { for count < nodeChangesPerRP { // Randonly create data update per RP node events list i := int(rand.Intn(length)) - node := eventsPerRP[i].Node + node := eventsPerRP.NodeEvents[i].Node // special case: Consider 5000 changes per RP for 500 nodes per RP // each node has 10 changes within this cycle @@ -219,10 +216,10 @@ func makeDataUpdate(changesThreshold int) { node.ResourceVersion = strconv.FormatUint(currentResourceVersion+1, 10) } // record the time to change resource version in resource partition - node.LastUpdatedTime = time.Now().UTC() + node.LastUpdatedTime = types.NewTime(time.Now().UTC()) - newEvent := event.NewNodeEvent(node, event.Modified) - RegionNodeEventsList[j][i] = newEvent + newEvent := types.NewNodeEvent(node, types.Modified) + RegionNodeEventsList[j].NodeEvents[i] = newEvent count++ } @@ -233,7 +230,7 @@ func makeDataUpdate(changesThreshold int) { // Create logical node with random UUID // -func createRandomNode(rv int, loc *location.Location) *types.LogicalNode { +func createRandomNode(rv int, loc *types.Location) *types.LogicalNode { id := uuid.New() return &types.LogicalNode{ @@ -258,7 +255,7 @@ func 
createRandomNode(rv int, loc *location.Location) *types.LogicalNode { MilliCPU: int64(rand.Intn(200) + 20), Memory: int64(rand.Intn(2000)), EphemeralStorage: int64(rand.Intn(2000000)), - AllowedPodNumber: int(rand.Intn(20000000)), + AllowedPodNumber: int32(rand.Intn(20000000)), ScalarResources: map[types.ResourceName]int64{ "GPU": int64(rand.Intn(200)), "FPGA": int64(rand.Intn(200)), @@ -267,6 +264,6 @@ func createRandomNode(rv int, loc *location.Location) *types.LogicalNode { Conditions: 111, Reserved: false, MachineType: types.NodeMachineType(id.String() + "-highend"), - LastUpdatedTime: time.Now().UTC(), + LastUpdatedTime: types.NewTime(time.Now().UTC()), } } diff --git a/resource-management/test/resourceRegionMgrSimulator/handlers/regionNodeEvents.go b/resource-management/test/resourceRegionMgrSimulator/handlers/regionNodeEvents.go index f6ccf60c..5dd3f6f0 100644 --- a/resource-management/test/resourceRegionMgrSimulator/handlers/regionNodeEvents.go +++ b/resource-management/test/resourceRegionMgrSimulator/handlers/regionNodeEvents.go @@ -5,16 +5,24 @@ import ( "k8s.io/klog/v2" + "global-resource-service/resource-management/pkg/common-lib/serializer" + "global-resource-service/resource-management/pkg/common-lib/serializer/protobuf" + "global-resource-service/resource-management/pkg/common-lib/types" "global-resource-service/resource-management/test/resourceRegionMgrSimulator/data" simulatorTypes "global-resource-service/resource-management/test/resourceRegionMgrSimulator/types" ) -type RegionNodeEventHandler struct{} +type RegionNodeEventHandler struct { + serializer serializer.Serializer +} // NewRegionNodeEvents creates a Region Node Events handler with the given logger // func NewRegionNodeEventsHander() *RegionNodeEventHandler { - return &RegionNodeEventHandler{} + return &RegionNodeEventHandler{ + // serializer: localJson.NewSerializer("foo", false), + serializer: protobuf.NewSerializer("foo"), + } } func (re *RegionNodeEventHandler) SimulatorHandler(rw 
http.ResponseWriter, r *http.Request) { @@ -68,7 +76,7 @@ func (re *RegionNodeEventHandler) SimulatorHandler(rw http.ResponseWriter, r *ht // Process initpull or subsequentpull request // if r.URL.Path == InitPullPath || r.URL.Path == SubsequentPullPath { - var nodeEvents simulatorTypes.RegionNodeEvents + var nodeEvents []types.RpNodeEvents var count uint64 if r.URL.Path == InitPullPath { @@ -78,19 +86,19 @@ func (re *RegionNodeEventHandler) SimulatorHandler(rw http.ResponseWriter, r *ht } if count == 0 { - klog.V(6).Info("Pulling Region Node Events with batch is in the end") + klog.V(9).Info("Pulling Region Node Events with batch is in the end") + return } else { klog.V(6).Infof("Pulling Region Node Event with final batch size (%v) for (%v) RPs", count, len(nodeEvents)) } - response := &simulatorTypes.ResponseFromRRM{ + response := &types.ResponseFromRRM{ RegionNodeEvents: nodeEvents, - RvMap: aggregatorClientReq.CRV, Length: uint64(count), } // Serialize region node events result to JSON - err = response.ToJSON(rw) + err = re.serializer.Encode(response, rw) if err != nil { klog.Errorf("Error - Unable to marshal json : ", err) diff --git a/resource-management/test/resourceRegionMgrSimulator/types/types.go b/resource-management/test/resourceRegionMgrSimulator/types/types.go index 915b8754..dd03d00b 100644 --- a/resource-management/test/resourceRegionMgrSimulator/types/types.go +++ b/resource-management/test/resourceRegionMgrSimulator/types/types.go @@ -5,21 +5,12 @@ import ( "io" "global-resource-service/resource-management/pkg/common-lib/types" - "global-resource-service/resource-management/pkg/common-lib/types/event" ) -type RegionNodeEvents [][]*event.NodeEvent +type RegionNodeEvents []types.RpNodeEvents type PostCRVstatus bool -// RRM: Resource Region Manager -// -type ResponseFromRRM struct { - RegionNodeEvents [][]*event.NodeEvent - RvMap types.TransitResourceVersionMap - Length uint64 -} - // The type is for pulling data with batch from RRM - Resource Region 
Manager // type PullDataFromRRM struct { @@ -50,12 +41,6 @@ func (p *RegionNodeEvents) ToJSON(w io.Writer) error { return e.Encode(p) } -func (p *ResponseFromRRM) ToJSON(w io.Writer) error { - e := json.NewEncoder(w) - - return e.Encode(p) -} - func (p *PostCRVstatus) ToJSON(w io.Writer) error { e := json.NewEncoder(w)