diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go
index 01bdec105d..1a7751113b 100644
--- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go
+++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go
@@ -512,6 +512,9 @@ func TestGraphQLDataSourceFederation(t *testing.T) {
 						DataSource:     &Source{},
 						PostProcessing: DefaultPostProcessingConfiguration,
 					},
+					Info: &resolve.FetchInfo{
+						DataSourceID: "user.service",
+					},
 				},
 				Fields: []*resolve.Field{
 					{
@@ -589,7 +592,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) {
 							},
 						},
 						Fetch: &resolve.SingleFetch{
-							SerialID:             1,
+							SerialID: 1,
+							Info: &resolve.FetchInfo{
+								DataSourceID: "account.service",
+							},
 							DataSourceIdentifier: []byte("graphql_datasource.Source"),
 							FetchConfiguration: resolve.FetchConfiguration{
 								Input: `{"method":"POST","url":"http://account.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Account {name shippingInfo {zip}}}}","variables":{"representations":[$$0$$]}}}`,
diff --git a/v2/pkg/engine/datasource/httpclient/httpclient.go b/v2/pkg/engine/datasource/httpclient/httpclient.go
index accd863c92..ec9060c647 100644
--- a/v2/pkg/engine/datasource/httpclient/httpclient.go
+++ b/v2/pkg/engine/datasource/httpclient/httpclient.go
@@ -32,6 +32,7 @@ const (
 	UNDEFINED_VARIABLES                         = "undefined"
 	FORWARDED_CLIENT_HEADER_NAMES               = "forwarded_client_header_names"
 	FORWARDED_CLIENT_HEADER_REGULAR_EXPRESSIONS = "forwarded_client_header_regular_expressions"
+	TRACE                                       = "__trace__"
 )
 
 var (
@@ -41,6 +42,7 @@ var (
 		{BODY},
 		{HEADER},
 		{QUERYPARAMS},
+		{TRACE},
 	}
 	subscriptionInputPaths = [][]string{
 		{URL},
@@ -208,7 +210,7 @@ func SetInputPath(input, path []byte) []byte {
 	return out
 }
 
-func requestInputParams(input []byte) (url, method, body, headers, queryParams []byte) {
+func requestInputParams(input []byte) (url, method, body, headers, queryParams []byte, trace bool) {
 	jsonparser.EachKey(input, func(i int, bytes []byte, valueType jsonparser.ValueType, err error) {
 		switch i {
 		case 0:
@@ -221,6 +223,8 @@ func requestInputParams(input []byte) (url, method, body, headers, queryParams [
 			headers = bytes
 		case 4:
 			queryParams = bytes
+		case 5:
+			trace = bytes[0] == 't'
 		}
 	}, inputPaths...)
 	return
diff --git a/v2/pkg/engine/datasource/httpclient/nethttpclient.go b/v2/pkg/engine/datasource/httpclient/nethttpclient.go
index 73643791be..05a6e14c7a 100644
--- a/v2/pkg/engine/datasource/httpclient/nethttpclient.go
+++ b/v2/pkg/engine/datasource/httpclient/nethttpclient.go
@@ -5,6 +5,7 @@ import (
 	"compress/flate"
 	"compress/gzip"
 	"context"
+	"encoding/json"
 	"io"
 	"net/http"
 	"time"
@@ -40,9 +41,26 @@ var (
 	}
 )
 
+type TraceHTTP struct {
+	Request  TraceHTTPRequest  `json:"request"`
+	Response TraceHTTPResponse `json:"response"`
+}
+
+type TraceHTTPRequest struct {
+	Method  string      `json:"method"`
+	URL     string      `json:"url"`
+	Headers http.Header `json:"headers"`
+}
+
+type TraceHTTPResponse struct {
+	StatusCode int         `json:"status_code"`
+	Status     string      `json:"status"`
+	Headers    http.Header `json:"headers"`
+}
+
 func Do(client *http.Client, ctx context.Context, requestInput []byte, out io.Writer) (err error) {
-	url, method, body, headers, queryParams := requestInputParams(requestInput)
+	url, method, body, headers, queryParams, enableTrace := requestInputParams(requestInput)
 
 	request, err := http.NewRequestWithContext(ctx, string(method), string(url), bytes.NewReader(body))
 	if err != nil {
@@ -113,8 +131,38 @@ func Do(client *http.Client, ctx context.Context, requestInput []byte, out io.Wr
 		return err
 	}
 
-	_, err = io.Copy(out, respReader)
-	return
+	if !enableTrace {
+		_, err = io.Copy(out, respReader)
+		return
+	}
+
+	buf := &bytes.Buffer{}
+	_, err = io.Copy(buf, respReader)
+	if err != nil {
+		return err
+	}
+	responseTrace := TraceHTTP{
+		Request: TraceHTTPRequest{
+			Method:  request.Method,
+			URL:     request.URL.String(),
+			Headers: request.Header,
+		},
+		Response: TraceHTTPResponse{
+			StatusCode: response.StatusCode,
+			Status:     response.Status,
+			Headers:    response.Header,
+		},
+	}
+	trace, err := json.Marshal(responseTrace)
+	if err != nil {
+		return err
+	}
+	responseWithTraceExtension, err := jsonparser.Set(buf.Bytes(), trace, "extensions", "trace")
+	if err != nil {
+		return err
+	}
+	_, err = out.Write(responseWithTraceExtension)
+	return err
 }
 
 func respBodyReader(resp *http.Response) (io.ReadCloser, error) {
diff --git a/v2/pkg/engine/plan/configuration_visitor.go b/v2/pkg/engine/plan/configuration_visitor.go
index 640b5605fb..e2d02288a5 100644
--- a/v2/pkg/engine/plan/configuration_visitor.go
+++ b/v2/pkg/engine/plan/configuration_visitor.go
@@ -60,6 +60,7 @@ type objectFetchConfiguration struct {
 	fieldRef           int
 	fieldDefinitionRef int
 	fetchID            int
+	sourceID           string
 }
 
 func (c *configurationVisitor) currentSelectionSet() int {
@@ -654,6 +655,7 @@ func (c *configurationVisitor) addNewPlanner(ref int, typeName, fieldName, curre
 		fieldRef:           ref,
 		fieldDefinitionRef: fieldDefinition,
 		fetchID:            fetchID,
+		sourceID:           config.ID,
 	})
 
 	c.saveAddedPath(currentPathConfiguration)
diff --git a/v2/pkg/engine/plan/schemausageinfo.go b/v2/pkg/engine/plan/schemausageinfo.go
index 5807082ec4..749241672f 100644
--- a/v2/pkg/engine/plan/schemausageinfo.go
+++ b/v2/pkg/engine/plan/schemausageinfo.go
@@ -58,15 +58,7 @@ func (t *TypeFieldUsageInfo) Equals(other TypeFieldUsageInfo) bool {
 			return false
 		}
 	}
-	if len(t.Source.IDs) != len(other.Source.IDs) {
-		return false
-	}
-	for i := range t.Source.IDs {
-		if t.Source.IDs[i] != other.Source.IDs[i] {
-			return false
-		}
-	}
-	return true
+	return len(t.Source.IDs) == len(other.Source.IDs)
 }
 
 type InputTypeFieldUsageInfo struct {
diff --git a/v2/pkg/engine/plan/visitor.go b/v2/pkg/engine/plan/visitor.go
index 8f3255be40..7c8dac45ff 100644
--- a/v2/pkg/engine/plan/visitor.go
+++ b/v2/pkg/engine/plan/visitor.go
@@ -339,6 +339,7 @@ func (v *Visitor) resolveFieldInfo(ref, typeRef int) *resolve.FieldInfo {
 	}
 
 	sourceIDs := make([]string, 0, 1)
+
 	for i := range v.planners {
 		for j := range v.planners[i].paths {
 			if v.planners[i].paths[j].fieldRef == ref {
@@ -1038,5 +1039,11 @@ func (v *Visitor) configureFetch(internal objectFetchConfiguration, external res
 		DataSourceIdentifier: []byte(dataSourceType),
 	}
 
+	if v.Config.IncludeInfo {
+		singleFetch.Info = &resolve.FetchInfo{
+			DataSourceID: internal.sourceID,
+		}
+	}
+
 	return singleFetch
 }
diff --git a/v2/pkg/engine/postprocess/datasourcefetch.go b/v2/pkg/engine/postprocess/datasourcefetch.go
index 38f61ec14f..04d30150f3 100644
--- a/v2/pkg/engine/postprocess/datasourcefetch.go
+++ b/v2/pkg/engine/postprocess/datasourcefetch.go
@@ -84,6 +84,7 @@ func (d *DataSourceFetch) createEntityBatchFetch(fetch *resolve.SingleFetch) res
 	}
 
 	return &resolve.BatchEntityFetch{
+		Info: fetch.Info,
 		Input: resolve.BatchInput{
 			Header: resolve.InputTemplate{
 				Segments: fetch.InputTemplate.Segments[:representationsVariableIndex],
@@ -128,6 +129,7 @@ func (d *DataSourceFetch) createEntityFetch(fetch *resolve.SingleFetch) resolve.
 	}
 
 	return &resolve.EntityFetch{
+		Info: fetch.Info,
 		Input: resolve.EntityInput{
 			Header: resolve.InputTemplate{
 				Segments: fetch.InputTemplate.Segments[:representationsVariableIndex],
diff --git a/v2/pkg/engine/resolve/context.go b/v2/pkg/engine/resolve/context.go
index b698bf8d9b..e9e54d6fa3 100644
--- a/v2/pkg/engine/resolve/context.go
+++ b/v2/pkg/engine/resolve/context.go
@@ -1,29 +1,17 @@
 package resolve
 
 import (
-	"bytes"
 	"context"
 	"net/http"
-	"strconv"
-
-	"github.com/wundergraph/graphql-go-tools/v2/internal/pkg/unsafebytes"
-	"github.com/wundergraph/graphql-go-tools/v2/pkg/lexer/literal"
-	"github.com/wundergraph/graphql-go-tools/v2/pkg/pool"
+	"time"
 )
 
 type Context struct {
-	ctx              context.Context
-	Variables        []byte
-	Request          Request
-	pathElements     [][]byte
-	responseElements []string
-	usedBuffers      []*bytes.Buffer
-	pathPrefix       []byte
-	beforeFetchHook  BeforeFetchHook
-	afterFetchHook   AfterFetchHook
-	position         Position
-	RenameTypeNames  []RenameTypeName
-	EnableTracing    bool
+	ctx                   context.Context
+	Variables             []byte
+	Request               Request
+	RenameTypeNames       []RenameTypeName
+	RequestTracingOptions RequestTraceOptions
 }
 
 type Request struct {
@@ -35,12 +23,7 @@ func NewContext(ctx context.Context) *Context {
 		panic("nil context.Context")
 	}
 	return &Context{
-		ctx:          ctx,
-		Variables:    make([]byte, 0, 4096),
-		pathPrefix:   make([]byte, 0, 4096),
-		pathElements: make([][]byte, 0, 16),
-		usedBuffers:  make([]*bytes.Buffer, 0, 48),
-		position:     Position{},
+		ctx: ctx,
 	}
 }
 
@@ -58,116 +41,79 @@ func (c *Context) WithContext(ctx context.Context) *Context {
 }
 
 func (c *Context) clone() Context {
-	variables := make([]byte, len(c.Variables))
-	copy(variables, c.Variables)
-	pathPrefix := make([]byte, len(c.pathPrefix))
-	copy(pathPrefix, c.pathPrefix)
-	pathElements := make([][]byte, len(c.pathElements))
-	for i := range pathElements {
-		pathElements[i] = make([]byte, len(c.pathElements[i]))
-		copy(pathElements[i], c.pathElements[i])
-	}
-	return Context{
-		ctx:             c.ctx,
-		Variables:       variables,
-		Request:         c.Request,
-		pathElements:    pathElements,
-		usedBuffers:     make([]*bytes.Buffer, 0, 48),
-		pathPrefix:      pathPrefix,
-		beforeFetchHook: c.beforeFetchHook,
-		afterFetchHook:  c.afterFetchHook,
-		position:        c.position,
-	}
+	cpy := *c
+	cpy.ctx = context.Background()
+	cpy.Variables = append([]byte(nil), c.Variables...)
+	cpy.Request.Header = c.Request.Header.Clone()
+	cpy.RenameTypeNames = append([]RenameTypeName(nil), c.RenameTypeNames...)
+	return cpy
 }
 
 func (c *Context) Free() {
 	c.ctx = nil
-	c.Variables = c.Variables[:0]
-	c.pathPrefix = c.pathPrefix[:0]
-	c.pathElements = c.pathElements[:0]
-	for i := range c.usedBuffers {
-		pool.BytesBuffer.Put(c.usedBuffers[i])
-	}
-	c.usedBuffers = c.usedBuffers[:0]
-	c.beforeFetchHook = nil
-	c.afterFetchHook = nil
+	c.Variables = nil
 	c.Request.Header = nil
-	c.position = Position{}
 	c.RenameTypeNames = nil
+	c.RequestTracingOptions.DisableAll()
 }
 
-func (c *Context) SetBeforeFetchHook(hook BeforeFetchHook) {
-	c.beforeFetchHook = hook
-}
-
-func (c *Context) SetAfterFetchHook(hook AfterFetchHook) {
-	c.afterFetchHook = hook
-}
-
-func (c *Context) setPosition(position Position) {
-	c.position = position
-}
-
-func (c *Context) addResponseElements(elements []string) {
-	c.responseElements = append(c.responseElements, elements...)
-}
-
-func (c *Context) addResponseArrayElements(elements []string) {
-	c.responseElements = append(c.responseElements, elements...)
-}
-
-func (c *Context) removeResponseLastElements(elements []string) {
-	c.responseElements = c.responseElements[:len(c.responseElements)-len(elements)]
-}
-func (c *Context) removeResponseArrayLastElements(elements []string) {
-	c.responseElements = c.responseElements[:len(c.responseElements)-(len(elements))]
-}
+type traceStartKey struct{}
 
-func (c *Context) resetResponsePathElements() {
-	c.responseElements = nil
+type TraceInfo struct {
+	TraceStart     time.Time    `json:"-"`
+	TraceStartTime string       `json:"trace_start_time"`
+	TraceStartUnix int64        `json:"trace_start_unix"`
+	PlannerStats   PlannerStats `json:"planner_stats"`
+	debug          bool
 }
 
-func (c *Context) addPathElement(elem []byte) {
-	c.pathElements = append(c.pathElements, elem)
+type PlannerStats struct {
+	PlanningTimeNano         int64  `json:"planning_time_nanoseconds"`
+	PlanningTimePretty       string `json:"planning_time_pretty"`
+	DurationSinceStartNano   int64  `json:"duration_since_start_nanoseconds"`
+	DurationSinceStartPretty string `json:"duration_since_start_pretty"`
 }
 
-func (c *Context) addIntegerPathElement(elem int) {
-	b := unsafebytes.StringToBytes(strconv.Itoa(elem))
-	c.pathElements = append(c.pathElements, b)
-}
-
-func (c *Context) removeLastPathElement() {
-	c.pathElements = c.pathElements[:len(c.pathElements)-1]
-}
-
-func (c *Context) path() []byte {
-	buf := pool.BytesBuffer.Get()
-	c.usedBuffers = append(c.usedBuffers, buf)
-	if len(c.pathPrefix) != 0 {
-		buf.Write(c.pathPrefix)
+func SetTraceStart(ctx context.Context, predictableDebugTimings bool) context.Context {
+	info := &TraceInfo{}
+	if predictableDebugTimings {
+		info.debug = true
+		info.TraceStart = time.UnixMilli(0)
+		info.TraceStartUnix = 0
+		info.TraceStartTime = ""
 	} else {
-		buf.Write(literal.SLASH)
-		buf.Write(literal.DATA)
-	}
-	for i := range c.pathElements {
-		if i == 0 && bytes.Equal(literal.DATA, c.pathElements[0]) {
-			continue
-		}
-		_, _ = buf.Write(literal.SLASH)
-		_, _ = buf.Write(c.pathElements[i])
+		info.TraceStart = time.Now()
+		info.TraceStartUnix = info.TraceStart.Unix()
+		info.TraceStartTime = info.TraceStart.Format(time.RFC3339)
 	}
-	return buf.Bytes()
+	return context.WithValue(ctx, traceStartKey{}, info)
 }
 
-type HookContext struct {
-	CurrentPath []byte
+func GetDurationNanoSinceTraceStart(ctx context.Context) int64 {
+	info, ok := ctx.Value(traceStartKey{}).(*TraceInfo)
+	if !ok {
+		return 0
+	}
+	if info.debug {
+		return 1
+	}
+	return time.Since(info.TraceStart).Nanoseconds()
 }
 
-type BeforeFetchHook interface {
-	OnBeforeFetch(ctx HookContext, input []byte)
+func SetPlannerStats(ctx context.Context, stats PlannerStats) {
+	info, ok := ctx.Value(traceStartKey{}).(*TraceInfo)
+	if !ok {
+		return
+	}
+	if info.debug {
+		stats.DurationSinceStartNano = 5
+		stats.DurationSinceStartPretty = time.Duration(5).String()
+		stats.PlanningTimeNano = 5
+		stats.PlanningTimePretty = time.Duration(5).String()
+	}
+	info.PlannerStats = stats
 }
 
-type AfterFetchHook interface {
-	OnData(ctx HookContext, output []byte, singleFlight bool)
-	OnError(ctx HookContext, output []byte, singleFlight bool)
+func GetTraceInfo(ctx context.Context) *TraceInfo {
+	return ctx.Value(traceStartKey{}).(*TraceInfo)
 }
diff --git a/v2/pkg/engine/resolve/fetch.go b/v2/pkg/engine/resolve/fetch.go
index 6c215163c7..785e0ba719 100644
--- a/v2/pkg/engine/resolve/fetch.go
+++ b/v2/pkg/engine/resolve/fetch.go
@@ -1,5 +1,9 @@
 package resolve
 
+import (
+	"encoding/json"
+)
+
 type FetchKind int
 
 const (
@@ -22,6 +26,8 @@ type SingleFetch struct {
 	SerialID             int
 	InputTemplate        InputTemplate
 	DataSourceIdentifier []byte
+	Trace                *DataSourceLoadTrace
+	Info                 *FetchInfo
 }
 
 type PostProcessingConfiguration struct {
@@ -78,6 +84,8 @@ type BatchEntityFetch struct {
 	PostProcessing       PostProcessingConfiguration
 	DataSourceIdentifier []byte
 	DisallowSingleFlight bool
+	Trace                *DataSourceLoadTrace
+	Info                 *FetchInfo
 }
 
 type BatchInput struct {
@@ -105,6 +113,8 @@ type EntityFetch struct {
 	PostProcessing       PostProcessingConfiguration
 	DataSourceIdentifier []byte
 	DisallowSingleFlight bool
+	Trace                *DataSourceLoadTrace
+	Info                 *FetchInfo
 }
 
 type EntityInput struct {
@@ -122,7 +132,8 @@ func (_ *EntityFetch) FetchKind() FetchKind {
 // Usually, you want to batch fetches within a list, which is the default behavior of SingleFetch
 // However, if the data source does not support batching, you can use this fetch to make parallel fetches within a list
 type ParallelListItemFetch struct {
-	Fetch *SingleFetch
+	Fetch  *SingleFetch
+	Traces []*SingleFetch
 }
 
 func (_ *ParallelListItemFetch) FetchKind() FetchKind {
@@ -154,3 +165,21 @@ type FetchConfiguration struct {
 	// Returning null in this case tells the batch implementation to skip this item
 	SetTemplateOutputToNullOnVariableNull bool
 }
+
+type FetchInfo struct {
+	DataSourceID string
+}
+
+type DataSourceLoadTrace struct {
+	RawInputData               json.RawMessage `json:"raw_input_data,omitempty"`
+	Input                      json.RawMessage `json:"input,omitempty"`
+	Output                     json.RawMessage `json:"output,omitempty"`
+	LoadError                  string          `json:"error,omitempty"`
+	DurationSinceStartNano     int64           `json:"duration_since_start_nanoseconds,omitempty"`
+	DurationSinceStartPretty   string          `json:"duration_since_start_pretty,omitempty"`
+	DurationLoadNano           int64           `json:"duration_load_nanoseconds,omitempty"`
+	DurationLoadPretty         string          `json:"duration_load_pretty,omitempty"`
+	SingleFlightUsed           bool            `json:"single_flight_used"`
+	SingleFlightSharedResponse bool            `json:"single_flight_shared_response"`
+	LoadSkipped                bool            `json:"load_skipped"`
+}
diff --git a/v2/pkg/engine/resolve/load.go b/v2/pkg/engine/resolve/load.go
deleted file mode 100644
index efdf07765c..0000000000
--- a/v2/pkg/engine/resolve/load.go
+++ /dev/null
@@ -1,1006 +0,0 @@
-package resolve
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"reflect"
-	"strconv"
-	"sync"
-	"unsafe"
-
-	"github.com/buger/jsonparser"
-	"github.com/pkg/errors"
-	"go.uber.org/multierr"
-	"golang.org/x/sync/errgroup"
-	"golang.org/x/sync/singleflight"
-
-	"github.com/wundergraph/graphql-go-tools/v2/pkg/pool"
-)
-
-var (
-	ErrOriginResponseError = errors.New("origin response error")
-)
-
-type Loader struct {
-	layers []*layer
-
-	errors []byte
-
-	sf        *singleflight.Group
-	sfEnabled bool
-
-	buffers []*bytes.Buffer
-}
-
-func (l *Loader) Free() {
-	for i := range l.buffers {
-		pool.BytesBuffer.Put(l.buffers[i])
-	}
-	l.buffers = l.buffers[:0]
-}
-
-type resultSet struct {
-	mu        sync.Mutex
-	data      []byte
-	itemsData [][]byte
-	mergePath []string
-	buffers   []*bytes.Buffer
-	errors    []byte
-}
-
-func (r *resultSet) getBuffer() *bytes.Buffer {
-	buf := pool.BytesBuffer.Get()
-	r.mu.Lock()
-	r.buffers = append(r.buffers, buf)
-	r.mu.Unlock()
-	return buf
-}
-
-type layer struct {
-	path            []string
-	data            []byte
-	items           [][]byte
-	mapping         [][]int
-	kind            layerKind
-	hasFetches      bool
-	hasResolvedData bool
-}
-
-func (l *layer) itemsSize() int {
-	size := 0
-	for i := range l.items {
-		size += len(l.items[i])
-	}
-	return size
-}
-
-type layerKind int
-
-const (
-	layerKindObject layerKind = iota + 1
-	layerKindArray
-)
-
-func (l *Loader) getBuffer() *bytes.Buffer {
-	buf := pool.BytesBuffer.Get()
-	l.buffers = append(l.buffers, buf)
-	return buf
-}
-
-func (l *Loader) popLayer() {
-	l.layers = l.layers[:len(l.layers)-1]
-}
-
-func (l *Loader) inputData(layer *layer, out *bytes.Buffer) []byte {
-	if layer.data != nil || layer.kind == layerKindObject {
-		return layer.data
-	}
-	_, _ = out.Write([]byte(`[`))
-	addCommaSeparator := false
-	for i := range layer.items {
-		if layer.items[i] == nil {
-			continue
-		}
-		if addCommaSeparator {
-			_, _ = out.Write([]byte(`,`))
-		} else {
-			addCommaSeparator = true
-		}
-		_, _ = out.Write(layer.items[i])
-	}
-	_, _ = out.Write([]byte(`]`))
-	return out.Bytes()
-}
-
-func (l *Loader) LoadGraphQLResponseData(ctx *Context, response *GraphQLResponse, data []byte, out io.Writer) (hasErrors bool, err error) {
-	l.layers = l.layers[:0]
-	l.errors = l.errors[:0]
-	l.layers = append(l.layers, &layer{
-		data: data,
-		kind: layerKindObject,
-	})
-	err = l.resolveNode(ctx, response.Data)
-	if err != nil {
-		if errors.Is(err, ErrOriginResponseError) {
-			_, err1 := out.Write([]byte(`{"errors":`))
-			_, err2 := out.Write(l.errors)
-			_, err3 := out.Write([]byte(`}`))
-			return true, multierr.Combine(err1, err2, err3)
-		}
-		return false, err
-	}
-	_, err = out.Write(l.layers[0].data)
-	return false, err
-}
-
-func (l *Loader) resolveNode(ctx *Context, node Node) (err error) {
-	switch node := node.(type) {
-	case *Object:
-		return l.resolveObject(ctx, node)
-	case *Array:
-		return l.resolveArray(ctx, node)
-	}
-	return nil
-}
-
-func (l *Loader) insideArray() bool {
-	return l.currentLayer().kind == layerKindArray
-}
-
-func (l *Loader) currentLayer() *layer {
-	return l.layers[len(l.layers)-1]
-}
-
-func (l *Loader) currentLayerData() []byte {
-	return l.layers[len(l.layers)-1].data
-}
-
-func (l *Loader) setCurrentLayerData(data []byte) {
-	l.layers[len(l.layers)-1].data = data
-}
-
-func (l *Loader) resolveLayerData(path []string, isArray bool) (data []byte, items [][]byte, itemsMapping [][]int, err error) {
-	current := l.currentLayer()
-	if !l.insideArray() && !isArray {
-		data, _, _, err = jsonparser.Get(current.data, path...)
-		if errors.Is(err, jsonparser.KeyPathNotFoundError) {
-			// we have no data for this path which is legit
-			return nil, nil, nil, nil
-		}
-		return
-	}
-	if current.data != nil {
-		_, err = jsonparser.ArrayEach(current.data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
-			switch dataType {
-			case jsonparser.String:
-				// jsonparser.ArrayEach does not return the quotes so we need to add them
-				items = append(items, current.data[offset-2:offset+len(value)])
-			default:
-				items = append(items, value)
-			}
-		}, path...)
-		return nil, items, nil, errors.WithStack(err)
-	}
-	if isArray {
-		itemsMapping = make([][]int, len(current.items))
-		count := 0
-		for i := range current.items {
-			_, _ = jsonparser.ArrayEach(current.items[i], func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
-				switch dataType {
-				case jsonparser.String:
-					// jsonparser.ArrayEach does not return the quotes so we need to add them
-					items = append(items, current.items[i][offset-2:offset+len(value)])
-				default:
-					items = append(items, value)
-				}
-				itemsMapping[i] = append(itemsMapping[i], count)
-				count++
-			}, path...)
-		}
-	} else {
-		for i := range current.items {
-			data, _, _, _ = jsonparser.Get(current.items[i], path...)
-			// we explicitly ignore the error and just append a nil slice
-			items = append(items, data)
-		}
-	}
-	return nil, items, itemsMapping, nil
-}
-
-func (l *Loader) resolveArray(ctx *Context, array *Array) (err error) {
-	if !array.HasChildFetches() {
-		return nil
-	}
-	data, items, mapping, err := l.resolveLayerData(array.Path, true)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	next := &layer{
-		path:    array.Path,
-		data:    data,
-		items:   items,
-		mapping: mapping,
-		kind:    layerKindArray,
-	}
-	l.layers = append(l.layers, next)
-	err = l.resolveNode(ctx, array.Item)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	err = l.mergeLayerIntoParent()
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	l.popLayer()
-	return nil
-}
-
-func (l *Loader) resolveObject(ctx *Context, object *Object) (err error) {
-	if l.shouldSkipObject(object) {
-		return nil
-	}
-	if len(object.Path) != 0 {
-		data, items, mapping, err := l.resolveLayerData(object.Path, false)
-		if err != nil {
-			return errors.WithStack(err)
-		}
-		next := &layer{
-			path:    object.Path,
-			data:    data,
-			items:   items,
-			mapping: mapping,
-			kind:    layerKindObject,
-		}
-		if l.insideArray() {
-			next.kind = layerKindArray
-		}
-		l.layers = append(l.layers, next)
-	}
-	if object.Fetch != nil {
-		err = l.resolveFetch(ctx, object.Fetch, nil)
-		if err != nil {
-			return errors.WithStack(err)
-		}
-	}
-	if l.shouldTraverseObjectChildren(object) {
-		for i := range object.Fields {
-			err = l.resolveNode(ctx, object.Fields[i].Value)
-			if err != nil {
-				return errors.WithStack(err)
-			}
-		}
-	}
-	if len(object.Path) != 0 {
-		err = l.mergeLayerIntoParent()
-		if err != nil {
-			return errors.WithStack(err)
-		}
-		l.popLayer()
-	}
-	return nil
-}
-
-func (l *Loader) shouldSkipObject(object *Object) bool {
-	if object.Fetch == nil && !object.HasChildFetches() {
-		return true
-	}
-	return false
-}
-
-func (l *Loader) shouldTraverseObjectChildren(object *Object) bool {
-	if !object.HasChildFetches() {
-		return false
-	}
-	lr := l.currentLayer()
-	if lr.hasFetches {
-		return lr.hasResolvedData
-	}
-	return true
-}
-
-func (l *Loader) mergeLayerIntoParent() (err error) {
-	child := l.layers[len(l.layers)-1]
-	parent := l.layers[len(l.layers)-2]
-	if child.mapping != nil {
-		for i, indices := range child.mapping {
-			buf := l.getBuffer()
-			_, _ = buf.Write([]byte(`[`))
-			for j := range indices {
-				if j != 0 {
-					_, _ = buf.Write([]byte(`,`))
-				}
-				_, _ = buf.Write(child.items[indices[j]])
-			}
-			_, _ = buf.Write([]byte(`]`))
-			parent.items[i], err = jsonparser.Set(parent.items[i], buf.Bytes(), child.path...)
-			if err != nil {
-				return errors.WithStack(err)
-			}
-		}
-		return nil
-	}
-	if parent.kind == layerKindObject && child.kind == layerKindObject {
-		parent.data, err = l.mergeJSONWithMergePath(parent.data, child.data, child.path)
-		return errors.WithStack(err)
-	}
-	if parent.kind == layerKindObject && child.kind == layerKindArray {
-		buf := l.getBuffer()
-		_, _ = buf.Write([]byte(`[`))
-		addCommaSeparator := false
-		for i := range child.items {
-			if child.items[i] == nil {
-				continue
-			}
-			if addCommaSeparator {
-				_, _ = buf.Write([]byte(`,`))
-			} else {
-				addCommaSeparator = true
-			}
-			_, _ = buf.Write(child.items[i])
-		}
-		_, _ = buf.Write([]byte(`]`))
-		parent.data, err = jsonparser.Set(parent.data, buf.Bytes(), child.path...)
-		return errors.WithStack(err)
-	}
-	for i := range parent.items {
-		if child.items[i] == nil {
-			continue
-		}
-		existing, _, _, _ := jsonparser.Get(parent.items[i], child.path...)
-		combined, err := l.mergeJSON(existing, child.items[i])
-		if err != nil {
-			return errors.WithStack(err)
-		}
-		parent.items[i], err = jsonparser.Set(parent.items[i], combined, child.path...)
-		if err != nil {
-			return errors.WithStack(err)
-		}
-	}
-	return nil
-}
-
-func (l *Loader) mergeResultErr(res *resultSet) {
-	if res == nil {
-		return
-	}
-	if res.errors == nil {
-		return
-	}
-	l.errors = res.errors
-}
-
-func (l *Loader) mergeResultSet(res *resultSet) (err error) {
-	if res == nil {
-		return nil
-	}
-	if res.buffers != nil {
-		l.buffers = append(l.buffers, res.buffers...)
-	}
-	if res.data != nil {
-		return l.mergeDataIntoLayer(l.currentLayer(), res.data, res.mergePath)
-	}
-	if res.itemsData != nil {
-		lr := l.currentLayer()
-		before := lr.itemsSize()
-		for i := range lr.items {
-			if lr.items[i] == nil {
-				continue
-			}
-			lr.items[i], err = l.mergeJSONWithMergePath(lr.items[i], res.itemsData[i], res.mergePath)
-			if err != nil {
-				return errors.WithStack(err)
-			}
-		}
-		after := lr.itemsSize()
-		if after > before {
-			lr.hasResolvedData = true
-		}
-	}
-	return nil
-}
-
-func (l *Loader) resolveFetch(ctx *Context, fetch Fetch, res *resultSet) (err error) {
-	parallel := res != nil
-	lr := l.currentLayer()
-	if !parallel {
-		// would be a data race otherwise
-		// we already set it to true for the root parallel fetch, so skip is fine
-		lr.hasFetches = true
-	}
-	switch f := fetch.(type) {
-	case *SingleFetch:
-		if parallel {
-			return l.resolveSingleFetch(ctx, f, res)
-		}
-		res = &resultSet{}
-		err = l.resolveSingleFetch(ctx, f, res)
-		if err != nil {
-			l.mergeResultErr(res)
-			return err
-		}
-		return l.mergeResultSet(res)
-	case *SerialFetch:
-		return l.resolveSerialFetch(ctx, f)
-	case *ParallelFetch:
-		return l.resolveParallelFetch(ctx, f)
-	case *ParallelListItemFetch:
-		if parallel {
-			return l.resolveParallelListItemFetch(ctx, f, res)
-		}
-		res = &resultSet{}
-		err = l.resolveParallelListItemFetch(ctx, f, res)
-		if err != nil {
-			l.mergeResultErr(res)
-			return err
-		}
-		return l.mergeResultSet(res)
-	case *EntityFetch:
-		if parallel {
-			return l.resolveEntityFetch(ctx, f, res)
-		}
-		res = &resultSet{}
-		err = l.resolveEntityFetch(ctx, f, res)
-		if err != nil {
-			l.mergeResultErr(res)
-			return err
-		}
-		return l.mergeResultSet(res)
-	case *BatchEntityFetch:
-		if parallel {
-			return l.resolveBatchEntityFetch(ctx, f, res)
-		}
-		res = &resultSet{}
-		err = l.resolveBatchEntityFetch(ctx, f, res)
-		if err != nil {
-			l.mergeResultErr(res)
-			return err
-		}
-		return l.mergeResultSet(res)
-	}
-	return nil
-}
-
-func (l *Loader) resolveEntityFetch(ctx *Context, fetch *EntityFetch, res *resultSet) (err error) {
-	input := res.getBuffer()
-	out := res.getBuffer()
-
-	lr := l.currentLayer()
-	err = fetch.Input.Header.Render(ctx, nil, input)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-
-	itemBuf := pool.BytesBuffer.Get()
-	defer pool.BytesBuffer.Put(itemBuf)
-
-	err = fetch.Input.Item.Render(ctx, lr.data, itemBuf)
-	if err != nil {
-		if fetch.Input.SkipErrItem {
-			err = nil
-			// skip fetch on render item error
-			return
-		}
-		return errors.WithStack(err)
-	}
-
-	switch {
-	case itemBuf.Len() == 4 && bytes.Equal(itemBuf.Bytes(), null):
-		// skip fetch if item is null
-		return
-	case itemBuf.Len() == 2 && bytes.Equal(itemBuf.Bytes(), emptyObject):
-		// skip fetch if item is empty
-		return
-	}
-
-	_, _ = input.Write(itemBuf.Bytes())
-
-	err = fetch.Input.Footer.Render(ctx, nil, input)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-
-	res.mergePath = fetch.PostProcessing.MergePath
-	res.data, err = l.loadAndPostProcess(ctx, fetch.DisallowSingleFlight, fetch.DataSource, fetch.DataSourceIdentifier, fetch.PostProcessing, input, out, res)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	return
-}
-
-func (l *Loader) resolveBatchEntityFetch(ctx *Context, fetch *BatchEntityFetch, res *resultSet) (err error) {
-	res.mergePath = fetch.PostProcessing.MergePath
-	input := pool.BytesBuffer.Get()
-	defer pool.BytesBuffer.Put(input)
-	lr := l.currentLayer()
-	err = fetch.Input.Header.Render(ctx, nil, input)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-
-	batchStats := make([][]int, len(lr.items))
-	batchItemIndex := 0
-
-	itemBuf := pool.BytesBuffer.Get()
-	defer pool.BytesBuffer.Put(itemBuf)
-	hash := pool.Hash64.Get()
-	defer pool.Hash64.Put(hash)
-
-	itemHashes := make([]uint64, 0, len(lr.items)*len(fetch.Input.Items))
-	addSeparator := false
-
-	for i := range lr.items {
-		if lr.items[i] == nil {
-			continue
-		}
-	WithNext:
-		for j := range fetch.Input.Items {
-			itemBuf.Reset()
-			err = fetch.Input.Items[j].Render(ctx, lr.items[i], itemBuf)
-			if err != nil {
-				if fetch.Input.SkipErrItems {
-					err = nil
-					batchStats[i] = append(batchStats[i], -1)
-					continue
-				}
-				return errors.WithStack(err)
-			}
-			if fetch.Input.SkipNullItems && itemBuf.Len() == 4 && bytes.Equal(itemBuf.Bytes(), null) {
-				batchStats[i] = append(batchStats[i], -1)
-				continue
-			}
-			hash.Reset()
-			_, _ = hash.Write(itemBuf.Bytes())
-			itemHash := hash.Sum64()
-			for k := range itemHashes {
-				if itemHashes[k] == itemHash {
-					batchStats[i] = append(batchStats[i], k)
-					continue WithNext
-				}
-			}
-			itemHashes = append(itemHashes, itemHash)
-			if addSeparator {
-				err = fetch.Input.Separator.Render(ctx, nil, input)
-				if err != nil {
-					return errors.WithStack(err)
-				}
-			}
-			_, _ = input.Write(itemBuf.Bytes())
-			batchStats[i] = append(batchStats[i], batchItemIndex)
-			batchItemIndex++
-			addSeparator = true
-		}
-	}
-	err = fetch.Input.Footer.Render(ctx, nil, input)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	data, err := l.loadWithSingleFlight(ctx, fetch.DisallowSingleFlight, fetch.DataSource, fetch.DataSourceIdentifier, input.Bytes(), res)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	responseErrors, _, _, _ := jsonparser.Get(data, "errors")
-	if responseErrors != nil {
-		l.errors = responseErrors
-		return errors.WithStack(ErrOriginResponseError)
-	}
-	if fetch.PostProcessing.SelectResponseDataPath != nil {
-		data, _, _, err = jsonparser.Get(data, fetch.PostProcessing.SelectResponseDataPath...)
-		if err != nil {
-			return errors.WithStack(err)
-		}
-	}
-	var (
-		batchResponseItems [][]byte
-	)
-	_, err = jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
-		batchResponseItems = append(batchResponseItems, value)
-	})
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	res.itemsData = make([][]byte, len(lr.items))
-	if fetch.PostProcessing.ResponseTemplate != nil {
-		buf := res.getBuffer()
-		start := 0
-		for i, stats := range batchStats {
-			_, _ = buf.Write(lBrack)
-			addCommaSeparator := false
-			for j := range stats {
-				if addCommaSeparator {
-					_, _ = buf.Write(comma)
-				} else {
-					addCommaSeparator = true
-				}
-				if stats[j] == -1 {
-					_, _ = buf.Write(null)
-					continue
-				}
-				_, err = buf.Write(batchResponseItems[stats[j]])
-				if err != nil {
-					return errors.WithStack(err)
-				}
-			}
-			_, _ = buf.Write(rBrack)
-			res.itemsData[i] = buf.Bytes()[start:]
-			start = buf.Len()
-		}
-		for i := range res.itemsData {
-			err = fetch.PostProcessing.ResponseTemplate.Render(ctx, res.itemsData[i], buf)
-			if err != nil {
-				return errors.WithStack(err)
-			}
-			res.itemsData[i] = buf.Bytes()[start:]
-			start = buf.Len()
-		}
-	} else {
-		for i, stats := range batchStats {
-			for j := range stats {
-				if stats[j] == -1 {
-					continue
-				}
-				res.itemsData[i], err = l.mergeJSON(res.itemsData[i], batchResponseItems[stats[j]])
-				if err != nil {
-					return errors.WithStack(err)
-				}
-			}
-		}
-	}
-	return
-}
-
-func (l *Loader) resolveParallelListItemFetch(ctx *Context, fetch *ParallelListItemFetch, res *resultSet) (err error) {
-	if !l.insideArray() {
-		return errors.WithStack(fmt.Errorf("resolveParallelListItemFetch must be inside an array, this seems to be a bug in the planner"))
-	}
-	layer := l.currentLayer()
-	group, gCtx := errgroup.WithContext(ctx.ctx)
-	groupContext := ctx.WithContext(gCtx)
-	res.itemsData = make([][]byte, len(layer.items))
-	res.mergePath = fetch.Fetch.PostProcessing.MergePath
-	for i := range layer.items {
-		i := i
-		// get a buffer before we start the goroutines
-		// getLayerBuffer will append the buffer to the list of buffers of the current layer
-		// this will ensure that the buffer is not re-used before this layer is merged into the parent
-		// however, appending is not concurrency safe, so we need to do it before we start the goroutines
-		out := res.getBuffer()
-		input := res.getBuffer()
-		err = fetch.Fetch.InputTemplate.Render(ctx, layer.items[i], input)
-		if err != nil {
-			return errors.WithStack(err)
-		}
-		group.Go(func() error {
-			data, err := l.loadAndPostProcess(groupContext, fetch.Fetch.DisallowSingleFlight, fetch.Fetch.DataSource, fetch.Fetch.DataSourceIdentifier, fetch.Fetch.PostProcessing, input, out, res)
-			if err != nil {
-				return errors.WithStack(err)
-			}
-			res.itemsData[i] = data
-			return nil
-		})
-	}
-	err = group.Wait()
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	return nil
-}
-
-func (l *Loader) resolveParallelFetch(ctx *Context, fetch *ParallelFetch) (err error) {
-	group, groupContext := errgroup.WithContext(ctx.Context())
-	groupCtx := ctx.WithContext(groupContext)
-	isArray := l.insideArray()
-	resultSets := make([]*resultSet, len(fetch.Fetches))
-	for i := range fetch.Fetches {
-		f := fetch.Fetches[i]
-		res := &resultSet{}
-		resultSets[i] = res
-		if isArray && f.FetchKind() == FetchKindSingle {
-			return fmt.Errorf("parallel fetches inside an array must not be of kind FetchKindSingle - this seems to be a bug in the planner")
-		}
-		group.Go(func() error {
-			return l.resolveFetch(groupCtx, f, res)
-		})
-	}
-	err = group.Wait()
-	for i := range resultSets {
-		err = l.mergeResultSet(resultSets[i])
-		if err != nil {
-			return err
-		}
-	}
-	return errors.WithStack(err)
-}
-
-func (l *Loader) resolveSingleFetch(ctx *Context, fetch *SingleFetch, res *resultSet) (err error) {
-	input := res.getBuffer()
-	inputBuf := res.getBuffer()
-	out := res.getBuffer()
-	inputData := l.inputData(l.currentLayer(), inputBuf)
-	err = fetch.InputTemplate.Render(ctx, inputData, input)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	res.mergePath = fetch.PostProcessing.MergePath
-	res.data, err = l.loadAndPostProcess(ctx, fetch.DisallowSingleFlight, fetch.DataSource, fetch.DataSourceIdentifier, fetch.PostProcessing, input, out, res)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	return
-}
-
-func (l *Loader) loadWithSingleFlight(ctx *Context, dissalowSingleFlight bool, source DataSource, identifier []byte, input []byte, res *resultSet) ([]byte, error) {
-	if !l.sfEnabled || dissalowSingleFlight {
-		out := res.getBuffer()
-		err := source.Load(ctx.ctx, input, out)
-		if err != nil {
-			return nil, err
-		}
-		return out.Bytes(), nil
-	}
-	keyGen := pool.Hash64.Get()
-	defer pool.Hash64.Put(keyGen)
-	_, _ = keyGen.Write(identifier)
-	_, _ = keyGen.Write(input)
-	key := strconv.FormatUint(keyGen.Sum64(), 10)
-	maybeData, err, shared := l.sf.Do(key, func() (interface{}, error) {
-		out := &bytes.Buffer{}
-		err := source.Load(ctx.ctx, input, out)
-		if err != nil {
-			return nil, err
-		}
-		return out.Bytes(), nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	data := maybeData.([]byte)
-	if shared {
-		out := make([]byte, len(data))
-		copy(out, data)
-		return out, err
-	}
-	return data, nil
-}
-
-func (l *Loader) loadAndPostProcess(ctx *Context, dissalowSingleFlight bool, source DataSource, identifier []byte, postProcessing PostProcessingConfiguration, input *bytes.Buffer, out *bytes.Buffer, res *resultSet) (data []byte, err error) {
-	data, err = l.loadWithSingleFlight(ctx, dissalowSingleFlight, source, identifier, input.Bytes(), res)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	responseErrors, _, _, _ := jsonparser.Get(data, "errors")
-	if responseErrors != nil {
-		res.errors = responseErrors
-		return nil, ErrOriginResponseError
-	}
-	if postProcessing.SelectResponseDataPath != nil {
-		data, _, _, err = jsonparser.Get(data, postProcessing.SelectResponseDataPath...)
-		if err != nil {
-			return nil, errors.WithStack(err)
-		}
-	}
-	if postProcessing.ResponseTemplate != nil {
-		intermediate := pool.FastBuffer.Get()
-		defer pool.FastBuffer.Put(intermediate)
-		_, err = intermediate.Write(data)
-		if err != nil {
-			return nil, errors.WithStack(err)
-		}
-		out.Reset()
-		err = postProcessing.ResponseTemplate.Render(ctx, intermediate.Bytes(), out)
-		if err != nil {
-			return nil, errors.WithStack(err)
-		}
-		data = out.Bytes()
-	}
-	return data, nil
-}
-
-func (l *Loader) mergeDataIntoLayer(layer *layer, data []byte, mergePath []string) (err error) {
-	if bytes.Equal(data, null) {
-		return
-	}
-	layer.hasResolvedData = true
-	if layer.kind == layerKindObject {
-		layer.data, err = l.mergeJSONWithMergePath(layer.data, data, mergePath)
-		return errors.WithStack(err)
-	}
-	var (
-		dataItems [][]byte
-	)
-	_, err = jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
-		switch dataType {
-		case jsonparser.String:
-			// jsonparser.ArrayEach does not return the quotes so we need to add them
-			dataItems = append(dataItems, data[offset-2:offset+len(value)])
-		default:
-			dataItems = append(dataItems, value)
-		}
-	})
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	skipped := 0
-	for i := 0; i < len(layer.items); i++ {
-		if layer.items[i] == nil {
-			skipped++
-			continue
-		}
-		layer.items[i], err = l.mergeJSONWithMergePath(layer.items[i], dataItems[i-skipped], mergePath)
-		if err != nil {
-			return errors.WithStack(err)
-		}
-	}
-	return nil
-}
-
-func (l *Loader) resolveSerialFetch(ctx *Context, fetch *SerialFetch) (err error) {
-	for i := range fetch.Fetches {
-		err = l.resolveFetch(ctx, fetch.Fetches[i], nil)
-		if err != nil {
-			return errors.WithStack(err)
-		}
-	}
-	return nil
-}
-
-type fastJsonContext struct {
-	keys, values               [][]byte
-	types                      []jsonparser.ValueType
-	missingKeys, missingValues [][]byte
-	missingTypes               []jsonparser.ValueType
-}
-
-var (
-	fastJsonPool = sync.Pool{
-		New: func() interface{} {
-			ctx := &fastJsonContext{}
-			ctx.keys = make([][]byte, 0, 4)
-			ctx.values = make([][]byte, 0, 4)
-			ctx.types = make([]jsonparser.ValueType, 0, 4)
-			ctx.missingKeys = make([][]byte, 0, 4)
-			ctx.missingValues = make([][]byte, 0, 4)
-			ctx.missingTypes = make([]jsonparser.ValueType, 0, 4)
-			return ctx
-		},
-	}
-)
-
-func (l *Loader) mergeJSONWithMergePath(left, right []byte, mergePath []string) ([]byte, error) {
-	if len(mergePath) == 0 {
-		return l.mergeJSON(left, right)
-	}
-	element := mergePath[len(mergePath)-1]
-	mergePath = mergePath[:len(mergePath)-1]
-	buf := pool.BytesBuffer.Get()
-	defer pool.BytesBuffer.Put(buf)
-	_, _ = buf.Write(lBrace)
-	_, _ = buf.Write(quote)
-	_, _ = buf.Write([]byte(element))
-	_, _ = buf.Write(quote)
-	_, _ = buf.Write(colon)
-	_, _ = buf.Write(right)
-	_, _ = buf.Write(rBrace)
-	out := make([]byte, buf.Len())
-	copy(out, buf.Bytes())
-	return l.mergeJSONWithMergePath(left, out, mergePath)
-}
-
-func (l *Loader) mergeJSON(left, right []byte) ([]byte, error) {
-	if left == nil {
-		return right, nil
-	}
-	if right == nil {
-		return left, nil
-	}
-	if left == nil && right == nil {
-		return nil, nil
-	}
-	leftIsNull := bytes.Equal(left, null)
-	rightIsNull := bytes.Equal(right, null)
-	switch {
-	case leftIsNull && rightIsNull:
-		return left, nil
-	case !leftIsNull && rightIsNull:
-		return left, nil
-	case leftIsNull && !rightIsNull:
-		return right, nil
-	}
-	ctx := fastJsonPool.Get().(*fastJsonContext)
-	defer func() {
-		ctx.keys = ctx.keys[:0]
-		ctx.values = ctx.values[:0]
-		ctx.types = ctx.types[:0]
-		ctx.missingKeys = ctx.missingKeys[:0]
-		ctx.missingValues = ctx.missingValues[:0]
-		ctx.missingTypes = ctx.missingTypes[:0]
-		fastJsonPool.Put(ctx)
-	}()
-	err := jsonparser.ObjectEach(left, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
-		ctx.keys = append(ctx.keys, key)
-		ctx.values = append(ctx.values, value)
-		ctx.types = append(ctx.types, dataType)
-		return nil
-	})
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	err = jsonparser.ObjectEach(right, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
-		if i, exists := l.byteSliceContainsKey(ctx.keys, key); exists {
-			if bytes.Equal(ctx.values[i], value) {
-				return nil
-			}
-			switch ctx.types[i] {
-			case jsonparser.Object:
-				merged, err := l.mergeJSON(ctx.values[i], value)
-				if err != nil {
-					return errors.WithStack(err)
-				}
-				left, err = jsonparser.Set(left, merged, l.unsafeBytesToString(key))
-				if err != nil {
-					return errors.WithStack(err)
-				}
-			case jsonparser.String:
-				update := right[offset-len(value)-2 : offset]
-				left, err = jsonparser.Set(left, update, l.unsafeBytesToString(key))
-				if err != nil {
-					return errors.WithStack(err)
-				}
-			default:
-				left, err = jsonparser.Set(left, value, l.unsafeBytesToString(key))
-				if err != nil {
-					return errors.WithStack(err)
-				}
-			}
-			return nil
-		}
-		ctx.missingKeys = append(ctx.missingKeys, key)
-		ctx.missingValues = append(ctx.missingValues, value)
-		ctx.missingTypes = append(ctx.missingTypes, dataType)
-		return nil
-	})
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	if len(ctx.missingKeys) == 0 {
-		return left, nil
-	}
-	buf := pool.BytesBuffer.Get()
-	defer pool.BytesBuffer.Put(buf)
-	_, _ = buf.Write(lBrace)
-	for i := range ctx.missingKeys {
-		_, _ = buf.Write(quote)
-		_, _ = buf.Write(ctx.missingKeys[i])
-		_, _ = buf.Write(quote)
-		_, _ = buf.Write(colon)
-		if ctx.missingTypes[i] == jsonparser.String {
-			_, _ = buf.Write(quote)
-		}
-		_, _ = buf.Write(ctx.missingValues[i])
-		if ctx.missingTypes[i] == jsonparser.String {
-			_, _ = buf.Write(quote)
-		}
-		_, _ = buf.Write(comma)
-	}
-	start := bytes.Index(left, lBrace)
-	_, _ = buf.Write(left[start+1:])
-	combined := buf.Bytes()
-	out := make([]byte, len(combined))
-	copy(out, combined)
-	return out, nil
-}
-
-func (l *Loader) byteSliceContainsKey(slice [][]byte, key []byte) (int, bool) {
-	for i := range slice {
-		if bytes.Equal(slice[i], key) {
-			return i, true
-		}
-	}
-	return -1, false
-}
-
-// unsafeBytesToString is a helper function to convert a byte slice to a string without copying the underlying data
-func (l *Loader) unsafeBytesToString(bytes []byte) string {
-	sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))
-	stringHeader := reflect.StringHeader{Data: sliceHeader.Data, Len: sliceHeader.Len}
-	return *(*string)(unsafe.Pointer(&stringHeader)) // nolint: govet
-}
diff --git a/v2/pkg/engine/resolve/v2load.go b/v2/pkg/engine/resolve/loader.go
similarity index 80%
rename from v2/pkg/engine/resolve/v2load.go
rename to v2/pkg/engine/resolve/loader.go
index 3c1d66cfae..9235714e6f 100644
--- a/v2/pkg/engine/resolve/v2load.go
+++ b/v2/pkg/engine/resolve/loader.go
@@ -8,15 +8,18 @@ import (
 	"runtime"
 	"runtime/debug"
 	"sync"
+	"time"
 
+	"github.com/buger/jsonparser"
 	"github.com/pkg/errors"
+	"github.com/tidwall/gjson"
 	"golang.org/x/sync/errgroup"
 
 	"github.com/wundergraph/graphql-go-tools/v2/pkg/astjson"
 	"github.com/wundergraph/graphql-go-tools/v2/pkg/pool"
 )
 
-type V2Loader struct {
+type Loader struct {
 	data *astjson.JSON
dataRoot int errorsRoot int @@ -24,9 +27,10 @@ type V2Loader struct { sf *Group enableSingleFlight bool path []string + traceOptions RequestTraceOptions } -func (l *V2Loader) Free() { +func (l *Loader) Free() { l.ctx = nil l.sf = nil l.data = nil @@ -36,15 +40,16 @@ func (l *V2Loader) Free() { l.path = l.path[:0] } -func (l *V2Loader) LoadGraphQLResponseData(ctx *Context, response *GraphQLResponse, resolvable *Resolvable) (err error) { +func (l *Loader) LoadGraphQLResponseData(ctx *Context, response *GraphQLResponse, resolvable *Resolvable) (err error) { l.data = resolvable.storage l.dataRoot = resolvable.dataRoot l.errorsRoot = resolvable.errorsRoot + l.traceOptions = resolvable.requestTraceOptions l.ctx = ctx return l.walkNode(response.Data, []int{resolvable.dataRoot}) } -func (l *V2Loader) walkNode(node Node, items []int) error { +func (l *Loader) walkNode(node Node, items []int) error { switch n := node.(type) { case *Object: return l.walkObject(n, items) @@ -55,23 +60,23 @@ func (l *V2Loader) walkNode(node Node, items []int) error { } } -func (l *V2Loader) pushPath(path []string) { +func (l *Loader) pushPath(path []string) { l.path = append(l.path, path...) } -func (l *V2Loader) popPath(path []string) { +func (l *Loader) popPath(path []string) { l.path = l.path[:len(l.path)-len(path)] } -func (l *V2Loader) pushArrayPath() { +func (l *Loader) pushArrayPath() { l.path = append(l.path, "@") } -func (l *V2Loader) popArrayPath() { +func (l *Loader) popArrayPath() { l.path = l.path[:len(l.path)-1] } -func (l *V2Loader) walkObject(object *Object, parentItems []int) (err error) { +func (l *Loader) walkObject(object *Object, parentItems []int) (err error) { l.pushPath(object.Path) defer l.popPath(object.Path) objectItems := l.selectNodeItems(parentItems, object.Path) @@ -90,7 +95,7 @@ func (l *V2Loader) walkObject(object *Object, parentItems []int) (err error) { return nil } -func (l *V2Loader) walkArray(array *Array, parentItems []int) error { +func (l *Loader) walkArray(array *Array, parentItems []int) error { l.pushPath(array.Path) l.pushArrayPath() nodeItems := l.selectNodeItems(parentItems, array.Path) @@ -100,7 +105,7 @@ func (l *V2Loader) walkArray(array *Array, parentItems []int) error { return err } -func (l *V2Loader) selectNodeItems(parentItems []int, path []string) (items []int) { +func (l *Loader) selectNodeItems(parentItems []int, path []string) (items []int) { if parentItems == nil { return nil } @@ -132,7 +137,7 @@ func (l *V2Loader) selectNodeItems(parentItems []int, path []string) (items []in return } -func (l *V2Loader) itemsData(items []int, out io.Writer) error { +func (l *Loader) itemsData(items []int, out io.Writer) error { if len(items) == 0 { return nil } @@ -145,7 +150,7 @@ func (l *V2Loader) itemsData(items []int, out io.Writer) error { }, out) } -func (l *V2Loader) resolveAndMergeFetch(fetch Fetch, items []int) error { +func (l *Loader) resolveAndMergeFetch(fetch Fetch, items []int) error { switch f := fetch.(type) { case *SingleFetch: res := &result{ @@ -236,7 +241,7 @@ func (l *V2Loader) resolveAndMergeFetch(fetch Fetch, items []int) error { return nil } -func (l *V2Loader) loadFetch(ctx context.Context, fetch Fetch, items []int, res *result) error { +func (l *Loader) loadFetch(ctx context.Context, fetch Fetch, items []int, res *result) error { switch f := fetch.(type) { case *SingleFetch: res.out = pool.BytesBuffer.Get() @@ -247,12 +252,23 @@ func (l *V2Loader) loadFetch(ctx context.Context, fetch Fetch, items []int, res return fmt.Errorf("parallel fetch must not 
be nested") case *ParallelListItemFetch: results := make([]*result, len(items)) + if l.traceOptions.Enable { + f.Traces = make([]*SingleFetch, len(items)) + } g, ctx := errgroup.WithContext(l.ctx.ctx) for i := range items { i := i results[i] = &result{ out: pool.BytesBuffer.Get(), } + if l.traceOptions.Enable { + f.Traces[i] = new(SingleFetch) + *f.Traces[i] = *f.Fetch + g.Go(func() error { + return l.loadFetch(ctx, f.Traces[i], items[i:i+1], results[i]) + }) + continue + } g.Go(func() error { return l.loadFetch(ctx, f.Fetch, items[i:i+1], results[i]) }) @@ -273,7 +289,7 @@ func (l *V2Loader) loadFetch(ctx context.Context, fetch Fetch, items []int, res return nil } -func (l *V2Loader) mergeErrors(ref int) { +func (l *Loader) mergeErrors(ref int) { if ref == -1 { return } @@ -284,7 +300,7 @@ func (l *V2Loader) mergeErrors(ref int) { l.data.MergeArrays(l.errorsRoot, ref) } -func (l *V2Loader) mergeResult(res *result, items []int) error { +func (l *Loader) mergeResult(res *result, items []int) error { defer pool.BytesBuffer.Put(res.out) if res.fetchAborted { return nil @@ -402,7 +418,7 @@ var ( errorsInvalidInputFooter = []byte(`]}]}`) ) -func (l *V2Loader) renderErrorsInvalidInput(out *bytes.Buffer) error { +func (l *Loader) renderErrorsInvalidInput(out *bytes.Buffer) error { _, _ = out.Write(errorsInvalidInputHeader) for i := range l.path { if i != 0 { @@ -421,7 +437,7 @@ var ( errorsFailedToFetchFooter = []byte(`]}]}`) ) -func (l *V2Loader) renderErrorsFailedToFetch(out *bytes.Buffer) error { +func (l *Loader) renderErrorsFailedToFetch(out *bytes.Buffer) error { _, _ = out.Write(errorsFailedToFetchHeader) for i := range l.path { if i != 0 { @@ -435,7 +451,7 @@ func (l *V2Loader) renderErrorsFailedToFetch(out *bytes.Buffer) error { return nil } -func (l *V2Loader) loadSingleFetch(ctx context.Context, fetch *SingleFetch, items []int, res *result) error { +func (l *Loader) loadSingleFetch(ctx context.Context, fetch *SingleFetch, items []int, res *result) error { input := pool.BytesBuffer.Get() defer pool.BytesBuffer.Put(input) preparedInput := pool.BytesBuffer.Get() @@ -444,11 +460,19 @@ func (l *V2Loader) loadSingleFetch(ctx context.Context, fetch *SingleFetch, item if err != nil { return errors.WithStack(err) } + if l.traceOptions.Enable { + fetch.Trace = &DataSourceLoadTrace{} + if !l.traceOptions.ExcludeRawInputData { + inputCopy := make([]byte, input.Len()) + copy(inputCopy, input.Bytes()) + fetch.Trace.RawInputData = inputCopy + } + } err = fetch.InputTemplate.Render(l.ctx, input.Bytes(), preparedInput) if err != nil { return l.renderErrorsInvalidInput(res.out) } - err = l.executeSourceLoad(ctx, fetch.DisallowSingleFlight, fetch.DataSource, preparedInput.Bytes(), res.out) + err = l.executeSourceLoad(ctx, fetch.DisallowSingleFlight, fetch.DataSource, preparedInput.Bytes(), res.out, fetch.Trace) if err != nil { return l.renderErrorsFailedToFetch(res.out) } @@ -456,7 +480,7 @@ func (l *V2Loader) loadSingleFetch(ctx context.Context, fetch *SingleFetch, item return nil } -func (l *V2Loader) loadEntityFetch(ctx context.Context, fetch *EntityFetch, items []int, res *result) error { +func (l *Loader) loadEntityFetch(ctx context.Context, fetch *EntityFetch, items []int, res *result) error { itemData := pool.BytesBuffer.Get() defer pool.BytesBuffer.Put(itemData) preparedInput := pool.BytesBuffer.Get() @@ -468,6 +492,15 @@ func (l *V2Loader) loadEntityFetch(ctx context.Context, fetch *EntityFetch, item return errors.WithStack(err) } + if l.traceOptions.Enable { + fetch.Trace = 
&DataSourceLoadTrace{} + if !l.traceOptions.ExcludeRawInputData { + itemDataCopy := make([]byte, itemData.Len()) + copy(itemDataCopy, itemData.Bytes()) + fetch.Trace.RawInputData = itemDataCopy + } + } + var undefinedVariables []string err = fetch.Input.Header.RenderAndCollectUndefinedVariables(l.ctx, nil, preparedInput, &undefinedVariables) @@ -480,6 +513,9 @@ func (l *V2Loader) loadEntityFetch(ctx context.Context, fetch *EntityFetch, item if fetch.Input.SkipErrItem { err = nil // nolint:ineffassign // skip fetch on render item error + if l.traceOptions.Enable { + fetch.Trace.LoadSkipped = true + } return nil } return errors.WithStack(err) @@ -488,11 +524,17 @@ func (l *V2Loader) loadEntityFetch(ctx context.Context, fetch *EntityFetch, item if bytes.Equal(renderedItem, null) { // skip fetch if item is null res.fetchAborted = true + if l.traceOptions.Enable { + fetch.Trace.LoadSkipped = true + } return nil } if bytes.Equal(renderedItem, emptyObject) { // skip fetch if item is empty res.fetchAborted = true + if l.traceOptions.Enable { + fetch.Trace.LoadSkipped = true + } return nil } _, _ = item.WriteTo(preparedInput) @@ -506,7 +548,7 @@ func (l *V2Loader) loadEntityFetch(ctx context.Context, fetch *EntityFetch, item return errors.WithStack(err) } - err = l.executeSourceLoad(ctx, fetch.DisallowSingleFlight, fetch.DataSource, preparedInput.Bytes(), res.out) + err = l.executeSourceLoad(ctx, fetch.DisallowSingleFlight, fetch.DataSource, preparedInput.Bytes(), res.out, fetch.Trace) if err != nil { return errors.WithStack(err) } @@ -514,8 +556,19 @@ func (l *V2Loader) loadEntityFetch(ctx context.Context, fetch *EntityFetch, item return nil } -func (l *V2Loader) loadBatchEntityFetch(ctx context.Context, fetch *BatchEntityFetch, items []int, res *result) error { +func (l *Loader) loadBatchEntityFetch(ctx context.Context, fetch *BatchEntityFetch, items []int, res *result) error { res.postProcessing = fetch.PostProcessing + if l.traceOptions.Enable { + fetch.Trace = &DataSourceLoadTrace{} + if !l.traceOptions.ExcludeRawInputData { + buf := &bytes.Buffer{} + err := l.itemsData(items, buf) + if err != nil { + return errors.WithStack(err) + } + fetch.Trace.RawInputData = buf.Bytes() + } + } preparedInput := pool.BytesBuffer.Get() defer pool.BytesBuffer.Put(preparedInput) @@ -556,6 +609,9 @@ WithNextItem: res.batchStats[i] = append(res.batchStats[i], -1) continue } + if l.traceOptions.Enable { + fetch.Trace.LoadSkipped = true + } return errors.WithStack(err) } if fetch.Input.SkipNullItems && itemInput.Len() == 4 && bytes.Equal(itemInput.Bytes(), null) { @@ -593,6 +649,9 @@ WithNextItem: if len(itemHashes) == 0 { // all items were skipped - discard fetch res.fetchAborted = true + if l.traceOptions.Enable { + fetch.Trace.LoadSkipped = true + } return nil } @@ -605,18 +664,34 @@ WithNextItem: if err != nil { return errors.WithStack(err) } - - err = l.executeSourceLoad(ctx, fetch.DisallowSingleFlight, fetch.DataSource, preparedInput.Bytes(), res.out) + err = l.executeSourceLoad(ctx, fetch.DisallowSingleFlight, fetch.DataSource, preparedInput.Bytes(), res.out, fetch.Trace) if err != nil { return errors.WithStack(err) } return nil } -func (l *V2Loader) executeSourceLoad(ctx context.Context, disallowSingleFlight bool, source DataSource, input []byte, out io.Writer) error { +func (l *Loader) executeSourceLoad(ctx context.Context, disallowSingleFlight bool, source DataSource, input []byte, out io.Writer, trace *DataSourceLoadTrace) error { + if l.traceOptions.Enable { + if !l.traceOptions.ExcludeInput { + 
trace.Input = []byte(string(input)) // copy input explicitly, omit __trace__ field + } + if gjson.ValidBytes(input) { + inputCopy := make([]byte, len(input)) + copy(inputCopy, input) + input, _ = jsonparser.Set(inputCopy, []byte("true"), "__trace__") + } + if !l.traceOptions.ExcludeLoadStats { + trace.DurationSinceStartNano = GetDurationNanoSinceTraceStart(ctx) + trace.DurationSinceStartPretty = time.Duration(trace.DurationSinceStartNano).String() + } + } if !l.enableSingleFlight || disallowSingleFlight { return source.Load(ctx, input, out) } + if l.traceOptions.Enable { + trace.SingleFlightUsed = true + } keyGen := pool.Hash64.Get() defer pool.Hash64.Put(keyGen) _, err := keyGen.Write(input) @@ -624,7 +699,7 @@ func (l *V2Loader) executeSourceLoad(ctx context.Context, disallowSingleFlight b return errors.WithStack(err) } key := keyGen.Sum64() - data, err, _ := l.sf.Do(key, func() ([]byte, error) { + data, err, shared := l.sf.Do(key, func() ([]byte, error) { singleBuffer := pool.BytesBuffer.Get() defer pool.BytesBuffer.Put(singleBuffer) err := source.Load(ctx, input, singleBuffer) @@ -636,7 +711,28 @@ func (l *V2Loader) executeSourceLoad(ctx context.Context, disallowSingleFlight b copy(cp, data) return cp, nil }) + if l.traceOptions.Enable { + if !l.traceOptions.ExcludeOutput && data != nil { + if l.traceOptions.EnablePredictableDebugTimings { + trace.Output = jsonparser.Delete([]byte(string(data)), "extensions", "trace", "response", "headers", "Date") + } else { + trace.Output = []byte(string(data)) + } + } + trace.SingleFlightSharedResponse = shared + if !l.traceOptions.ExcludeLoadStats { + if l.traceOptions.EnablePredictableDebugTimings { + trace.DurationLoadNano = 1 + } else { + trace.DurationLoadNano = GetDurationNanoSinceTraceStart(ctx) - trace.DurationSinceStartNano + } + trace.DurationLoadPretty = time.Duration(trace.DurationLoadNano).String() + } + } if err != nil { + if l.traceOptions.Enable { + trace.LoadError = err.Error() + } return errors.WithStack(err) } _, err = out.Write(data) diff --git a/v2/pkg/engine/resolve/v2load_test.go b/v2/pkg/engine/resolve/loader_test.go similarity index 99% rename from v2/pkg/engine/resolve/v2load_test.go rename to v2/pkg/engine/resolve/loader_test.go index 54d24ea6d7..7649514d8f 100644 --- a/v2/pkg/engine/resolve/v2load_test.go +++ b/v2/pkg/engine/resolve/loader_test.go @@ -286,7 +286,7 @@ func TestV2Loader_LoadGraphQLResponseData(t *testing.T) { resolvable := &Resolvable{ storage: &astjson.JSON{}, } - loader := &V2Loader{} + loader := &Loader{} err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) assert.NoError(t, err) err = loader.LoadGraphQLResponseData(ctx, response, resolvable) @@ -564,7 +564,7 @@ func BenchmarkV2Loader_LoadGraphQLResponseData(b *testing.B) { resolvable := &Resolvable{ storage: &astjson.JSON{}, } - loader := &V2Loader{} + loader := &Loader{} expected := []byte(`{"errors":[],"data":{"topProducts":[{"name":"Table","__typename":"Product","upc":"1","reviews":[{"body":"Love Table!","author":{"__typename":"User","id":"1","name":"user-1"}},{"body":"Prefer other Table.","author":{"__typename":"User","id":"2","name":"user-2"}}],"stock":8},{"name":"Couch","__typename":"Product","upc":"2","reviews":[{"body":"Couch Too expensive.","author":{"__typename":"User","id":"1","name":"user-1"}}],"stock":2},{"name":"Chair","__typename":"Product","upc":"3","reviews":[{"body":"Chair Could be better.","author":{"__typename":"User","id":"2","name":"user-2"}}],"stock":5}]}}`) out := &bytes.Buffer{} b.SetBytes(int64(len(expected))) diff --git 
a/v2/pkg/engine/resolve/resolvable.go b/v2/pkg/engine/resolve/resolvable.go index 05d28494c6..43012516ff 100644 --- a/v2/pkg/engine/resolve/resolvable.go +++ b/v2/pkg/engine/resolve/resolvable.go @@ -2,6 +2,7 @@ package resolve import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -13,18 +14,18 @@ import ( ) type Resolvable struct { - storage *astjson.JSON - dataRoot int - errorsRoot int - variablesRoot int - print bool - out io.Writer - printErr error - path []astjson.PathElement - depth int - operationType ast.OperationType - renameTypeNames []RenameTypeName - enableTracing bool + storage *astjson.JSON + dataRoot int + errorsRoot int + variablesRoot int + print bool + out io.Writer + printErr error + path []astjson.PathElement + depth int + operationType ast.OperationType + renameTypeNames []RenameTypeName + requestTraceOptions RequestTraceOptions } func NewResolvable() *Resolvable { @@ -122,7 +123,7 @@ func (r *Resolvable) InitSubscription(ctx *Context, initialData []byte, postProc return } -func (r *Resolvable) Resolve(root *Object, out io.Writer) error { +func (r *Resolvable) Resolve(ctx context.Context, root *Object, out io.Writer) error { r.out = out r.print = false r.printErr = nil @@ -142,7 +143,7 @@ func (r *Resolvable) Resolve(root *Object, out io.Writer) error { r.printData(root) if r.hasExtensions() { r.printBytes(comma) - r.printExtensions(root) + r.printExtensions(ctx, root) } } r.printBytes(rBrace) @@ -175,22 +176,22 @@ func (r *Resolvable) printData(root *Object) { r.printBytes(rBrace) } -func (r *Resolvable) printExtensions(root *Object) { +func (r *Resolvable) printExtensions(ctx context.Context, root *Object) { r.printBytes(quote) r.printBytes(literalExtensions) r.printBytes(quote) r.printBytes(colon) r.printBytes(lBrace) - if r.enableTracing { - r.printTrace(root) + if r.requestTraceOptions.IncludeTraceOutputInResponseExtensions { + r.printTrace(ctx, root) } r.printBytes(rBrace) } -func (r *Resolvable) printTrace(root *Object) { - trace := GetTrace(root) +func (r *Resolvable) printTrace(ctx context.Context, root *Object) { + trace := GetTrace(ctx, root) traceData, err := json.Marshal(trace) if err != nil { @@ -205,7 +206,7 @@ func (r *Resolvable) printTrace(root *Object) { } func (r *Resolvable) hasExtensions() bool { - return r.enableTracing + return r.requestTraceOptions.IncludeTraceOutputInResponseExtensions } func (r *Resolvable) hasErrors() bool { diff --git a/v2/pkg/engine/resolve/resolvable_test.go b/v2/pkg/engine/resolve/resolvable_test.go index 1997fc5399..1eaf0be83c 100644 --- a/v2/pkg/engine/resolve/resolvable_test.go +++ b/v2/pkg/engine/resolve/resolvable_test.go @@ -2,6 +2,7 @@ package resolve import ( "bytes" + "context" "testing" "github.com/stretchr/testify/assert" @@ -75,7 +76,7 @@ func TestResolvable_Resolve(t *testing.T) { } out := &bytes.Buffer{} - err = res.Resolve(object, out) + err = res.Resolve(context.Background(), object, out) assert.NoError(t, err) assert.Equal(t, `{"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":{"name":"user-1"}},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]}}`, out.String()) } @@ -148,7 +149,7 @@ func TestResolvable_ResolveWithTypeMismatch(t *testing.T) { } out := &bytes.Buffer{} - err = res.Resolve(object, out) + err = res.Resolve(context.Background(), object, out) 
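// annotation: Resolve now takes a context.Context as its first argument so the
// trace printer can pull request-scoped data (trace start, planner stats) from
// the context when rendering the extensions object; call sites pass
// context.Background() where no trace data is expected.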
assert.NoError(t, err) assert.Equal(t, `{"errors":[{"message":"String cannot represent non-string value: \"true\"","path":["topProducts",0,"reviews",0,"author","name"]}],"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":null},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]}}`, out.String()) } @@ -221,7 +222,7 @@ func TestResolvable_ResolveWithErrorBubbleUp(t *testing.T) { } out := &bytes.Buffer{} - err = res.Resolve(object, out) + err = res.Resolve(context.Background(), object, out) assert.NoError(t, err) assert.Equal(t, `{"errors":[{"message":"Cannot return null for non-nullable field Query.topProducts.reviews.author.name.","path":["topProducts",0,"reviews",0,"author","name"]}],"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":null},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]}}`, out.String()) } @@ -293,7 +294,7 @@ func TestResolvable_ResolveWithErrorBubbleUpUntilData(t *testing.T) { } out := &bytes.Buffer{} - err = res.Resolve(object, out) + err = res.Resolve(context.Background(), object, out) assert.NoError(t, err) assert.Equal(t, `{"errors":[{"message":"Cannot return null for non-nullable field Query.topProducts.reviews.author.name.","path":["topProducts",0,"reviews",1,"author","name"]}],"data":null}`, out.String()) } @@ -371,7 +372,7 @@ func BenchmarkResolvable_Resolve(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { out.Reset() - err = res.Resolve(object, out) + err = res.Resolve(context.Background(), object, out) if err != nil { b.Fatal(err) } @@ -449,7 +450,7 @@ func BenchmarkResolvable_ResolveWithErrorBubbleUp(b *testing.B) { } out := &bytes.Buffer{} - err = res.Resolve(object, out) + err = res.Resolve(context.Background(), object, out) assert.NoError(b, err) expected := []byte(`{"errors":[{"message":"Cannot return null for non-nullable field Query.topProducts.reviews.author.name.","path":["topProducts",0,"reviews",0,"author","name"]}],"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":null},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]}}`) b.SetBytes(int64(len(expected))) @@ -457,7 +458,7 @@ func BenchmarkResolvable_ResolveWithErrorBubbleUp(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { out.Reset() - err = res.Resolve(object, out) + err = res.Resolve(context.Background(), object, out) if err != nil { b.Fatal(err) } @@ -470,10 +471,10 @@ func BenchmarkResolvable_ResolveWithErrorBubbleUp(b *testing.B) { func TestResolvable_WithTracing(t *testing.T) { topProducts := `{"topProducts":[{"name":"Table","__typename":"Product","upc":"1","reviews":[{"body":"Love Table!","author":{"__typename":"User","id":"1","name":"user-1"}},{"body":"Prefer other Table.","author":{"__typename":"User","id":"2","name":"user-2"}}],"stock":8},{"name":"Couch","__typename":"Product","upc":"2","reviews":[{"body":"Couch Too 
expensive.","author":{"__typename":"User","id":"1","name":"user-1"}}],"stock":2},{"name":"Chair","__typename":"Product","upc":"3","reviews":[{"body":"Chair Could be better.","author":{"__typename":"User","id":"2","name":"user-2"}}],"stock":5}]}` res := NewResolvable() - res.enableTracing = true - ctx := &Context{ - Variables: nil, - } + res.requestTraceOptions.EnableAll() + res.requestTraceOptions.EnablePredictableDebugTimings = true + background := SetTraceStart(context.Background(), true) + ctx := NewContext(background) err := res.Init(ctx, []byte(topProducts), ast.OperationTypeQuery) assert.NoError(t, err) assert.NotNil(t, res) @@ -534,9 +535,11 @@ func TestResolvable_WithTracing(t *testing.T) { }, } + SetPlannerStats(ctx.ctx, PlannerStats{}) + out := &bytes.Buffer{} - err = res.Resolve(object, out) + err = res.Resolve(ctx.ctx, object, out) assert.NoError(t, err) - assert.Equal(t, `{"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":{"name":"user-1"}},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]},"extensions":{"trace":{"node_type":"object","nullable":true,"fields":[{"name":"topProducts","value":{"node_type":"array","path":["topProducts"],"items":[{"node_type":"object","nullable":true,"fields":[{"name":"name","value":{"node_type":"string","path":["name"]}},{"name":"stock","value":{"node_type":"integer","path":["stock"]}},{"name":"reviews","value":{"node_type":"array","path":["reviews"],"items":[{"node_type":"object","nullable":true,"fields":[{"name":"body","value":{"node_type":"string","path":["body"]}},{"name":"author","value":{"node_type":"object","path":["author"],"fields":[{"name":"name","value":{"node_type":"string","path":["name"]}}]}}]}]}}]}]}}]}}}`, out.String()) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":{"name":"user-1"}},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]},"extensions":{"trace":{"info":{"trace_start_time":"","trace_start_unix":0,"planner_stats":{"planning_time_nanoseconds":5,"planning_time_pretty":"5ns","duration_since_start_nanoseconds":5,"duration_since_start_pretty":"5ns"}},"node_type":"object","nullable":true,"fields":[{"name":"topProducts","value":{"node_type":"array","path":["topProducts"],"items":[{"node_type":"object","nullable":true,"fields":[{"name":"name","value":{"node_type":"string","path":["name"]}},{"name":"stock","value":{"node_type":"integer","path":["stock"]}},{"name":"reviews","value":{"node_type":"array","path":["reviews"],"items":[{"node_type":"object","nullable":true,"fields":[{"name":"body","value":{"node_type":"string","path":["body"]}},{"name":"author","value":{"node_type":"object","path":["author"],"fields":[{"name":"name","value":{"node_type":"string","path":["name"]}}]}}]}]}}]}]}}]}}}`, out.String()) } diff --git a/v2/pkg/engine/resolve/resolve.go b/v2/pkg/engine/resolve/resolve.go index 723c0db71e..ad1e2c584c 100644 --- a/v2/pkg/engine/resolve/resolve.go +++ b/v2/pkg/engine/resolve/resolve.go @@ -21,7 +21,7 @@ type Resolver struct { type tools struct { resolvable *Resolvable - loader *V2Loader + loader *Loader } 
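// A minimal sketch, from a caller's perspective, of switching the new tracing
// on for one request; engineCtx, response and writer are assumed to exist, and
// the nil data argument is assumed to mean "no pre-resolved data":
//
//	traceCtx := resolve.SetTraceStart(context.Background(), true) // flag paired with predictable timings in the test above
//	ctx := resolve.NewContext(traceCtx)
//	ctx.RequestTracingOptions.EnableAll()                // capture everything...
//	ctx.RequestTracingOptions.ExcludeRawInputData = true // ...except raw per-item input
//	resolver := resolve.New(engineCtx, false)
//	if err := resolver.ResolveGraphQLResponse(ctx, response, nil, writer); err != nil {
//		// handle the resolver error
//	}
//	// writer now ends with ,"extensions":{"trace":{...}} because EnableAll
//	// sets IncludeTraceOutputInResponseExtensions.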
// New returns a new Resolver, ctx.Done() is used to cancel all active subscriptions & streams @@ -34,7 +34,7 @@ func New(ctx context.Context, enableSingleFlightLoader bool) *Resolver { New: func() interface{} { return &tools{ resolvable: NewResolvable(), - loader: &V2Loader{}, + loader: &Loader{}, } }, }, @@ -64,7 +64,7 @@ func (r *Resolver) ResolveGraphQLResponse(ctx *Context, response *GraphQLRespons t := r.getTools() defer r.putTools(t) - t.resolvable.enableTracing = ctx.EnableTracing + t.resolvable.requestTraceOptions = ctx.RequestTracingOptions err = t.resolvable.Init(ctx, data, response.Info.OperationType) if err != nil { @@ -76,7 +76,7 @@ func (r *Resolver) ResolveGraphQLResponse(ctx *Context, response *GraphQLRespons return err } - return t.resolvable.Resolve(response.Data, writer) + return t.resolvable.Resolve(ctx.ctx, response.Data, writer) } func (r *Resolver) ResolveGraphQLSubscription(ctx *Context, subscription *GraphQLSubscription, writer FlushWriter) error { @@ -114,7 +114,7 @@ func (r *Resolver) ResolveGraphQLSubscription(ctx *Context, subscription *GraphQ t := r.getTools() defer r.putTools(t) - t.resolvable.enableTracing = ctx.EnableTracing + t.resolvable.requestTraceOptions = ctx.RequestTracingOptions for { select { @@ -131,7 +131,7 @@ func (r *Resolver) ResolveGraphQLSubscription(ctx *Context, subscription *GraphQ if err := t.loader.LoadGraphQLResponseData(ctx, subscription.Response, t.resolvable); err != nil { return err } - if err := t.resolvable.Resolve(subscription.Response.Data, writer); err != nil { + if err := t.resolvable.Resolve(ctx.ctx, subscription.Response.Data, writer); err != nil { return err } writer.Flush() diff --git a/v2/pkg/engine/resolve/resolve_mock_test.go b/v2/pkg/engine/resolve/resolve_mock_test.go index 58ca8078b9..7b512808d5 100644 --- a/v2/pkg/engine/resolve/resolve_mock_test.go +++ b/v2/pkg/engine/resolve/resolve_mock_test.go @@ -67,23 +67,6 @@ func NewMockBeforeFetchHook(ctrl *gomock.Controller) *MockBeforeFetchHook { return mock } -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBeforeFetchHook) EXPECT() *MockBeforeFetchHookMockRecorder { - return m.recorder -} - -// OnBeforeFetch mocks base method. -func (m *MockBeforeFetchHook) OnBeforeFetch(arg0 HookContext, arg1 []byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "OnBeforeFetch", arg0, arg1) -} - -// OnBeforeFetch indicates an expected call of OnBeforeFetch. -func (mr *MockBeforeFetchHookMockRecorder) OnBeforeFetch(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnBeforeFetch", reflect.TypeOf((*MockBeforeFetchHook)(nil).OnBeforeFetch), arg0, arg1) -} - // MockAfterFetchHook is a mock of AfterFetchHook interface. type MockAfterFetchHook struct { ctrl *gomock.Controller @@ -106,27 +89,3 @@ func NewMockAfterFetchHook(ctrl *gomock.Controller) *MockAfterFetchHook { func (m *MockAfterFetchHook) EXPECT() *MockAfterFetchHookMockRecorder { return m.recorder } - -// OnData mocks base method. -func (m *MockAfterFetchHook) OnData(arg0 HookContext, arg1 []byte, arg2 bool) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "OnData", arg0, arg1, arg2) -} - -// OnData indicates an expected call of OnData. -func (mr *MockAfterFetchHookMockRecorder) OnData(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnData", reflect.TypeOf((*MockAfterFetchHook)(nil).OnData), arg0, arg1, arg2) -} - -// OnError mocks base method. 
-func (m *MockAfterFetchHook) OnError(arg0 HookContext, arg1 []byte, arg2 bool) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "OnError", arg0, arg1, arg2) -} - -// OnError indicates an expected call of OnError. -func (mr *MockAfterFetchHookMockRecorder) OnError(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnError", reflect.TypeOf((*MockAfterFetchHook)(nil).OnError), arg0, arg1, arg2) -} diff --git a/v2/pkg/engine/resolve/resolve_test.go b/v2/pkg/engine/resolve/resolve_test.go index fa15782918..a181a5d53b 100644 --- a/v2/pkg/engine/resolve/resolve_test.go +++ b/v2/pkg/engine/resolve/resolve_test.go @@ -3920,114 +3920,6 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) } -func TestResolver_mergeJSON(t *testing.T) { - setup := func() *Loader { - loader := &Loader{ - layers: []*layer{}, - } - return loader - } - t.Run("a", func(t *testing.T) { - loader := setup() - left := `{"name":"Bill","info":{"id":11,"__typename":"Info"},"address":{"id": 55,"__typename":"Address"}}` - right := `{"info":{"age":21},"address":{"line1":"Munich"}}` - expected := `{"address":{"__typename":"Address","id":55,"line1":"Munich"},"info":{"__typename":"Info","age":21,"id":11},"name":"Bill"}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("b", func(t *testing.T) { - loader := setup() - left := `{"id":"1234","username":"Me","__typename":"User"}` - right := `{"reviews":[{"body": "A highly effective form of birth control.","product": {"upc": "top-1","__typename": "Product"}},{"body": "Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product": {"upc": "top-2","__typename": "Product"}}]}` - expected := `{"__typename":"User","id":"1234","reviews":[{"body":"A highly effective form of birth control.","product":{"__typename":"Product","upc":"top-1"}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"__typename":"Product","upc":"top-2"}}],"username":"Me"}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("c", func(t *testing.T) { - loader := setup() - left := `{"__typename":"Product","upc":"top-1"}` - right := `{"name": "Trilby"}` - expected := `{"__typename":"Product","name":"Trilby","upc":"top-1"}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("d", func(t *testing.T) { - loader := setup() - left := `{"__typename":"Product","upc":"top-1"}` - right := `{"__typename":"Product","name":"Trilby","upc":"top-1"}` - expected := `{"__typename":"Product","name":"Trilby","upc":"top-1"}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("e", func(t *testing.T) { - loader := setup() - left := `{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"}` - right := `{"__typename":"Address","country":"country-1","city":"city-1"}` - expected := `{"__typename":"Address","city":"city-1","country":"country-1","id":"address-1","line1":"line1","line2":"line2"}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("f", func(t *testing.T) { - loader := setup() - left := 
`{"__typename":"Address","city":"city-1","country":"country-1","id":"address-1","line1":"line1","line2":"line2"}` - right := `{"__typename": "Address", "line3": "line3-1", "zip": "zip-1"}` - expected := `{"__typename":"Address","city":"city-1","country":"country-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("g", func(t *testing.T) { - loader := setup() - left := `{"__typename":"Address","city":"city-1","country":"country-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}` - right := `{"__typename":"Address","fullAddress":"line1 line2 line3-1 city-1 country-1 zip-1"}` - expected := `{"__typename":"Address","city":"city-1","country":"country-1","fullAddress":"line1 line2 line3-1 city-1 country-1 zip-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("h", func(t *testing.T) { - loader := setup() - left := `{"address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"}}` - right := `{"__typename":"Address","city":"city-1","country":"country-1","fullAddress":"line1 line2 line3-1 city-1 country-1 zip-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}` - expected := `{"__typename":"Address","address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"},"city":"city-1","country":"country-1","fullAddress":"line1 line2 line3-1 city-1 country-1 zip-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("i", func(t *testing.T) { - loader := setup() - left := `{"account":{"address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"}}}` - right := `{"address":{"__typename":"Address","address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"},"city":"city-1","country":"country-1","fullAddress":"line1 line2 line3-1 city-1 country-1 zip-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}}` - expected := `{"account":{"address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"}},"address":{"__typename":"Address","address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"},"city":"city-1","country":"country-1","fullAddress":"line1 line2 line3-1 city-1 country-1 zip-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) - - t.Run("j", func(t *testing.T) { - loader := setup() - left := `{"user":{"account":{"address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"}}}}` - right := `{"account":{"account":{"address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"}},"address":{"__typename":"Address","address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"},"city":"city-1","country":"country-1","fullAddress":"line1 line2 line3-1 city-1 country-1 
zip-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}}}` - expected := `{"account":{"account":{"address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"}},"address":{"__typename":"Address","address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"},"city":"city-1","country":"country-1","fullAddress":"line1 line2 line3-1 city-1 country-1 zip-1","id":"address-1","line1":"line1","line2":"line2","line3":"line3-1","zip":"zip-1"}},"user":{"account":{"address":{"__typename":"Address","id":"address-1","line1":"line1","line2":"line2"}}}}` - out, err := loader.mergeJSON([]byte(left), []byte(right)) - assert.NoError(t, err) - assert.JSONEq(t, expected, string(out)) - }) -} - func Benchmark_ResolveGraphQLResponse(b *testing.B) { rCtx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -4885,16 +4777,3 @@ func Benchmark_NestedBatchingWithoutChecks(b *testing.B) { } }) } - -type hookContextPathMatcher struct { - path string -} - -func (h hookContextPathMatcher) Matches(x interface{}) bool { - path := string(x.(HookContext).CurrentPath) - return path == h.path -} - -func (h hookContextPathMatcher) String() string { - return fmt.Sprintf("is equal to %s", h.path) -} diff --git a/v2/pkg/engine/resolve/trace.go b/v2/pkg/engine/resolve/trace.go index fdb138bcaa..b4c89d1c66 100644 --- a/v2/pkg/engine/resolve/trace.go +++ b/v2/pkg/engine/resolve/trace.go @@ -1,5 +1,50 @@ package resolve +import ( + "context" + "encoding/json" +) + +type RequestTraceOptions struct { + // Enable switches tracing on or off + Enable bool + // ExcludePlannerStats excludes planner timing information from the trace output + ExcludePlannerStats bool + // ExcludeRawInputData excludes the raw input for a load operation from the trace output + ExcludeRawInputData bool + // ExcludeInput excludes the rendered input for a load operation from the trace output + ExcludeInput bool + // ExcludeOutput excludes the result of a load operation from the trace output + ExcludeOutput bool + // ExcludeLoadStats excludes the load timing information from the trace output + ExcludeLoadStats bool + // EnablePredictableDebugTimings makes the timings in the trace output predictable for debugging purposes + EnablePredictableDebugTimings bool + IncludeTraceOutputInResponseExtensions bool +} + +func (r *RequestTraceOptions) EnableAll() { + r.Enable = true + r.ExcludePlannerStats = false + r.ExcludeRawInputData = false + r.ExcludeInput = false + r.ExcludeOutput = false + r.ExcludeLoadStats = false + r.EnablePredictableDebugTimings = false + r.IncludeTraceOutputInResponseExtensions = true +} + +func (r *RequestTraceOptions) DisableAll() { + r.Enable = false + r.ExcludePlannerStats = true + r.ExcludeRawInputData = true + r.ExcludeInput = true + r.ExcludeOutput = true + r.ExcludeLoadStats = true + r.EnablePredictableDebugTimings = false + r.IncludeTraceOutputInResponseExtensions = false +} + type TraceFetchType string const ( @@ -30,20 +75,27 @@ const ( ) type TraceFetch struct { - Type TraceFetchType `json:"type,omitempty"` - Fetches []*TraceFetch `json:"fetches,omitempty"` - DataSource string `json:"datasource,omitempty"` + Type TraceFetchType `json:"type,omitempty"` + DataSourceID string `json:"data_source_id,omitempty"` + Fetches []*TraceFetch `json:"fetches,omitempty"` + DataSourceLoadTrace *DataSourceLoadTrace `json:"datasource_load_trace,omitempty"` + DataSourceLoadTraces []*DataSourceLoadTrace `json:"data_source_load_traces,omitempty"` +} + +type 
TraceFetchEvents struct { + InputBeforeSourceLoad json.RawMessage `json:"input_before_source_load,omitempty"` } type TraceField struct { Name string `json:"name,omitempty"` Value *TraceNode `json:"value,omitempty"` - ParentTypeNames []string `json:"parentTypeNames,omitempty"` - NamedType string `json:"namedType,omitempty"` - SourceIDs []string `json:"sourceIDs,omitempty"` + ParentTypeNames []string `json:"parent_type_names,omitempty"` + NamedType string `json:"named_type,omitempty"` + DataSourceIDs []string `json:"data_source_ids,omitempty"` } type TraceNode struct { + Info *TraceInfo `json:"info,omitempty"` Fetch *TraceFetch `json:"fetch,omitempty"` NodeType TraceNodeType `json:"node_type,omitempty"` Nullable bool `json:"nullable,omitempty"` @@ -101,7 +153,7 @@ func parseField(f *Field) *TraceField { field.ParentTypeNames = f.Info.ParentTypeNames field.NamedType = f.Info.NamedType - field.SourceIDs = f.Info.Source.IDs + field.DataSourceIDs = f.Info.Source.IDs return field } @@ -112,6 +164,12 @@ func parseFetch(fetch Fetch) *TraceFetch { switch f := fetch.(type) { case *SingleFetch: traceFetch.Type = TraceFetchTypeSingle + if f.Trace != nil { + traceFetch.DataSourceLoadTrace = f.Trace + } + if f.Info != nil { + traceFetch.DataSourceID = f.Info.DataSourceID + } case *ParallelFetch: traceFetch.Type = TraceFetchTypeParallel @@ -128,12 +186,33 @@ func parseFetch(fetch Fetch) *TraceFetch { case *ParallelListItemFetch: traceFetch.Type = TraceFetchTypeParallelListItem traceFetch.Fetches = append(traceFetch.Fetches, parseFetch(f.Fetch)) - + if f.Traces != nil { + for _, trace := range f.Traces { + if trace.Trace != nil { + traceFetch.DataSourceLoadTraces = append(traceFetch.DataSourceLoadTraces, trace.Trace) + } + if trace.Info != nil { + traceFetch.DataSourceID = trace.Info.DataSourceID + } + } + } case *EntityFetch: traceFetch.Type = TraceFetchTypeEntity + if f.Trace != nil { + traceFetch.DataSourceLoadTrace = f.Trace + } + if f.Info != nil { + traceFetch.DataSourceID = f.Info.DataSourceID + } case *BatchEntityFetch: traceFetch.Type = TraceFetchTypeBatchEntity + if f.Trace != nil { + traceFetch.DataSourceLoadTrace = f.Trace + } + if f.Info != nil { + traceFetch.DataSourceID = f.Info.DataSourceID + } default: return nil @@ -173,6 +252,8 @@ func parseNode(n Node) *TraceNode { return node } -func GetTrace(root *Object) *TraceNode { - return parseNode(root) +func GetTrace(ctx context.Context, root *Object) *TraceNode { + node := parseNode(root) + node.Info = GetTraceInfo(ctx) + return node } diff --git a/v2/pkg/graphql/execution_engine_v2.go b/v2/pkg/graphql/execution_engine_v2.go index 4ebe2ad384..9183d3422f 100644 --- a/v2/pkg/graphql/execution_engine_v2.go +++ b/v2/pkg/graphql/execution_engine_v2.go @@ -154,18 +154,6 @@ type WebsocketBeforeStartHook interface { type ExecutionOptionsV2 func(ctx *internalExecutionContext) -func WithBeforeFetchHook(hook resolve.BeforeFetchHook) ExecutionOptionsV2 { - return func(ctx *internalExecutionContext) { - ctx.resolveContext.SetBeforeFetchHook(hook) - } -} - -func WithAfterFetchHook(hook resolve.AfterFetchHook) ExecutionOptionsV2 { - return func(ctx *internalExecutionContext) { - ctx.resolveContext.SetAfterFetchHook(hook) - } -} - func WithAdditionalHttpHeaders(headers http.Header, excludeByKeys ...string) ExecutionOptionsV2 { return func(ctx *internalExecutionContext) { if len(headers) == 0 { diff --git a/v2/pkg/graphql/execution_engine_v2_test.go b/v2/pkg/graphql/execution_engine_v2_test.go index a152d92b29..7b44e0daa0 100644 --- 
a/v2/pkg/graphql/execution_engine_v2_test.go +++ b/v2/pkg/graphql/execution_engine_v2_test.go @@ -1234,94 +1234,6 @@ func testNetHttpClient(t *testing.T, testCase roundTripperTestCase) *http.Client } } -type beforeFetchHook struct { - input string -} - -func (b *beforeFetchHook) OnBeforeFetch(_ resolve.HookContext, input []byte) { - b.input += string(input) -} - -type afterFetchHook struct { - data string - err string -} - -func (a *afterFetchHook) OnData(_ resolve.HookContext, output []byte, _ bool) { - a.data += string(output) -} - -func (a *afterFetchHook) OnError(_ resolve.HookContext, output []byte, _ bool) { - a.err += string(output) -} - -func TestExecutionWithOptions(t *testing.T) { - t.Skip("FIXME") - - closer := make(chan struct{}) - defer close(closer) - - testCase := ExecutionEngineV2TestCase{ - schema: starwarsSchema(t), - operation: loadStarWarsQuery(starwars.FileSimpleHeroQuery, nil), - dataSources: []plan.DataSourceConfiguration{ - { - RootNodes: []plan.TypeField{ - { - TypeName: "Query", - FieldNames: []string{"hero"}, - }, - }, - ChildNodes: []plan.TypeField{ - { - TypeName: "Character", - FieldNames: []string{"name"}, - }, - }, - Factory: &graphql_datasource.Factory{ - HTTPClient: testNetHttpClient(t, roundTripperTestCase{ - expectedHost: "example.com", - expectedPath: "/", - expectedBody: "", - sendResponseBody: `{"data":{"hero":{"name":"Luke Skywalker"}}}`, - sendStatusCode: 200, - }), - }, - Custom: graphql_datasource.ConfigJson(graphql_datasource.Configuration{ - Fetch: graphql_datasource.FetchConfiguration{ - URL: "https://example.com/", - Method: "GET", - }, - }), - }, - }, - fields: []plan.FieldConfiguration{}, - expectedResponse: `{"data":{"hero":{"name":"Luke Skywalker"}}}`, - } - - engineConf := NewEngineV2Configuration(testCase.schema) - engineConf.SetDataSources(testCase.dataSources) - engineConf.SetFieldConfigurations(testCase.fields) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - engine, err := NewExecutionEngineV2(ctx, abstractlogger.Noop{}, engineConf) - require.NoError(t, err) - - before := &beforeFetchHook{} - after := &afterFetchHook{} - - operation := testCase.operation(t) - resultWriter := NewEngineResultWriter() - err = engine.Execute(context.Background(), &operation, &resultWriter, WithBeforeFetchHook(before), WithAfterFetchHook(after)) - - assert.Equal(t, `{"method":"GET","url":"https://example.com/","body":{"query":"{hero {name}}"}}`, before.input) - assert.Equal(t, `{"hero":{"name":"Luke Skywalker"}}`, after.data) - assert.Equal(t, "", after.err) - assert.NoError(t, err) -} - func TestExecutionEngineV2_GetCachedPlan(t *testing.T) { schema, err := NewSchemaFromString(testSubscriptionDefinition) require.NoError(t, err)
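// annotation: the deletions above remove the hook-based observability
// (OnBeforeFetch/OnData/OnError and the WithBeforeFetchHook/WithAfterFetchHook
// options) in favor of per-fetch trace records. A hedged sketch of reading the
// equivalent data back out of a traced response, assuming the result writer
// exposes its buffer via Bytes():
//
//	var payload struct {
//		Extensions struct {
//			Trace resolve.TraceNode `json:"trace"`
//		} `json:"extensions"`
//	}
//	if err := json.Unmarshal(resultWriter.Bytes(), &payload); err == nil &&
//		payload.Extensions.Trace.Fetch != nil {
//		lt := payload.Extensions.Trace.Fetch.DataSourceLoadTrace
//		_ = lt // lt.Input, lt.Output and lt.LoadError cover what the removed
//		// fetch hooks used to expose, plus the load timing fields.
//	}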