diff --git a/internal/http/interceptors/auth/auth.go b/internal/http/interceptors/auth/auth.go new file mode 100644 index 0000000000..64b9f53353 --- /dev/null +++ b/internal/http/interceptors/auth/auth.go @@ -0,0 +1,420 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package auth + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/bluele/gcache" + authpb "github.com/cs3org/go-cs3apis/cs3/auth/provider/v1beta1" + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + "github.com/mitchellh/mapstructure" + "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/credential/registry" + tokenregistry "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/token/registry" + tokenwriterregistry "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/tokenwriter/registry" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/auth" + "github.com/opencloud-eu/reva/v2/pkg/auth/scope" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/global" + "github.com/opencloud-eu/reva/v2/pkg/sharedconf" + "github.com/opencloud-eu/reva/v2/pkg/token" + tokenmgr "github.com/opencloud-eu/reva/v2/pkg/token/manager/registry" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/pkg/errors" + "github.com/rs/zerolog" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc/metadata" +) + +// name is the Tracer name used to identify this instrumentation library. +const tracerName = "auth" + +var ( + cacheOnce sync.Once + userGroupsCache gcache.Cache +) + +type config struct { + Priority int `mapstructure:"priority"` + GatewaySvc string `mapstructure:"gatewaysvc"` + // TODO(jdf): Realm is optional, will be filled with request host if not given? 
+ Realm string `mapstructure:"realm"` + CredentialsByUserAgent map[string]string `mapstructure:"credentials_by_user_agent"` + CredentialChain []string `mapstructure:"credential_chain"` + CredentialStrategies map[string]map[string]interface{} `mapstructure:"credential_strategies"` + TokenStrategyChain []string `mapstructure:"token_strategy_chain"` + TokenStrategies map[string]map[string]interface{} `mapstructure:"token_strategies"` + TokenManager string `mapstructure:"token_manager"` + TokenManagers map[string]map[string]interface{} `mapstructure:"token_managers"` + TokenWriter string `mapstructure:"token_writer"` + TokenWriters map[string]map[string]interface{} `mapstructure:"token_writers"` + UserGroupsCacheSize int `mapstructure:"usergroups_cache_size"` +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + err = errors.Wrap(err, "error decoding conf") + return nil, err + } + return c, nil +} + +// New returns a new middleware with defined priority. 
+func New(m map[string]interface{}, unprotected []string, tp trace.TracerProvider) (global.Middleware, error) { + conf, err := parseConfig(m) + if err != nil { + return nil, err + } + + conf.GatewaySvc = sharedconf.GetGatewaySVC(conf.GatewaySvc) + + // set defaults + if len(conf.TokenStrategyChain) == 0 { + conf.TokenStrategyChain = []string{"header"} + } + + if conf.TokenWriter == "" { + conf.TokenWriter = "header" + } + + if conf.TokenManager == "" { + conf.TokenManager = "jwt" + } + + if conf.CredentialsByUserAgent == nil { + conf.CredentialsByUserAgent = map[string]string{} + } + + if conf.UserGroupsCacheSize == 0 { + conf.UserGroupsCacheSize = 5000 + } + + cacheOnce.Do(func() { + userGroupsCache = gcache.New(conf.UserGroupsCacheSize).LFU().Build() + }) + + credChain := map[string]auth.CredentialStrategy{} + for i, key := range conf.CredentialChain { + f, ok := registry.NewCredentialFuncs[conf.CredentialChain[i]] + if !ok { + return nil, fmt.Errorf("credential strategy not found: %s", conf.CredentialChain[i]) + } + + credStrategy, err := f(conf.CredentialStrategies[conf.CredentialChain[i]]) + if err != nil { + return nil, err + } + credChain[key] = credStrategy + } + + tokenStrategyChain := make([]auth.TokenStrategy, 0, len(conf.TokenStrategyChain)) + for _, strategy := range conf.TokenStrategyChain { + g, ok := tokenregistry.NewTokenFuncs[strategy] + if !ok { + return nil, fmt.Errorf("token strategy not found: %s", strategy) + } + tokenStrategy, err := g(conf.TokenStrategies[strategy]) + if err != nil { + return nil, err + } + tokenStrategyChain = append(tokenStrategyChain, tokenStrategy) + } + + h, ok := tokenmgr.NewFuncs[conf.TokenManager] + if !ok { + return nil, fmt.Errorf("token manager not found: %s", conf.TokenManager) + } + + tokenManager, err := h(conf.TokenManagers[conf.TokenManager]) + if err != nil { + return nil, err + } + + i, ok := tokenwriterregistry.NewTokenFuncs[conf.TokenWriter] + if !ok { + return nil, fmt.Errorf("token writer not found: 
%s", conf.TokenWriter) + } + + tokenWriter, err := i(conf.TokenWriters[conf.TokenWriter]) + if err != nil { + return nil, err + } + + chain := func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // OPTION requests need to pass for preflight requests + // TODO(labkode): this will break options for auth protected routes. + // Maybe running the CORS middleware before auth kicks in is enough. + ctx := r.Context() + span := trace.SpanFromContext(ctx) + defer span.End() + if !span.SpanContext().HasTraceID() { + _, span = tp.Tracer(tracerName).Start(ctx, "http auth interceptor") + } + + if r.Method == "OPTIONS" { + h.ServeHTTP(w, r) + return + } + + log := appctx.GetLogger(r.Context()) + isUnprotectedEndpoint := false + + // For unprotected URLs, we try to authenticate the request in case some service needs it, + // but don't return any errors if it fails. + if utils.Skip(r.URL.Path, unprotected) { + log.Info().Msg("skipping auth check for: " + r.URL.Path) + isUnprotectedEndpoint = true + } + + ctx, err := authenticateUser(w, r, conf, tokenStrategyChain, tokenManager, tokenWriter, credChain, isUnprotectedEndpoint) + if err != nil { + if !isUnprotectedEndpoint { + return + } + } else { + u, ok := ctxpkg.ContextGetUser(ctx) + if ok { + span.SetAttributes(semconv.EnduserIDKey.String(u.Id.OpaqueId)) + } + + r = r.WithContext(ctx) + } + h.ServeHTTP(w, r) + + }) + } + return chain, nil +} + +func authenticateUser(w http.ResponseWriter, r *http.Request, conf *config, tokenStrategies []auth.TokenStrategy, tokenManager token.Manager, tokenWriter auth.TokenWriter, credChain map[string]auth.CredentialStrategy, isUnprotectedEndpoint bool) (context.Context, error) { + ctx := r.Context() + log := appctx.GetLogger(ctx) + + // Add the request user-agent to the ctx + ctx = metadata.NewIncomingContext(ctx, metadata.New(map[string]string{ctxpkg.UserAgentHeader: r.UserAgent()})) + + client, err := 
pool.GetGatewayServiceClient(conf.GatewaySvc) + if err != nil { + logError(isUnprotectedEndpoint, log, err, "error getting the authsvc client", http.StatusUnauthorized, w) + return nil, err + } + + // reva token or auth token can be passed using the same technique (for example bearer) + // before validating it against an auth provider, we can check directly if it's a reva + // token and if not try to use it for authenticating the user. + for _, tokenStrategy := range tokenStrategies { + token := tokenStrategy.GetToken(r) + if token != "" { + if user, tokenScope, ok := isTokenValid(r, tokenManager, token); ok { + if err := insertGroupsInUser(ctx, userGroupsCache, client, user); err != nil { + logError(isUnprotectedEndpoint, log, err, "got an error retrieving groups for user "+user.Username, http.StatusInternalServerError, w) + return nil, err + } + return ctxWithUserInfo(ctx, r, user, token, tokenScope, r.Header.Get(ctxpkg.InitiatorHeader)), nil + } + } + } + + log.Warn().Msg("core access token not set") + + userAgentCredKeys := getCredsForUserAgent(r.UserAgent(), conf.CredentialsByUserAgent, conf.CredentialChain) + + // obtain credentials (basic auth, bearer token, ...) 
based on user agent + var creds *auth.Credentials + for _, k := range userAgentCredKeys { + creds, err = credChain[k].GetCredentials(w, r) + if err != nil { + log.Debug().Err(err).Msg("error retrieving credentials") + } + + if creds != nil { + log.Debug().Msgf("credentials obtained from credential strategy: type: %s, client_id: %s", creds.Type, creds.ClientID) + break + } + } + + // if no credentials are found, reply with authentication challenge depending on user agent + if creds == nil { + if !isUnprotectedEndpoint { + for _, key := range userAgentCredKeys { + if cred, ok := credChain[key]; ok { + cred.AddWWWAuthenticate(w, r, conf.Realm) + } else { + log.Error().Msg("auth credential strategy: " + key + "must have been loaded in init method") + w.WriteHeader(http.StatusInternalServerError) + return nil, errtypes.InternalError("no credentials found") + } + } + w.WriteHeader(http.StatusUnauthorized) + } + return nil, errtypes.PermissionDenied("no credentials found") + } + + req := &gateway.AuthenticateRequest{ + Type: creds.Type, + ClientId: creds.ClientID, + ClientSecret: creds.ClientSecret, + } + + log.Debug().Msgf("AuthenticateRequest: type: %s, client_id: %s against %s", req.Type, req.ClientId, conf.GatewaySvc) + + res, err := client.Authenticate(ctx, req) + if err != nil { + logError(isUnprotectedEndpoint, log, err, "error calling Authenticate", http.StatusUnauthorized, w) + return nil, err + } + + if res.Status.Code != rpc.Code_CODE_OK { + err := status.NewErrorFromCode(res.Status.Code, "auth") + logError(isUnprotectedEndpoint, log, err, "error generating access token from credentials", http.StatusUnauthorized, w) + return nil, err + } + + log.Info().Msg("core access token generated") // write token to response + + // write token to response + token := res.Token + tokenWriter.WriteToken(token, w) + + // validate token + u, tokenScope, err := tokenManager.DismantleToken(r.Context(), token) + if err != nil { + logError(isUnprotectedEndpoint, log, err, "error 
dismantling token", http.StatusUnauthorized, w) + return nil, err + } + + if sharedconf.SkipUserGroupsInToken() { + var groups []string + if groupsIf, err := userGroupsCache.Get(u.Id.OpaqueId); err == nil { + groups = groupsIf.([]string) + } else { + groupsRes, err := client.GetUserGroups(ctx, &userpb.GetUserGroupsRequest{UserId: u.Id}) + if err != nil { + logError(isUnprotectedEndpoint, log, err, "error retrieving user groups", http.StatusInternalServerError, w) + return nil, err + } + groups = groupsRes.Groups + _ = userGroupsCache.SetWithExpire(u.Id.OpaqueId, groupsRes.Groups, 3600*time.Second) + } + u.Groups = groups + } + + // ensure access to the resource is allowed + ok, err := scope.VerifyScope(ctx, tokenScope, r.URL.Path) + if err != nil { + logError(isUnprotectedEndpoint, log, err, "error verifying scope of access token", http.StatusInternalServerError, w) + return nil, err + } + if !ok { + err := errtypes.PermissionDenied("access to resource not allowed") + logError(isUnprotectedEndpoint, log, err, "access to resource not allowed", http.StatusUnauthorized, w) + return nil, err + } + + return ctxWithUserInfo(ctx, r, u, token, tokenScope, r.Header.Get(ctxpkg.InitiatorHeader)), nil +} + +func ctxWithUserInfo(ctx context.Context, r *http.Request, user *userpb.User, token string, tokenScope map[string]*authpb.Scope, initiatorid string) context.Context { + ctx = ctxpkg.ContextSetUser(ctx, user) + ctx = ctxpkg.ContextSetToken(ctx, token) + ctx = ctxpkg.ContextSetInitiator(ctx, initiatorid) + ctx = metadata.AppendToOutgoingContext(ctx, ctxpkg.TokenHeader, token) + ctx = metadata.AppendToOutgoingContext(ctx, ctxpkg.UserAgentHeader, r.UserAgent()) + ctx = metadata.AppendToOutgoingContext(ctx, ctxpkg.InitiatorHeader, initiatorid) + ctx = ctxpkg.ContextSetScopes(ctx, tokenScope) + return ctx +} + +func insertGroupsInUser(ctx context.Context, userGroupsCache gcache.Cache, client gateway.GatewayAPIClient, user *userpb.User) error { + if 
sharedconf.SkipUserGroupsInToken() { + var groups []string + if groupsIf, err := userGroupsCache.Get(user.Id.OpaqueId); err == nil { + groups = groupsIf.([]string) + } else { + groupsRes, err := client.GetUserGroups(ctx, &userpb.GetUserGroupsRequest{UserId: user.Id}) + if err != nil { + return err + } + groups = groupsRes.Groups + _ = userGroupsCache.SetWithExpire(user.Id.OpaqueId, groupsRes.Groups, 3600*time.Second) + } + user.Groups = groups + } + return nil +} + +func isTokenValid(r *http.Request, tokenManager token.Manager, token string) (*userpb.User, map[string]*authpb.Scope, bool) { + ctx := r.Context() + + u, tokenScope, err := tokenManager.DismantleToken(ctx, token) + if err != nil { + return nil, nil, false + } + + // ensure access to the resource is allowed + ok, err := scope.VerifyScope(ctx, tokenScope, r.URL.Path) + if err != nil { + return nil, nil, false + } + + return u, tokenScope, ok +} + +func logError(isUnprotectedEndpoint bool, log *zerolog.Logger, err error, msg string, status int, w http.ResponseWriter) { + if !isUnprotectedEndpoint { + log.Error().Err(err).Msg(msg) + w.WriteHeader(status) + } +} + +// getCredsForUserAgent returns the WWW Authenticate challenges keys to use given an http request +// and available credentials. 
// getCredsForUserAgent returns the WWW-Authenticate challenge keys to use for
// a request. When the user agent matches an entry of the user-agent map and
// the mapped credential is among the available ones, only that credential is
// returned; in every other case the full list of available credentials is
// returned unchanged.
func getCredsForUserAgent(ua string, uam map[string]string, creds []string) []string {
	if ua == "" || len(uam) == 0 {
		return creds
	}

	for needle, cred := range uam {
		if !strings.Contains(ua, needle) {
			continue
		}
		// The mapping only applies when the mapped credential is available.
		for _, available := range creds {
			if available == cred {
				return []string{cred}
			}
		}
		return creds
	}

	return creds
}
+ +package auth + +import ( + "testing" +) + +func TestGetCredsForUserAgent(t *testing.T) { + type test struct { + userAgent string + userAgentMap map[string]string + availableCredentials []string + expected []string + } + + tests := []*test{ + // no user agent we return all available credentials + { + userAgent: "", + userAgentMap: map[string]string{}, + availableCredentials: []string{"basic"}, + expected: []string{"basic"}, + }, + + // map set but user agent not in map + { + userAgent: "curl", + userAgentMap: map[string]string{"mirall": "basic"}, + availableCredentials: []string{"basic", "bearer"}, + expected: []string{"basic", "bearer"}, + }, + + // no user map we return all available credentials + { + userAgent: "mirall", + userAgentMap: map[string]string{}, + availableCredentials: []string{"basic"}, + expected: []string{"basic"}, + }, + + // user agent set but no mapping set we return all credentials + { + userAgent: "mirall", + userAgentMap: map[string]string{}, + availableCredentials: []string{"basic"}, + expected: []string{"basic"}, + }, + + // user mapping set to non available credential, we return all available + { + userAgent: "mirall", + userAgentMap: map[string]string{"mirall": "notfound"}, + availableCredentials: []string{"basic", "bearer"}, + expected: []string{"basic", "bearer"}, + }, + + // user mapping set and we return only desired credential + { + userAgent: "mirall", + userAgentMap: map[string]string{"mirall": "bearer"}, + availableCredentials: []string{"basic", "bearer"}, + expected: []string{"bearer"}, + }, + } + + for _, test := range tests { + got := getCredsForUserAgent( + test.userAgent, + test.userAgentMap, + test.availableCredentials) + + if !match(got, test.expected) { + fail(t, got, test.expected) + } + } +} + +func match(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +func fail(t *testing.T, got, expected []string) { + t.Fatalf("got: 
%+v expected: %+v", got, expected) +} diff --git a/internal/http/interceptors/auth/credential/loader/loader.go b/internal/http/interceptors/auth/credential/loader/loader.go new file mode 100644 index 0000000000..1c1495e0ea --- /dev/null +++ b/internal/http/interceptors/auth/credential/loader/loader.go @@ -0,0 +1,27 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package loader + +import ( + // Load core authentication strategies. + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/credential/strategy/basic" + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/credential/strategy/bearer" + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/credential/strategy/ocmshares" + // Add your own here. +) diff --git a/internal/http/interceptors/auth/credential/registry/registry.go b/internal/http/interceptors/auth/credential/registry/registry.go new file mode 100644 index 0000000000..6c661c022a --- /dev/null +++ b/internal/http/interceptors/auth/credential/registry/registry.go @@ -0,0 +1,36 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package registry + +import ( + "github.com/opencloud-eu/reva/v2/pkg/auth" +) + +// NewCredentialFunc is the function that credential strategies +// should register at init time. +type NewCredentialFunc func(map[string]interface{}) (auth.CredentialStrategy, error) + +// NewCredentialFuncs is a map containing all the registered auth strategies. +var NewCredentialFuncs = map[string]NewCredentialFunc{} + +// Register registers a new auth strategy new function. +// Not safe for concurrent use. Safe for use from package init. +func Register(name string, f NewCredentialFunc) { + NewCredentialFuncs[name] = f +} diff --git a/internal/http/interceptors/auth/credential/strategy/basic/basic.go b/internal/http/interceptors/auth/credential/strategy/basic/basic.go new file mode 100644 index 0000000000..44320c13cb --- /dev/null +++ b/internal/http/interceptors/auth/credential/strategy/basic/basic.go @@ -0,0 +1,56 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package basic + +import ( + "fmt" + "net/http" + + "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/credential/registry" + "github.com/opencloud-eu/reva/v2/pkg/auth" +) + +func init() { + registry.Register("basic", New) +} + +type strategy struct{} + +// New returns a new auth strategy that checks for basic auth. +// See https://tools.ietf.org/html/rfc7617 +func New(m map[string]interface{}) (auth.CredentialStrategy, error) { + return &strategy{}, nil +} + +func (s *strategy) GetCredentials(w http.ResponseWriter, r *http.Request) (*auth.Credentials, error) { + id, secret, ok := r.BasicAuth() + if !ok { + return nil, fmt.Errorf("no basic auth provided") + } + return &auth.Credentials{Type: "basic", ClientID: id, ClientSecret: secret}, nil +} + +func (s *strategy) AddWWWAuthenticate(w http.ResponseWriter, r *http.Request, realm string) { + // TODO read realm from forwarded header? 
+ if realm == "" { + // fall back to hostname if not configured + realm = r.Host + } + w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm)) +} diff --git a/internal/http/interceptors/auth/credential/strategy/bearer/bearer.go b/internal/http/interceptors/auth/credential/strategy/bearer/bearer.go new file mode 100644 index 0000000000..27162f0276 --- /dev/null +++ b/internal/http/interceptors/auth/credential/strategy/bearer/bearer.go @@ -0,0 +1,67 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package bearer + +import ( + "fmt" + "net/http" + "strings" + + "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/credential/registry" + "github.com/opencloud-eu/reva/v2/pkg/auth" +) + +func init() { + registry.Register("bearer", New) +} + +type strategy struct{} + +// New returns a new auth strategy that checks "Bearer" OAuth Access Tokens +// See https://tools.ietf.org/html/rfc6750#section-6.1 +func New(m map[string]interface{}) (auth.CredentialStrategy, error) { + return &strategy{}, nil +} + +func (s *strategy) GetCredentials(w http.ResponseWriter, r *http.Request) (*auth.Credentials, error) { + // 1. 
check Authorization header + hdr := r.Header.Get("Authorization") + token := strings.TrimPrefix(hdr, "Bearer ") + if token != "" { + return &auth.Credentials{Type: "bearer", ClientSecret: token}, nil + } + // TODO 2. check form encoded body parameter for POST requests, see https://tools.ietf.org/html/rfc6750#section-2.2 + + // 3. check uri query parameter, see https://tools.ietf.org/html/rfc6750#section-2.3 + tokens, ok := r.URL.Query()["access_token"] + if !ok || len(tokens[0]) < 1 { + return nil, fmt.Errorf("no bearer auth provided") + } + return &auth.Credentials{Type: "bearer", ClientSecret: tokens[0]}, nil + +} + +func (s *strategy) AddWWWAuthenticate(w http.ResponseWriter, r *http.Request, realm string) { + // TODO read realm from forwarded header? + if realm == "" { + // fall back to hostname if not configured + realm = r.Host + } + w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Bearer realm="%s"`, realm)) +} diff --git a/internal/http/interceptors/auth/credential/strategy/ocmshares/ocmshares.go b/internal/http/interceptors/auth/credential/strategy/ocmshares/ocmshares.go new file mode 100644 index 0000000000..76ab33867a --- /dev/null +++ b/internal/http/interceptors/auth/credential/strategy/ocmshares/ocmshares.go @@ -0,0 +1,58 @@ +// Copyright 2018-2023 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocmshares + +import ( + "fmt" + "net/http" + + "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/credential/registry" + "github.com/opencloud-eu/reva/v2/pkg/auth" +) + +func init() { + registry.Register("ocmshares", New) +} + +const ( + headerShareToken = "ocm-token" +) + +type strategy struct{} + +// New returns a new auth strategy that handles public share verification. +func New(m map[string]interface{}) (auth.CredentialStrategy, error) { + return &strategy{}, nil +} + +func (s *strategy) GetCredentials(w http.ResponseWriter, r *http.Request) (*auth.Credentials, error) { + token := r.Header.Get(headerShareToken) + if token == "" { + token = r.URL.Query().Get(headerShareToken) + } + if token == "" { + return nil, fmt.Errorf("no ocm token provided") + } + + return &auth.Credentials{Type: "ocmshares", ClientID: token}, nil +} + +func (s *strategy) AddWWWAuthenticate(w http.ResponseWriter, r *http.Request, realm string) { + // TODO read realm from forwarded header? +} diff --git a/internal/http/interceptors/auth/token/loader/loader.go b/internal/http/interceptors/auth/token/loader/loader.go new file mode 100644 index 0000000000..3f9ccb259c --- /dev/null +++ b/internal/http/interceptors/auth/token/loader/loader.go @@ -0,0 +1,26 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package loader + +import ( + // Load core token strategies. + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/token/strategy/bearer" + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/token/strategy/header" + // Add your own here. +) diff --git a/internal/http/interceptors/auth/token/registry/registry.go b/internal/http/interceptors/auth/token/registry/registry.go new file mode 100644 index 0000000000..474afd78e3 --- /dev/null +++ b/internal/http/interceptors/auth/token/registry/registry.go @@ -0,0 +1,34 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package registry + +import "github.com/opencloud-eu/reva/v2/pkg/auth" + +// NewTokenFunc is the function that token strategies +// should register at init time. +type NewTokenFunc func(map[string]interface{}) (auth.TokenStrategy, error) + +// NewTokenFuncs is a map containing all the registered auth strategies. 
+var NewTokenFuncs = map[string]NewTokenFunc{} + +// Register registers a new auth strategy new function. +// Not safe for concurrent use. Safe for use from package init. +func Register(name string, f NewTokenFunc) { + NewTokenFuncs[name] = f +} diff --git a/internal/http/interceptors/auth/token/strategy/bearer/bearer.go b/internal/http/interceptors/auth/token/strategy/bearer/bearer.go new file mode 100644 index 0000000000..0985985a1a --- /dev/null +++ b/internal/http/interceptors/auth/token/strategy/bearer/bearer.go @@ -0,0 +1,84 @@ +// Copyright 2018-2023 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package header + +import ( + "mime" + "net/http" + "strings" + + "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/token/registry" + "github.com/opencloud-eu/reva/v2/pkg/auth" +) + +func init() { + registry.Register("bearer", New) +} + +type b struct{} + +// New returns a new auth strategy that checks for bearer auth. 
+func New(m map[string]interface{}) (auth.TokenStrategy, error) {
+	return b{}, nil
+}
+
+func (b) GetToken(r *http.Request) string {
+	// Authorization Request Header Field: https://www.rfc-editor.org/rfc/rfc6750#section-2.1
+	if tkn, ok := getFromAuthorizationHeader(r); ok {
+		return tkn
+	}
+
+	// Form-Encoded Body Parameter: https://www.rfc-editor.org/rfc/rfc6750#section-2.2
+	if tkn, ok := getFromBody(r); ok {
+		return tkn
+	}
+
+	// URI Query Parameter: https://www.rfc-editor.org/rfc/rfc6750#section-2.3
+	if tkn, ok := getFromQueryParam(r); ok {
+		return tkn
+	}
+
+	return ""
+}
+
+func getFromAuthorizationHeader(r *http.Request) (string, bool) {
+	auth := r.Header.Get("Authorization")
+	tkn := strings.TrimPrefix(auth, "Bearer ")
+	return tkn, tkn != ""
+}
+
+func getFromBody(r *http.Request) (string, bool) {
+	mediatype, _, err := mime.ParseMediaType(r.Header.Get("content-type"))
+	if err != nil {
+		return "", false
+	}
+	if mediatype != "application/x-www-form-urlencoded" {
+		return "", false
+	}
+	if err = r.ParseForm(); err != nil {
+		return "", false
+	}
+	// RFC 6750 section 2.2 defines the form-encoded body parameter as "access_token".
+	tkn := r.Form.Get("access_token")
+	return tkn, tkn != ""
+}
+
+func getFromQueryParam(r *http.Request) (string, bool) {
+	tkn := r.URL.Query().Get("access_token")
+	return tkn, tkn != ""
+}
diff --git a/internal/http/interceptors/auth/token/strategy/header/header.go b/internal/http/interceptors/auth/token/strategy/header/header.go
new file mode 100644
index 0000000000..574c967303
--- /dev/null
+++ b/internal/http/interceptors/auth/token/strategy/header/header.go
@@ -0,0 +1,44 @@
+// Copyright 2018-2021 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+
+package header
+
+import (
+	"net/http"
+
+	"github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/token/registry"
+	"github.com/opencloud-eu/reva/v2/pkg/auth"
+	ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
+)
+
+func init() {
+	registry.Register("header", New)
+}
+
+type strategy struct {
+	header string
+}
+
+// New returns a new auth strategy that reads the token from the configured request header.
+func New(m map[string]interface{}) (auth.TokenStrategy, error) {
+	return &strategy{header: ctxpkg.TokenHeader}, nil
+}
+
+func (s *strategy) GetToken(r *http.Request) string {
+	return r.Header.Get(s.header)
+}
diff --git a/internal/http/interceptors/auth/tokenwriter/loader/loader.go b/internal/http/interceptors/auth/tokenwriter/loader/loader.go
new file mode 100644
index 0000000000..59831f1463
--- /dev/null
+++ b/internal/http/interceptors/auth/tokenwriter/loader/loader.go
@@ -0,0 +1,25 @@
+// Copyright 2018-2021 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package loader + +import ( + // Load core token writer strategies. + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/tokenwriter/strategy/header" + // Add your own here. +) diff --git a/internal/http/interceptors/auth/tokenwriter/registry/registry.go b/internal/http/interceptors/auth/tokenwriter/registry/registry.go new file mode 100644 index 0000000000..5bd60d3eeb --- /dev/null +++ b/internal/http/interceptors/auth/tokenwriter/registry/registry.go @@ -0,0 +1,34 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package registry + +import "github.com/opencloud-eu/reva/v2/pkg/auth" + +// NewTokenFunc is the function that token writers +// should register at init time. +type NewTokenFunc func(map[string]interface{}) (auth.TokenWriter, error) + +// NewTokenFuncs is a map containing all the registered token writers. +var NewTokenFuncs = map[string]NewTokenFunc{} + +// Register registers a new token writer strategy new function. +// Not safe for concurrent use. Safe for use from package init. +func Register(name string, f NewTokenFunc) { + NewTokenFuncs[name] = f +} diff --git a/internal/http/interceptors/auth/tokenwriter/strategy/header/header.go b/internal/http/interceptors/auth/tokenwriter/strategy/header/header.go new file mode 100644 index 0000000000..7cefbfb6ef --- /dev/null +++ b/internal/http/interceptors/auth/tokenwriter/strategy/header/header.go @@ -0,0 +1,44 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package header + +import ( + "net/http" + + "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/tokenwriter/registry" + "github.com/opencloud-eu/reva/v2/pkg/auth" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" +) + +func init() { + registry.Register("header", New) +} + +type strategy struct { + header string +} + +// New returns a new token writer strategy that stores token in a header. +func New(m map[string]interface{}) (auth.TokenWriter, error) { + return &strategy{header: ctxpkg.TokenHeader}, nil +} + +func (s *strategy) WriteToken(token string, w http.ResponseWriter) { + w.Header().Set(s.header, token) +} diff --git a/opencloud/pkg/init/init.go b/opencloud/pkg/init/init.go index 6d53b78f4d..682ef7c5fd 100644 --- a/opencloud/pkg/init/init.go +++ b/opencloud/pkg/init/init.go @@ -281,7 +281,6 @@ func CreateConfig(insecure, forceOverwrite, diff bool, configPath, adminPassword cfg.Collaboration.App.Insecure = true cfg.Frontend.AppHandler = _insecureService cfg.Frontend.Archiver = _insecureService - cfg.Frontend.OCDav = _insecureService cfg.Graph.Spaces = _insecureService cfg.Graph.Events = _insecureEvents cfg.Notifications.Notifications.Events = _insecureEvents @@ -300,6 +299,7 @@ func CreateConfig(insecure, forceOverwrite, diff bool, configPath, adminPassword cfg.Thumbnails.Thumbnail.WebdavAllowInsecure = true cfg.Thumbnails.Thumbnail.Cs3AllowInsecure = true + cfg.Webdav.OCDav = _insecureService } yamlOutput, err := yaml.Marshal(cfg) if err != nil { diff --git a/opencloud/pkg/init/structs.go b/opencloud/pkg/init/structs.go index b49ae8726a..36c37012a0 100644 --- a/opencloud/pkg/init/structs.go +++ b/opencloud/pkg/init/structs.go @@ -46,6 +46,7 @@ type OpenCloudConfig struct { AuthService AuthService `yaml:"auth_service"` Clientlog Clientlog `yaml:"clientlog"` Activitylog Activitylog `yaml:"activitylog"` + Webdav WebdavServie `yaml:"webdav"` } // Activitylog is the configuration for the activitylog service @@ -104,7 +105,6 @@ type 
FrontendService struct { AppHandler InsecureService `yaml:"app_handler"` Archiver InsecureService ServiceAccount ServiceAccount `yaml:"service_account"` - OCDav InsecureService } // Gateway is the configuration for the gateway @@ -250,3 +250,8 @@ type Userlog struct { type WopiApp struct { Secret string `yaml:"secret"` } + +// WebdavServie is the configuration for the webdav service +type WebdavServie struct { + OCDav InsecureService +} diff --git a/pkg/config/helpers_test.go b/pkg/config/helpers_test.go index d31782a141..dfa84aee3e 100644 --- a/pkg/config/helpers_test.go +++ b/pkg/config/helpers_test.go @@ -99,8 +99,6 @@ frontend: service_account: service_account_id: c05389b2-d94c-4d01-a9b5-a2f97952cc14 service_account_secret: GW5.x1vDM&+NPRi++eV@.P7Tms4vj!=s - ocdav: - insecure: true auth_basic: auth_providers: ldap: @@ -173,6 +171,9 @@ clientlog: service_account: service_account_id: c05389b2-d94c-4d01-a9b5-a2f97952cc14 service_account_secret: GW5.x1vDM&+NPRi++eV@.P7Tms4vj!=s +webdav: + ocdav: + insecure: true ` filePath := "etc/opencloud/foo.yaml" fs := fstest.MapFS{ diff --git a/services/frontend/pkg/config/config.go b/services/frontend/pkg/config/config.go index bbb0b7f2f5..0cc81b1878 100644 --- a/services/frontend/pkg/config/config.go +++ b/services/frontend/pkg/config/config.go @@ -43,7 +43,6 @@ type Config struct { Archiver Archiver `yaml:"archiver"` DataGateway DataGateway `yaml:"data_gateway"` OCS OCS `yaml:"ocs"` - OCDav OCDav `yaml:"ocdav"` Checksums Checksums `yaml:"checksums"` ReadOnlyUserAttributes []string `yaml:"read_only_user_attributes" env:"FRONTEND_READONLY_USER_ATTRIBUTES" desc:"A list of user attributes to indicate as read-only. Supported values: 'user.onPremisesSamAccountName' (username), 'user.displayName', 'user.mail', 'user.passwordProfile' (password), 'user.appRoleAssignments' (role), 'user.memberOf' (groups), 'user.accountEnabled' (login allowed), 'drive.quota' (quota). See the Environment Variable Types description for more details." 
introductionVersion:"1.0.0"` LDAPServerWriteEnabled bool `yaml:"ldap_server_write_enabled" env:"OC_LDAP_SERVER_WRITE_ENABLED;FRONTEND_LDAP_SERVER_WRITE_ENABLED" desc:"Allow creating, modifying and deleting LDAP users via the GRAPH API. This can only be set to 'true' when keeping default settings for the LDAP user and group attribute types (the 'OC_LDAP_USER_SCHEMA_* and 'OC_LDAP_GROUP_SCHEMA_* variables)." introductionVersion:"1.0.0"` @@ -145,36 +144,6 @@ type OCS struct { ShowUserEmailInResults bool `yaml:"show_email_in_results" env:"OC_SHOW_USER_EMAIL_IN_RESULTS" desc:"Include user email addresses in responses. If absent or set to false emails will be omitted from results. Please note that admin users can always see all email addresses." introductionVersion:"1.0.0"` } -type OCDav struct { - Prefix string `yaml:"prefix" env:"OCDAV_HTTP_PREFIX;FRONTENT_OCDAV_HTTP_PREFIX" desc:"A URL path prefix for the handler." introductionVersion:"1.0.0"` - - SkipUserGroupsInToken bool `yaml:"skip_user_groups_in_token" env:"OCDAV_SKIP_USER_GROUPS_IN_TOKEN;FRONTENT_OCDAV_SKIP_USER_GROUPS_IN_TOKEN" desc:"Disables the loading of user's group memberships from the reva access token." introductionVersion:"1.0.0"` - - WebdavNamespace string `yaml:"webdav_namespace" env:"OCDAV_WEBDAV_NAMESPACE;FRONTENT_OCDAV_WEBDAV_NAMESPACE" desc:"Jail requests to /dav/webdav into this CS3 namespace. Supports template layouting with CS3 User properties." introductionVersion:"1.0.0"` - FilesNamespace string `yaml:"files_namespace" env:"OCDAV_FILES_NAMESPACE;FRONTENT_OCDAV_FILES_NAMESPACE" desc:"Jail requests to /dav/files/{username} into this CS3 namespace. Supports template layouting with CS3 User properties." introductionVersion:"1.0.0"` - SharesNamespace string `yaml:"shares_namespace" env:"OCDAV_SHARES_NAMESPACE;FRONTENT_OCDAV_SHARES_NAMESPACE" desc:"The human readable path for the share jail. Relative to a users personal space root. Upcased intentionally." 
introductionVersion:"1.0.0"` - OCMNamespace string `yaml:"ocm_namespace" env:"OCDAV_OCM_NAMESPACE;FRONTENT_OCDAV_OCM_NAMESPACE" desc:"The human readable path prefix for the ocm shares." introductionVersion:"1.0.0"` - // PublicURL used to redirect /s/{token} URLs to - PublicURL string `yaml:"public_url" env:"OC_URL;OCDAV_PUBLIC_URL;FRONTENT_OCDAV_PUBLIC_URL" desc:"URL where OpenCloud is reachable for users." introductionVersion:"1.0.0"` - - // Insecure certificates allowed when making requests to the gateway - Insecure bool `yaml:"insecure" env:"OC_INSECURE;OCDAV_INSECURE;FRONTENT_OCDAV_INSECURE" desc:"Allow insecure connections to the GATEWAY service." introductionVersion:"1.0.0"` - EnableHTTPTPC bool `yaml:"enable_http_tpc" env:"OCDAV_ENABLE_HTTP_TPC;FRONTENT_OCDAV_ENABLE_HTTP_TPC" desc:"Enable HTTP / WebDAV Third-Party-Copy support." introductionVersion:"%%NEXT%%"` - // Timeout in seconds when making requests to the gateway - Timeout int64 `yaml:"gateway_request_timeout" env:"OCDAV_GATEWAY_REQUEST_TIME;FRONTENT_OUTOCDAV_GATEWAY_REQUEST_TIMEOUT" desc:"Request timeout in seconds for requests from the oCDAV service to the GATEWAY service." introductionVersion:"1.0.0"` - - MachineAuthAPIKey string `yaml:"machine_auth_api_key" env:"OC_MACHINE_AUTH_API_KEY;OCDAV_MACHINE_AUTH_API_KEY;FRONTENT_OCDAV_MACHINE_AUTH_API_KEY" desc:"Machine auth API key used to validate internal requests necessary for the access to resources from other services." introductionVersion:"1.0.0"` - - AllowPropfindDepthInfinity bool `yaml:"allow_propfind_depth_infinity" env:"OCDAV_ALLOW_PROPFIND_DEPTH_INFINITY;FRONTENT_OCDAV_ALLOW_PROPFIND_DEPTH_INFINITY" desc:"Allow the use of depth infinity in PROPFINDS. When enabled, a propfind will traverse through all subfolders. If many subfolders are expected, depth infinity can cause heavy server load and/or delayed response times." 
introductionVersion:"1.0.0"` - - NameValidation NameValidation `yaml:"name_validation"` -} - -type NameValidation struct { - InvalidChars []string `yaml:"invalid_chars" env:"OCDAV_NAME_VALIDATION_INVALID_CHARS;FRONTENT_OCDAV_NAME_VALIDATION_INVALID_CHARS" desc:"List of characters that are not allowed in file or folder names." introductionVersion:"%%NEXT%%"` - MaxLength int `yaml:"max_length" env:"OCDAV_NAME_VALIDATION_MAX_LENGTH;FRONTENT_OCDAV_NAME_VALIDATION_MAX_LENGTH" desc:"Max length of file or folder names." introductionVersion:"%%NEXT%%"` -} - type CacheWarmupDrivers struct { CBOX CBOXDriver `yaml:"cbox,omitempty"` } diff --git a/services/frontend/pkg/config/defaults/defaultconfig.go b/services/frontend/pkg/config/defaults/defaultconfig.go index fb2a958d54..25c21bc147 100644 --- a/services/frontend/pkg/config/defaults/defaultconfig.go +++ b/services/frontend/pkg/config/defaults/defaultconfig.go @@ -118,24 +118,6 @@ func DefaultConfig() *config.Config { PublicShareMustHavePassword: true, IncludeOCMSharees: false, }, - OCDav: config.OCDav{ - Prefix: "", - SkipUserGroupsInToken: false, - - WebdavNamespace: "/users/{{.Id.OpaqueId}}", - FilesNamespace: "/users/{{.Id.OpaqueId}}", - SharesNamespace: "/Shares", - OCMNamespace: "/public", - PublicURL: "https://localhost:9200", - Insecure: false, - EnableHTTPTPC: false, - Timeout: 84300, - AllowPropfindDepthInfinity: false, - NameValidation: config.NameValidation{ - InvalidChars: []string{"\f", "\r", "\n", "\\"}, - MaxLength: 255, - }, - }, Middleware: config.Middleware{ Auth: config.Auth{ CredentialsByUserAgent: map[string]string{}, diff --git a/services/frontend/pkg/revaconfig/config.go b/services/frontend/pkg/revaconfig/config.go index 62d0cca733..6b09f5265a 100644 --- a/services/frontend/pkg/revaconfig/config.go +++ b/services/frontend/pkg/revaconfig/config.go @@ -357,34 +357,6 @@ func FrontendConfigFromStruct(cfg *config.Config, logger log.Logger) (map[string "include_ocm_sharees": cfg.OCS.IncludeOCMSharees, 
"show_email_in_results": cfg.OCS.ShowUserEmailInResults, }, - "ocdav": map[string]interface{}{ - "prefix": cfg.OCDav.Prefix, - "files_namespace": cfg.OCDav.FilesNamespace, - "webdav_namespace": cfg.OCDav.WebdavNamespace, - "shares_namespace": cfg.OCDav.SharesNamespace, - "ocm_namespace": cfg.OCDav.OCMNamespace, - "gatewaysvc": cfg.Reva.Address, - "timeout": cfg.OCDav.Timeout, - "insecure": cfg.OCDav.Insecure, - "enable_http_tpc": cfg.OCDav.EnableHTTPTPC, - "public_url": cfg.OCDav.PublicURL, - // still not supported - //"favorite_storage_driver": unused, - //"favorite_storage_drivers": unused, - "version": version.Legacy, - "version_string": version.LegacyString, - "edition": version.Edition, - "product": "OpenCloud", - "product_name": "OpenCloud", - "product_version": version.GetString(), - "allow_depth_infinity": cfg.OCDav.AllowPropfindDepthInfinity, - "validation": map[string]interface{}{ - "invalid_chars": cfg.OCDav.NameValidation.InvalidChars, - "max_length": cfg.OCDav.NameValidation.MaxLength, - }, - "url_signing_shared_secret": cfg.Commons.URLSigningSecret, - "machine_auth_apikey": cfg.MachineAuthAPIKey, - }, }, }, }, nil diff --git a/services/idp/package.json b/services/idp/package.json index 21da935a91..797bbf38be 100644 --- a/services/idp/package.json +++ b/services/idp/package.json @@ -7,7 +7,7 @@ "analyze": "source-map-explorer 'build/static/js/*.js'", "build": "node --openssl-legacy-provider scripts/build.js && rm -f build/service-worker.js", "licenses": "NODE_PATH=./node_modules node ../scripts/js-license-ranger.js", - "licenses:check": "license-checker-rseidelsohn --summary --relativeLicensePath --onlyAllow 'Python-2.0;Apache*;Apache License, Version 2.0;Apache-2.0;Apache 2.0;Artistic-2.0;BSD;BSD-3-Clause;CC-BY-3.0;CC-BY-4.0;CC0-1.0;ISC;MIT;MPL-2.0;Public Domain;Unicode-TOU;Unlicense;WTFPL;ODC-By-1.0;BlueOak-1.0.0;OFL-1.1' --excludePackages 'identifier;kpop;unicoderegexp' --clarificationsFile license-checker-clarifications.json", + "licenses:check": 
"license-checker-rseidelsohn --summary --relativeLicensePath --onlyAllow 'Python-2.0;Apache*;Apache License, Version 2.0;Apache-2.0;Apache 2.0;Artistic-2.0;BSD;BSD-3-Clause;CC-BY-3.0;CC-BY-4.0;CC0-1.0;ISC;MIT;MPL-2.0;Public Domain;Unicode-TOU;Unlicense;WTFPL;ODC-By-1.0;BlueOak-1.0.0;OFL-1.1' --excludePackages 'identifier;unicoderegexp' --clarificationsFile license-checker-clarifications.json", "licenses:csv": "license-checker-rseidelsohn --relativeLicensePath --csv --out ../../third-party-licenses/node/idp/third-party-licenses.csv", "licenses:save": "license-checker-rseidelsohn --relativeLicensePath --out /dev/null --files ../../third-party-licenses/node/idp/third-party-licenses", "lint": "eslint ./**/*.{tsx,ts,jsx,js}", @@ -89,7 +89,6 @@ "i18next-browser-languagedetector": "^8.1.0", "i18next-http-backend": "^3.0.2", "i18next-resources-to-backend": "^1.2.1", - "kpop": "https://download.kopano.io/community/kapp:/kpop-2.7.2.tgz", "query-string": "^9.2.0", "react": "^17.0.2", "react-app-polyfill": "^3.0.0", @@ -154,10 +153,5 @@ "webpack-manifest-plugin": "5.0.0", "workbox-webpack-plugin": "7.1.0" }, - "packageManager": "pnpm@9.15.4", - "pnpm": { - "overrides": { - "kpop>cldr": "" - } - } + "packageManager": "pnpm@9.15.4" } diff --git a/services/idp/pnpm-lock.yaml b/services/idp/pnpm-lock.yaml index fae3ee999c..62e9c9372a 100644 --- a/services/idp/pnpm-lock.yaml +++ b/services/idp/pnpm-lock.yaml @@ -4,9 +4,6 @@ settings: autoInstallPeers: true excludeLinksFromLockfile: false -overrides: - kpop>cldr: '' - importers: .: @@ -65,9 +62,6 @@ importers: i18next-resources-to-backend: specifier: ^1.2.1 version: 1.2.1 - kpop: - specifier: https://download.kopano.io/community/kapp:/kpop-2.7.2.tgz - version: 
https://download.kopano.io/community/kapp:/kpop-2.7.2.tgz(@gluejs/glue@0.3.0)(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(@material-ui/icons@4.11.3(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(notistack@0.8.9(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(oidc-client@1.11.5)(react-dom@17.0.2(react@17.0.2))(react-intl@2.9.0(prop-types@15.8.1)(react@17.0.2))(react@17.0.2) query-string: specifier: ^9.2.0 version: 9.2.0 @@ -1516,9 +1510,6 @@ packages: '@fontsource/roboto@5.2.5': resolution: {integrity: sha512-70r2UZ0raqLn5W+sPeKhqlf8wGvUXFWlofaDlcbt/S3d06+17gXKr3VNqDODB0I1ASme3dGT5OJj9NABt7OTZQ==} - '@gluejs/glue@0.3.0': - resolution: {integrity: sha512-byvFoZCbZW+A3Pg8JUU+8FjoPuF5l1v7mDeLJQP/YSeEcEDiD/YdUKLBUapPrcuyxclrtS8+peX4cxkh6awwTw==} - '@gulpjs/to-absolute-glob@4.0.0': resolution: {integrity: sha512-kjotm7XJrJ6v+7knhPaRgaT6q8F8K2jiafwYdNHLzmV0uGLuZY43FK6smNSHUPrhq5kX2slCUy+RGG/xGqmIKA==} engines: {node: '>=10.13.0'} @@ -2724,9 +2715,6 @@ packages: core-js@3.40.0: resolution: {integrity: sha512-7vsMc/Lty6AGnn7uFpYT56QesI5D2Y/UkgKounk87OP9Z2H9Z8kj6jzcSGAxFmUtDOS0ntK6lbQz+Nsa0Jj6mQ==} - core-js@3.43.0: - resolution: {integrity: sha512-N6wEbTTZSYOY2rYAn85CuvWWkCK6QweMn7/4Nr3w+gDBeBhk/x4EJeY6FPo4QzDoJZxVTv8U7CMvgWk6pOHHqA==} - core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} @@ -2738,11 +2726,6 @@ packages: resolution: {integrity: sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==} engines: {node: '>=10'} - crc32@0.2.2: - resolution: {integrity: sha512-PFZEGbDUeoNbL2GHIEpJRQGheXReDody/9axKTxhXtQqIL443wnNigtVZO9iuCIMPApKZRv7k2xr8euXHqNxQQ==} - engines: {node: '>= 0.4.0'} - 
hasBin: true - cross-fetch@4.0.0: resolution: {integrity: sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g==} @@ -2754,9 +2737,6 @@ packages: resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} engines: {node: '>= 8'} - crypto-js@4.2.0: - resolution: {integrity: sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q==} - crypto-random-string@2.0.0: resolution: {integrity: sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==} engines: {node: '>=8'} @@ -3718,9 +3698,6 @@ packages: resolution: {integrity: sha512-r0EI+HBMcXadMrugk0GCQ+6BQV39PiWAZVfq7oIckeGiN7sjRGyQxPdft3nQekFTCQbYxLBH+/axZMeH8UX6+w==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - hsv-rgb@1.0.0: - resolution: {integrity: sha512-Azd6IP11LZm0cEczEnJw5B6zIgWdGlE4TSoM2eh+RPRbXSQCy/0JS2POEq0wOtbAZtxTJhEMGm3GUYGbnTIJGw==} - html-escaper@2.0.2: resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} @@ -3850,23 +3827,6 @@ packages: resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==} engines: {node: '>= 0.4'} - intl-format-cache@2.2.9: - resolution: {integrity: sha512-Zv/u8wRpekckv0cLkwpVdABYST4hZNTDaX7reFetrYTJwxExR2VyTqQm+l0WmL0Qo8Mjb9Tf33qnfj0T7pjxdQ==} - - intl-messageformat-parser@1.4.0: - resolution: {integrity: sha512-/XkqFHKezO6UcF4Av2/Lzfrez18R0jyw7kRFhSeB/YRakdrgSc9QfFZUwNJI9swMwMoNPygK1ArC5wdFSjPw+A==} - deprecated: We've written a new parser that's 6x faster and is backwards compatible. 
Please use @formatjs/icu-messageformat-parser - - intl-messageformat@2.2.0: - resolution: {integrity: sha512-I+tSvHnXqJYjDfNmY95tpFMj30yoakC6OXAo+wu/wTMy6tA/4Fd4mvV7Uzs4cqK/Ap29sHhwjcY+78a8eifcXw==} - - intl-relativeformat@2.2.0: - resolution: {integrity: sha512-4bV/7kSKaPEmu6ArxXf9xjv1ny74Zkwuey8Pm01NH4zggPP7JHwg2STk8Y3JdspCKRDriwIyLRfEXnj2ZLr4Bw==} - deprecated: This package has been deprecated, please see migration guide at 'https://github.com/formatjs/formatjs/tree/master/packages/intl-relativeformat#migration-guide' - - invariant@2.2.4: - resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} - is-arguments@1.2.0: resolution: {integrity: sha512-7bVbi0huj/wrIAOzb8U1aszg9kdi3KN/CyU19CTI7tAoZYEZoL9yCDXpbXN+uPsuWnP02cyug1gleqq+TU+YCA==} engines: {node: '>= 0.4'} @@ -4045,10 +4005,6 @@ packages: isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} - iso-639-1@2.1.15: - resolution: {integrity: sha512-7c7mBznZu2ktfvyT582E2msM+Udc1EjOyhVRE/0ZsjD9LBtWSm23h3PtiRh2a35XoUsTQQjJXaJzuLjXsOdFDg==} - engines: {node: '>=6.0'} - istanbul-lib-coverage@3.2.2: resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} engines: {node: '>=8'} @@ -4346,20 +4302,6 @@ packages: resolution: {integrity: sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==} engines: {node: '>= 8'} - kpop@https://download.kopano.io/community/kapp:/kpop-2.7.2.tgz: - resolution: {tarball: https://download.kopano.io/community/kapp:/kpop-2.7.2.tgz} - version: 2.7.1 - engines: {node: '>=6.11.0'} - peerDependencies: - '@gluejs/glue': ^0.3.0 - '@material-ui/core': ^4.11.0 - '@material-ui/icons': ^4.9.1 - notistack: ^0.8.8 - oidc-client: ^1.11.0 - react: ^16.8.0 || ^17.0.0 - react-dom: ^16.8.0 || ^17.0.0 - react-intl: ^2.6.0 - language-subtag-registry@0.3.23: 
resolution: {integrity: sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==} @@ -4636,13 +4578,6 @@ packages: resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} engines: {node: '>=0.10.0'} - notistack@0.8.9: - resolution: {integrity: sha512-nRHQVWUfgHnvnKrjRbRX9f+YAnbyh96yRyO5bEP/FCLVLuTZcJOwUr0GZ7Xr/8wK3+hXa9JYpXUkUhSxj1K8NQ==} - peerDependencies: - '@material-ui/core': ^3.2.0 || ^4.0.0 - react: ^16.8.0 - react-dom: ^16.8.0 - now-and-later@3.0.0: resolution: {integrity: sha512-pGO4pzSdaxhWTGkfSfHx3hVzJVslFPwBp2Myq9MYN/ChfJZF87ochMAXnvz6/58RJSf5ik2q9tXprBBrk2cpcg==} engines: {node: '>= 10.13.0'} @@ -4698,9 +4633,6 @@ packages: resolution: {integrity: sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==} engines: {node: '>= 0.4'} - oidc-client@1.11.5: - resolution: {integrity: sha512-LcKrKC8Av0m/KD/4EFmo9Sg8fSQ+WFJWBrmtWd+tZkNn3WT/sQG3REmPANE9tzzhbjW6VkTNy4xhAXCfPApAOg==} - once@1.4.0: resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} @@ -5364,12 +5296,6 @@ packages: typescript: optional: true - react-intl@2.9.0: - resolution: {integrity: sha512-27jnDlb/d2A7mSJwrbOBnUgD+rPep+abmoJE511Tf8BnoONIAUehy/U1zZCHGO17mnOwMWxqN4qC0nW11cD6rA==} - peerDependencies: - prop-types: ^15.5.4 - react: ^0.14.9 || ^15.0.0 || ^16.0.0 - react-is@16.13.1: resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} @@ -5698,9 +5624,6 @@ packages: seq@0.3.5: resolution: {integrity: sha512-sisY2Ln1fj43KBkRtXkesnRHYNdswIkIibvNe/0UKm2GZxjMbqmccpiatoKr/k2qX5VKiLU8xm+tz/74LAho4g==} - serialize-javascript@4.0.0: - resolution: {integrity: sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==} - serialize-javascript@6.0.2: resolution: {integrity: 
sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==} @@ -7918,8 +7841,6 @@ snapshots: '@fontsource/roboto@5.2.5': {} - '@gluejs/glue@0.3.0': {} - '@gulpjs/to-absolute-glob@4.0.0': dependencies: is-negated-glob: 1.0.0 @@ -9403,8 +9324,6 @@ snapshots: core-js@3.40.0: {} - core-js@3.43.0: {} - core-util-is@1.0.3: {} cosmiconfig@6.0.0: @@ -9423,8 +9342,6 @@ snapshots: path-type: 4.0.0 yaml: 1.10.2 - crc32@0.2.2: {} - cross-fetch@4.0.0(encoding@0.1.13): dependencies: node-fetch: 2.7.0(encoding@0.1.13) @@ -9443,8 +9360,6 @@ snapshots: shebang-command: 2.0.0 which: 2.0.2 - crypto-js@4.2.0: {} - crypto-random-string@2.0.0: {} css-blank-pseudo@7.0.1(postcss@8.5.4): @@ -10683,8 +10598,6 @@ snapshots: dependencies: lru-cache: 7.18.3 - hsv-rgb@1.0.0: {} - html-escaper@2.0.2: {} html-minifier-terser@6.1.0: @@ -10833,22 +10746,6 @@ snapshots: hasown: 2.0.2 side-channel: 1.1.0 - intl-format-cache@2.2.9: {} - - intl-messageformat-parser@1.4.0: {} - - intl-messageformat@2.2.0: - dependencies: - intl-messageformat-parser: 1.4.0 - - intl-relativeformat@2.2.0: - dependencies: - intl-messageformat: 2.2.0 - - invariant@2.2.4: - dependencies: - loose-envify: 1.4.0 - is-arguments@1.2.0: dependencies: call-bound: 1.0.4 @@ -11007,8 +10904,6 @@ snapshots: isexe@2.0.0: {} - iso-639-1@2.1.15: {} - istanbul-lib-coverage@3.2.2: {} istanbul-lib-instrument@6.0.3: @@ -11539,20 +11434,6 @@ snapshots: klona@2.0.6: {} - 
kpop@https://download.kopano.io/community/kapp:/kpop-2.7.2.tgz(@gluejs/glue@0.3.0)(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(@material-ui/icons@4.11.3(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(notistack@0.8.9(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(oidc-client@1.11.5)(react-dom@17.0.2(react@17.0.2))(react-intl@2.9.0(prop-types@15.8.1)(react@17.0.2))(react@17.0.2): - dependencies: - '@gluejs/glue': 0.3.0 - '@material-ui/core': 4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2) - '@material-ui/icons': 4.11.3(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2) - crc32: 0.2.2 - hsv-rgb: 1.0.0 - iso-639-1: 2.1.15 - notistack: 0.8.9(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(react-dom@17.0.2(react@17.0.2))(react@17.0.2) - oidc-client: 1.11.5 - react: 17.0.2 - react-dom: 17.0.2(react@17.0.2) - react-intl: 2.9.0(prop-types@15.8.1)(react@17.0.2) - language-subtag-registry@0.3.23: {} language-tags@1.0.9: @@ -11787,16 +11668,6 @@ snapshots: normalize-range@0.1.2: {} - notistack@0.8.9(@material-ui/core@4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2))(react-dom@17.0.2(react@17.0.2))(react@17.0.2): - dependencies: - '@material-ui/core': 4.12.4(@types/react@17.0.80)(react-dom@17.0.2(react@17.0.2))(react@17.0.2) - classnames: 2.5.1 - hoist-non-react-statics: 3.3.2 - prop-types: 15.8.1 - react: 17.0.2 - react-dom: 17.0.2(react@17.0.2) - react-is: 16.13.1 - now-and-later@3.0.0: dependencies: once: 1.4.0 @@ -11860,14 +11731,6 @@ snapshots: define-properties: 1.2.1 es-object-atoms: 1.1.1 - 
oidc-client@1.11.5: - dependencies: - acorn: 7.4.1 - base64-js: 1.5.1 - core-js: 3.43.0 - crypto-js: 4.2.0 - serialize-javascript: 4.0.0 - once@1.4.0: dependencies: wrappy: 1.0.2 @@ -12599,16 +12462,6 @@ snapshots: react-dom: 17.0.2(react@17.0.2) typescript: 5.8.3 - react-intl@2.9.0(prop-types@15.8.1)(react@17.0.2): - dependencies: - hoist-non-react-statics: 3.3.2 - intl-format-cache: 2.2.9 - intl-messageformat: 2.2.0 - intl-relativeformat: 2.2.0 - invariant: 2.2.4 - prop-types: 15.8.1 - react: 17.0.2 - react-is@16.13.1: {} react-is@17.0.2: {} @@ -12975,10 +12828,6 @@ snapshots: chainsaw: 0.0.9 hashish: 0.0.4 - serialize-javascript@4.0.0: - dependencies: - randombytes: 2.1.0 - serialize-javascript@6.0.2: dependencies: randombytes: 2.1.0 diff --git a/services/idp/src/App.jsx b/services/idp/src/App.jsx index 228781534d..315a5876eb 100644 --- a/services/idp/src/App.jsx +++ b/services/idp/src/App.jsx @@ -2,10 +2,7 @@ import React, {ReactElement, Suspense, lazy, useState, useEffect} from 'react'; import PropTypes from 'prop-types'; import {MuiThemeProvider} from '@material-ui/core/styles'; -import {defaultTheme} from 'kpop/es/theme'; - -import 'kpop/static/css/base.css'; -import 'kpop/static/css/scrollbar.css'; +import muiTheme from './theme'; import Spinner from './components/Spinner'; import * as version from './version'; @@ -52,7 +49,7 @@ const App = ({ bgImg }): ReactElement => { className={`oc-login-bg ${bgImg ? 'oc-login-bg-image' : ''}`} style={{backgroundImage: bgImg ? 
`url(${bgImg})` : undefined}} > - + }> diff --git a/services/idp/src/app.css b/services/idp/src/app.css index 9818ad17d4..6b7eaf5121 100644 --- a/services/idp/src/app.css +++ b/services/idp/src/app.css @@ -17,16 +17,25 @@ html { font-feature-settings: "cv11"; color: #20434f !important; + height: 100%; } body { font-family: OpenCloud, sans-serif; + height: 100%; + margin: 0; + padding: 0; } strong { font-weight: 600; } +#root { + height: 100%; + display: flex; +} + .oc-font-weight-light { font-weight: 300; } diff --git a/services/proxy/pkg/config/defaults/defaultconfig.go b/services/proxy/pkg/config/defaults/defaultconfig.go index 187e52be6f..6f5334efed 100644 --- a/services/proxy/pkg/config/defaults/defaultconfig.go +++ b/services/proxy/pkg/config/defaults/defaultconfig.go @@ -205,47 +205,47 @@ func DefaultPolicies() []config.Policy { // TODO what paths are returned? the href contains the full path so it should be possible to return urls from other spaces? // TODO or we allow a REPORT on /dav/spaces to search all spaces and /dav/space/{spaceid} to search a specific space // send webdav REPORT requests to search service - { - Type: config.RegexRoute, - Method: "REPORT", - Endpoint: "(/remote.php)?/(web)?dav", - Service: "eu.opencloud.web.webdav", - }, - { - Type: config.QueryRoute, - Endpoint: "/dav/?preview=1", - Service: "eu.opencloud.web.webdav", - }, - { - Type: config.QueryRoute, - Endpoint: "/webdav/?preview=1", - Service: "eu.opencloud.web.webdav", - }, + // { + // Type: config.RegexRoute, + // Method: "REPORT", + // Endpoint: "(/remote.php)?/(web)?dav", + // Service: "eu.opencloud.web.webdav", + // }, + // { + // Type: config.QueryRoute, + // Endpoint: "/dav/?preview=1", + // Service: "eu.opencloud.web.webdav", + // }, + // { + // Type: config.QueryRoute, + // Endpoint: "/webdav/?preview=1", + // Service: "eu.opencloud.web.webdav", + // }, { Endpoint: "/remote.php/", - Service: "eu.opencloud.web.frontend", + Service: "eu.opencloud.web.webdav", }, { Endpoint: 
"/dav/", - Service: "eu.opencloud.web.frontend", + Service: "eu.opencloud.web.webdav", }, { Endpoint: "/webdav/", - Service: "eu.opencloud.web.frontend", + Service: "eu.opencloud.web.webdav", }, { Endpoint: "/status", - Service: "eu.opencloud.web.frontend", + Service: "eu.opencloud.web.webdav", Unprotected: true, }, { Endpoint: "/status.php", - Service: "eu.opencloud.web.frontend", + Service: "eu.opencloud.web.webdav", Unprotected: true, }, { Endpoint: "/index.php/", - Service: "eu.opencloud.web.frontend", + Service: "eu.opencloud.web.webdav", }, { Endpoint: "/apps/", diff --git a/services/webdav/pkg/config/config.go b/services/webdav/pkg/config/config.go index 324704464c..e9f2834e0a 100644 --- a/services/webdav/pkg/config/config.go +++ b/services/webdav/pkg/config/config.go @@ -2,6 +2,7 @@ package config import ( "context" + "time" "github.com/opencloud-eu/opencloud/pkg/shared" "go-micro.dev/v4/client" @@ -26,4 +27,48 @@ type Config struct { WebdavNamespace string `yaml:"webdav_namespace" env:"WEBDAV_WEBDAV_NAMESPACE" desc:"CS3 path layout to use when forwarding /webdav requests" introductionVersion:"1.0.0"` RevaGateway string `yaml:"reva_gateway" env:"OC_REVA_GATEWAY" desc:"CS3 gateway used to look up user metadata" introductionVersion:"1.0.0"` Context context.Context `yaml:"-"` + + OCDav OCDav `yaml:"ocdav"` + FavoritesStore FavoritesStore `yaml:"favorites_store"` +} + +type OCDav struct { + Prefix string `yaml:"prefix" env:"OCDAV_HTTP_PREFIX;FRONTENT_OCDAV_HTTP_PREFIX" desc:"A URL path prefix for the handler." introductionVersion:"1.0.0"` + + SkipUserGroupsInToken bool `yaml:"skip_user_groups_in_token" env:"OCDAV_SKIP_USER_GROUPS_IN_TOKEN;FRONTENT_OCDAV_SKIP_USER_GROUPS_IN_TOKEN" desc:"Disables the loading of user's group memberships from the reva access token." introductionVersion:"1.0.0"` + + WebdavNamespace string `yaml:"webdav_namespace" env:"OCDAV_WEBDAV_NAMESPACE;FRONTENT_OCDAV_WEBDAV_NAMESPACE" desc:"Jail requests to /dav/webdav into this CS3 namespace. 
Supports template layouting with CS3 User properties." introductionVersion:"1.0.0"` + FilesNamespace string `yaml:"files_namespace" env:"OCDAV_FILES_NAMESPACE;FRONTENT_OCDAV_FILES_NAMESPACE" desc:"Jail requests to /dav/files/{username} into this CS3 namespace. Supports template layouting with CS3 User properties." introductionVersion:"1.0.0"` + SharesNamespace string `yaml:"shares_namespace" env:"OCDAV_SHARES_NAMESPACE;FRONTENT_OCDAV_SHARES_NAMESPACE" desc:"The human readable path for the share jail. Relative to a users personal space root. Upcased intentionally." introductionVersion:"1.0.0"` + OCMNamespace string `yaml:"ocm_namespace" env:"OCDAV_OCM_NAMESPACE;FRONTENT_OCDAV_OCM_NAMESPACE" desc:"The human readable path prefix for the ocm shares." introductionVersion:"1.0.0"` + // PublicURL used to redirect /s/{token} URLs to + PublicURL string `yaml:"public_url" env:"OC_URL;OCDAV_PUBLIC_URL;FRONTENT_OCDAV_PUBLIC_URL" desc:"URL where OpenCloud is reachable for users." introductionVersion:"1.0.0"` + + // Insecure certificates allowed when making requests to the gateway + Insecure bool `yaml:"insecure" env:"OC_INSECURE;OCDAV_INSECURE;FRONTENT_OCDAV_INSECURE" desc:"Allow insecure connections to the GATEWAY service." introductionVersion:"1.0.0"` + EnableHTTPTPC bool `yaml:"enable_http_tpc" env:"OCDAV_ENABLE_HTTP_TPC;FRONTENT_OCDAV_ENABLE_HTTP_TPC" desc:"Enable HTTP / WebDAV Third-Party-Copy support." introductionVersion:"%%NEXT%%"` + // Timeout in seconds when making requests to the gateway + Timeout int64 `yaml:"gateway_request_timeout" env:"OCDAV_GATEWAY_REQUEST_TIME;FRONTENT_OUTOCDAV_GATEWAY_REQUEST_TIMEOUT" desc:"Request timeout in seconds for requests from the oCDAV service to the GATEWAY service." 
introductionVersion:"1.0.0"` + + MachineAuthAPIKey string `yaml:"machine_auth_api_key" env:"OC_MACHINE_AUTH_API_KEY;OCDAV_MACHINE_AUTH_API_KEY;FRONTENT_OCDAV_MACHINE_AUTH_API_KEY" desc:"Machine auth API key used to validate internal requests necessary for the access to resources from other services." introductionVersion:"1.0.0"` + + AllowPropfindDepthInfinity bool `yaml:"allow_propfind_depth_infinity" env:"OCDAV_ALLOW_PROPFIND_DEPTH_INFINITY;FRONTENT_OCDAV_ALLOW_PROPFIND_DEPTH_INFINITY" desc:"Allow the use of depth infinity in PROPFINDS. When enabled, a propfind will traverse through all subfolders. If many subfolders are expected, depth infinity can cause heavy server load and/or delayed response times." introductionVersion:"1.0.0"` + + NameValidation NameValidation `yaml:"name_validation"` +} + +type NameValidation struct { + InvalidChars []string `yaml:"invalid_chars" env:"OCDAV_NAME_VALIDATION_INVALID_CHARS;FRONTENT_OCDAV_NAME_VALIDATION_INVALID_CHARS" desc:"List of characters that are not allowed in file or folder names." introductionVersion:"%%NEXT%%"` + MaxLength int `yaml:"max_length" env:"OCDAV_NAME_VALIDATION_MAX_LENGTH;FRONTENT_OCDAV_NAME_VALIDATION_MAX_LENGTH" desc:"Max length of file or folder names." introductionVersion:"%%NEXT%%"` +} + +// FavoritesStore configures the store to use +type FavoritesStore struct { + Store string `yaml:"store" env:"OC_PERSISTENT_STORE;WEBDAV_FAVORITES_STORE" desc:"The type of the store. Supported values are: 'memory', 'nats-js-kv'. See the text description for details." introductionVersion:"%%NEXT%%"` + Nodes []string `yaml:"nodes" env:"OC_PERSISTENT_STORE_NODES;WEBDAV_FAVORITES_STORE_NODES" desc:"A list of nodes to access the configured store. This has no effect when 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store. See the Environment Variable Types description for more details." 
introductionVersion:"%%NEXT%%"` + Database string `yaml:"database" env:"WEBDAV_FAVORITES_STORE_DATABASE" desc:"The database name the configured store should use." introductionVersion:"%%NEXT%%"` + Table string `yaml:"table" env:"WEBDAV_FAVORITES_STORE_TABLE" desc:"The database table the store should use." introductionVersion:"%%NEXT%%"` + TTL time.Duration `yaml:"ttl" env:"OC_PERSISTENT_STORE_TTL;WEBDAV_FAVORITES_STORE_TTL" desc:"Time to live for entries in the store. See the Environment Variable Types description for more details." introductionVersion:"%%NEXT%%"` + AuthUsername string `yaml:"username" env:"OC_PERSISTENT_STORE_AUTH_USERNAME;WEBDAV_FAVORITES_STORE_AUTH_USERNAME" desc:"The username to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." introductionVersion:"%%NEXT%%"` + AuthPassword string `yaml:"password" env:"OC_PERSISTENT_STORE_AUTH_PASSWORD;WEBDAV_FAVORITES_STORE_AUTH_PASSWORD" desc:"The password to authenticate with the store. Only applies when store type 'nats-js-kv' is configured." 
introductionVersion:"%%NEXT%%"` } diff --git a/services/webdav/pkg/config/defaults/defaultconfig.go b/services/webdav/pkg/config/defaults/defaultconfig.go index 7bf35b7b01..4116afc10f 100644 --- a/services/webdav/pkg/config/defaults/defaultconfig.go +++ b/services/webdav/pkg/config/defaults/defaultconfig.go @@ -30,9 +30,48 @@ func DefaultConfig() *config.Config { Root: "/", Namespace: "eu.opencloud.web", CORS: config.CORS{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"}, - AllowedHeaders: []string{"Authorization", "Origin", "Content-Type", "Accept", "X-Requested-With", "X-Request-Id", "Cache-Control"}, + AllowedOrigins: []string{"https://localhost:9200"}, + AllowedMethods: []string{ + "OPTIONS", + "HEAD", + "GET", + "PUT", + "POST", + "PATCH", + "DELETE", + "MKCOL", + "PROPFIND", + "PROPPATCH", + "MOVE", + "COPY", + "REPORT", + "SEARCH", + }, + AllowedHeaders: []string{ + "Origin", + "Accept", + "Content-Type", + "Depth", + "Authorization", + "Ocs-Apirequest", + "If-None-Match", + "If-Match", + "Destination", + "Overwrite", + "X-Request-Id", + "X-Requested-With", + "Tus-Resumable", + "Tus-Checksum-Algorithm", + "Upload-Concat", + "Upload-Length", + "Upload-Metadata", + "Upload-Defer-Length", + "Upload-Expires", + "Upload-Checksum", + "Upload-Offset", + "X-HTTP-Method-Override", + "Cache-Control", + }, AllowCredentials: true, }, }, @@ -42,6 +81,33 @@ func DefaultConfig() *config.Config { OpenCloudPublicURL: "https://localhost:9200", WebdavNamespace: "/users/{{.Id.OpaqueId}}", RevaGateway: shared.DefaultRevaConfig().Address, + OCDav: config.OCDav{ + Prefix: "", + SkipUserGroupsInToken: false, + + WebdavNamespace: "/users/{{.Id.OpaqueId}}", + FilesNamespace: "/users/{{.Id.OpaqueId}}", + SharesNamespace: "/Shares", + OCMNamespace: "/public", + PublicURL: "https://localhost:9200", + Insecure: false, + EnableHTTPTPC: false, + Timeout: 84300, + AllowPropfindDepthInfinity: false, + NameValidation: 
config.NameValidation{ + InvalidChars: []string{"\f", "\r", "\n", "\\"}, + MaxLength: 255, + }, + }, + FavoritesStore: config.FavoritesStore{ + Store: "memory", + }, + // FavoritesStore: config.FavoritesStore{ + // Store: "nats-js-kv", + // Nodes: []string{"127.0.0.1:9233"}, + // Database: "webdav", + // Table: "", + // }, } } @@ -58,6 +124,17 @@ func EnsureDefaults(cfg *config.Config) { if cfg.Commons != nil { cfg.HTTP.TLS = cfg.Commons.HTTPServiceTLS } + + if cfg.OCDav.MachineAuthAPIKey == "" && cfg.Commons != nil && cfg.Commons.MachineAuthAPIKey != "" { + cfg.OCDav.MachineAuthAPIKey = cfg.Commons.MachineAuthAPIKey + } + + if (cfg.Commons != nil && cfg.Commons.OpenCloudURL != "") && + (cfg.HTTP.CORS.AllowedOrigins == nil || + len(cfg.HTTP.CORS.AllowedOrigins) == 1 && + cfg.HTTP.CORS.AllowedOrigins[0] == "https://localhost:9200") { + cfg.HTTP.CORS.AllowedOrigins = []string{cfg.Commons.OpenCloudURL} + } } // Sanitize sanitized the configuration diff --git a/services/webdav/pkg/ocdav/avatars.go b/services/webdav/pkg/ocdav/avatars.go new file mode 100644 index 0000000000..7b9ed0d42a --- /dev/null +++ b/services/webdav/pkg/ocdav/avatars.go @@ -0,0 +1,71 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "encoding/hex" + "net/http" + + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" +) + +// AvatarsHandler handles avatar requests +type AvatarsHandler struct { +} + +func (h *AvatarsHandler) Init(c *config.Config) error { + return nil +} + +// Handler handles requests +func (h *AvatarsHandler) Handler(s *svc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + log := appctx.GetLogger(ctx) + + if r.Method == http.MethodOptions { + // no need for the user, and we need to be able + // to answer preflight checks, which have no auth headers + r.URL.Path = "/" // always use / ... we just want the options answered so phoenix doesnt hiccup + s.handleOptions(w, r) + return + } + + _, r.URL.Path = router.ShiftPath(r.URL.Path) + if r.Method == http.MethodGet && r.URL.Path == "/128.png" { + // TODO load avatar url from user context? 
+ const img = "89504E470D0A1A0A0000000D4948445200000080000000800806000000C33E61CB00000006624B474400FF00FF00FFA0BDA793000000097048597300000B1300000B1301009A9C180000000774494D4507E3061B080516D3ECF61E000008F24944415478DAED9D7D8C1D5515C07FDB76774B775BB7454AA54BBB2D5DDD765B6BD34A140B464CB07EA0113518016B4848FC438A1A448D9A18FF40316942524D544C900F49A17C2882120A28604D506C915AB160B7B2A14275B7BB606BCB76779F7FDC79F4CE79F7BD7DEFED7B3377E69E5FF2B233DBED9B7B3EEECCB977CE3D171445511445511445098B9680645D0BAC8C7EAE020A0E5D0C027B80DDC033EA1ED96521B001D80A3C1F19BB9ECF007003F0CEE83B15CFB90C781A189986D1CB7D8E007F06AE5035FBC599C0359181AA35E6716014188A3EA3D1EFAAFDFFAF025F06DEA2EA4F97EB81935318EB047037F0396035300FE8043A8039D1A723FADD3CA01FB80AB817989CE2BB4F0237AA1992E703C00B150CB313D812057DD36555D4DB7756B8DE41E0236A9664B8A982216E897A72B3980BDC5CE1CE70AB9AA779744541984BF1DF03BA136C4B77F4F871B5E519E074355763590E8C9519A62D4DB15DDDC07E47BBC681156AB6C6D0071C7328F93A60A607ED9B017CDED1BEA35140A94C83259122ED67EE316093876DDD28E61F26A3B69EAD66AC9F61D1AB463D1F7BCF075E126D1E5233D6C74EC7E4CBEA0CB47B317048B4FD6135676D5C2E14F83A705686DA3FD771F7D229E41A823E19507D2A83729CEF90A34FCD3B35F70BA5DD906159AE14B2FC5ACD5B99F384C20E016D19966726B04FC874819AB93C434259EFCD814C2B1C2319C5C14542513FCF916C5B856C17ABB94BF915F1A9D43CCDA2AD20FEDAFA5135779CD9A287FC2D8732EE12322E52B39FE28742391B722863BF90F17635BBA115386C296630C7B2DA492CFFC16423A5CA0C0F94B214938A55E4DE9CC73945E691EEAB6C6F1C605D140314F96D8E1DE009EBB82D923D78EE14CFC63C67DA9E2D64DDA1E687D7882751E49D717452E80DE692DFC99F723C26646E0F390638579C3F1280033CEE888182758035E27C57000EF09438EF0BD9017AC5F940000EB0479CF784EC004BACE362E66FDE1916E7DD213BC07CEBF8BF8104BE72B4B330640768B58E8F0734FA39661D7785EA002DE2FA2703790448676F0DD901EC123593013D02267CB90BCF48591105E110A13051A12304E500E3BEDC0A136666858E105410683B407B20778116605699BB41700E30621DCF09E80E709A757C22640778D93A9E1B501C603BFB70C80EF092753C0B3FD6FB27815DC6E65F213B80CCFFEB0DC0F8B27CCC3F43768003E27C6D000E20339E5F08D9019E9B423
979E43C71BE97C0B1B3639E0A40DE3F089983E72FC4EBEAE41DBBDED1F36937C687B4703B55BA050F72E59B488F18EA3EAE0E509A07B826C70E2083DC87D5014C143C669DAFCFB103D8B28D3B82E020E9225EEA3DCF2B839EB4E41C414BCABEC19E4022635BC67D3E346886278AF99138BF3487C6DF2CCE7FA2FD3EEE8876EF78368732CA6251AD6AF6D2D180BDA54B9E6AEC2E25BE25CD633EF53C5FD86E1DCF06DE9D2307D8487C09FC1DDADF4B5981C98E29F692277224DB1F2DB926D0BD04CAF2AC784E2ECB814CB236D05E3573792E10CABA270732FD46C874A19AB9320396B286C9F664C9424C1188A23C2FFA38FCF20D3B185C80D9222EAB7C0C7893757EA7F6EFA9E9A174E3C7AC22B797D3E0AF4AEE168AFB520665F8AA90E101356BF57489DEB39F6C958D6FA77467D337AB59ABA705784828F033196AFF15A2ED8F12D6DAC786B086D22D57B2B07A688EA3DDEBD59CF5F103A1C86D1968F336D1E69FAA19EB6701A6744C5666079789B61ED367FFF4F99650EA11FC5C42D64A3CB3A9007C57CDD7189E168AFDBE876DBC91FCE734A4463F66F3485BC11FF4A87D978AB68D11C632B744B99AD2DD44CFF1A05DEB89BFC62E00D7AAB99AC30EA1E8D7800E8F82BE02709F9AA9799C46E9DE820748A7E2F65B8997BA2F06A81D6AA6E6D289C9A9B7153F98F070EB3D8E9E3F4AFCCD9FD244563B0C3044325BB17DC271ED02F02E354BF2C1D70987219AB9A6E0DA32C6FFA49A231DFACA18647B13AE7553996B6D5333A4CB324CA125DB2813C0CA065EA3D731D42B00B7A9FAFDC136CCFF68ECEE638BA2EF94A38F3655BB1FC8F705CDD87CF23E718D6FAADAFD19168E0AE3346338D625AE314C7CB58F921232FBA6995BCFDD21AEF551557FBAB4736AA38924B26F36503AF9A3A95E29F26002C33F89CC58BE4BCD900E1FA2741E3E89A8BC8D78E2C704F03E3547B2F43AC6E4572778FD2D8EEBF7A859926101F04A0AB77E89DCF5FC1029EF0016024B89EFBE5D00FE413AAF83DB319341765B4E92EF4297A97215A519C2C749E60D603916112FFD52DC14F2323557633803F3EEFD49C73377043F52C2CE1141617149DB4398323767AA19AB6739F005E09798248F51DC6FE00EE357DD80D3817F9769EBAB517CF040143C6AB018B10CB818F80EA61ED0781905CACF0EFC4CBBEAC45434A9468613983AC15F073691AF8A6815E9C1E4CF8F44069FAC5261C5D2EAABF07BE6AD0593A3F05C0D724D46BA18C2AC77E8CE93C1DB804F03B746B7F4420D9F21E07EE02BC0BA0CCADE0F5C8399391CA851F641E076E072329864DA1605463B6A10780CB38E6E2F701D8D7D97EF13E702D7037F8F460B6355EAE741E06D789E7FB004933675A04AA186819B31397C6B896FA516029D98E4D64B2
2BD1DA9426703C08F7DEA20B380B7535A0ACDF59C3B0CEC06BE019CA531B0933E4C8EE100A51948AE6078252916FADA8829803C51A191AF005FC32CA298AFF6ADA9632D8E628017A77874EE8E3A6162F402BFA8D0A8039852E8FD6AC786B10E938CF27205BD6F4F628EE18B94AED22D7E0E621226B40C7AF368053E4EE9CA287B7E6173332EDC8149B4745DF477C087D53689B3391A4DB86C720B0DAEA774D07191A3C0F96A87D4D952663839D8A85BCE2EC7977F9B6C54EC0A851EE0670E3BED9EEEDCC1FB1D51E73AD5B7B75CE888D12E99CE17CAE95B5D04E93F17519A2B5917B2FAC53ED56D6678840614A9FEACF8924DAAD7CCB0BEDA3B77A569C47788F3DFAB5E33C37E71BEB81E07586E1D1F45F7BACF1AF67ECC67D4E30036AFAB03648A494CB26A91B28B6567D5F0A573D07570596176E40045C3774ED7011670EA3DBFE23F2DC2E8EDF538408B389EA77ACD2C6DF5C40007556FB9E1AFD5F472175762B66D9B2D6EFF05F19332E7D4F877AE7F6FF66327EF8FB53F015BB50F288AA2288AA2288A62F83FEC37068C6750398B0000000049454E44AE426082" + decoded, err := hex.DecodeString(img) + if err != nil { + log.Error().Err(err).Msg("error decoding string") + w.WriteHeader(http.StatusInternalServerError) + } + w.Header().Set(net.HeaderContentType, "image/png") + if _, err := w.Write(decoded); err != nil { + log.Error().Err(err).Msg("error writing data response") + } + return + } + + w.WriteHeader(http.StatusNotFound) + }) +} diff --git a/services/webdav/pkg/ocdav/config/config.go b/services/webdav/pkg/ocdav/config/config.go new file mode 100644 index 0000000000..130383adb1 --- /dev/null +++ b/services/webdav/pkg/ocdav/config/config.go @@ -0,0 +1,86 @@ +package config + +import "github.com/opencloud-eu/reva/v2/pkg/sharedconf" + +// Config holds the config options that need to be passed down to all ocdav handlers +type Config struct { + Prefix string `mapstructure:"prefix"` + // FilesNamespace prefixes the namespace, optionally with user information. 
+ // Example: if FilesNamespace is /users/{{substr 0 1 .Username}}/{{.Username}} + // and received path is /docs the internal path will be: + // /users///docs + FilesNamespace string `mapstructure:"files_namespace"` + // WebdavNamespace prefixes the namespace, optionally with user information. + // Example: if WebdavNamespace is /users/{{substr 0 1 .Username}}/{{.Username}} + // and received path is /docs the internal path will be: + // /users///docs + WebdavNamespace string `mapstructure:"webdav_namespace"` + SharesNamespace string `mapstructure:"shares_namespace"` + OCMNamespace string `mapstructure:"ocm_namespace"` + GatewaySvc string `mapstructure:"gatewaysvc"` + Timeout int64 `mapstructure:"timeout"` + Insecure bool `mapstructure:"insecure"` + // If true, HTTP COPY will expect the HTTP-TPC (third-party copy) headers + EnableHTTPTpc bool `mapstructure:"enable_http_tpc"` + PublicURL string `mapstructure:"public_url"` + FavoriteStorageDriver string `mapstructure:"favorite_storage_driver"` + FavoriteStorageDrivers map[string]map[string]interface{} `mapstructure:"favorite_storage_drivers"` + Version string `mapstructure:"version"` + VersionString string `mapstructure:"version_string"` + Edition string `mapstructure:"edition"` + Product string `mapstructure:"product"` + ProductName string `mapstructure:"product_name"` + ProductVersion string `mapstructure:"product_version"` + AllowPropfindDepthInfinitiy bool `mapstructure:"allow_depth_infinity"` + + NameValidation NameValidation `mapstructure:"validation"` + + // SharedSecret used to sign the 'oc:download' URLs + URLSigningSharedSecret string `mapstructure:"url_signing_shared_secret"` + + MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"` +} + +// NameValidation is the validation configuration for file and folder names +type NameValidation struct { + InvalidChars []string `mapstructure:"invalid_chars"` + MaxLength int `mapstructure:"max_length"` +} + +// Init initializes the configuration +func (c 
*Config) Init() { + // note: default c.Prefix is an empty string + c.GatewaySvc = sharedconf.GetGatewaySVC(c.GatewaySvc) + + if c.FavoriteStorageDriver == "" { + c.FavoriteStorageDriver = "memory" + } + + if c.Version == "" { + c.Version = "10.0.11.5" + } + + if c.VersionString == "" { + c.VersionString = "10.0.11" + } + + if c.Product == "" { + c.Product = "reva" + } + + if c.ProductName == "" { + c.ProductName = "reva" + } + + if c.ProductVersion == "" { + c.ProductVersion = "10.0.11" + } + + if c.NameValidation.InvalidChars == nil { + c.NameValidation.InvalidChars = []string{"\f", "\r", "\n", "\\"} + } + + if c.NameValidation.MaxLength == 0 { + c.NameValidation.MaxLength = 255 + } +} diff --git a/services/webdav/pkg/ocdav/context.go b/services/webdav/pkg/ocdav/context.go new file mode 100644 index 0000000000..7c0a6d443f --- /dev/null +++ b/services/webdav/pkg/ocdav/context.go @@ -0,0 +1,20 @@ +package ocdav + +import ( + "context" + + cs3storage "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" +) + +type tokenStatInfoKey struct{} + +// ContextWithTokenStatInfo adds the token stat info to the context +func ContextWithTokenStatInfo(ctx context.Context, info *cs3storage.ResourceInfo) context.Context { + return context.WithValue(ctx, tokenStatInfoKey{}, info) +} + +// TokenStatInfoFromContext returns the token stat info from the context +func TokenStatInfoFromContext(ctx context.Context) (*cs3storage.ResourceInfo, bool) { + v, ok := ctx.Value(tokenStatInfoKey{}).(*cs3storage.ResourceInfo) + return v, ok +} diff --git a/services/webdav/pkg/ocdav/copy.go b/services/webdav/pkg/ocdav/copy.go new file mode 100644 index 0000000000..1ed99f3a5c --- /dev/null +++ b/services/webdav/pkg/ocdav/copy.go @@ -0,0 +1,782 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +import ( + "context" + "fmt" + "io" + "net/http" + "path" + "path/filepath" + "strconv" + + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/rhttp" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" +) + +type copy struct { + source *provider.Reference + sourceInfo *provider.ResourceInfo + destination *provider.Reference + depth net.Depth + successCode int +} + +func (s *svc) handlePathCopy(w http.ResponseWriter, r *http.Request, ns string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "copy") + defer span.End() + + if !isBodyEmpty(r) { + 
w.WriteHeader(http.StatusUnsupportedMediaType) + b, err := errors.Marshal(http.StatusUnsupportedMediaType, "body must be empty", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + if s.c.EnableHTTPTpc { + if r.Header.Get("Source") != "" { + // HTTP Third-Party Copy Pull mode + s.handleTPCPull(ctx, w, r, ns) + return + } else if r.Header.Get("Destination") != "" { + // HTTP Third-Party Copy Push mode + s.handleTPCPush(ctx, w, r, ns) + return + } + } + + // Local copy: in this case Destination is mandatory + src := path.Join(ns, r.URL.Path) + + dh := r.Header.Get(net.HeaderDestination) + baseURI := r.Context().Value(net.CtxKeyBaseURI).(string) + dst, err := net.ParseDestination(baseURI, dh) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "failed to extract destination", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + if err := ValidateName(filename(src), s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "source failed naming rules", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + if err := ValidateDestination(filename(dst), s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "destination failed naming rules", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + dst = path.Join(ns, dst) + + sublog := appctx.GetLogger(ctx).With().Str("src", src).Str("dst", dst).Logger() + + srcSpace, status, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, src) + if err != nil { + sublog.Error().Err(err).Str("path", src).Msg("failed to look up storage space") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, status) + return + } + dstSpace, 
status, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, dst) + if err != nil { + sublog.Error().Err(err).Str("path", dst).Msg("failed to look up storage space") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, status) + return + } + + cp := s.prepareCopy(ctx, w, r, spacelookup.MakeRelativeReference(srcSpace, src, false), spacelookup.MakeRelativeReference(dstSpace, dst, false), &sublog, dstSpace.GetRoot().GetStorageId() == utils.ShareStorageProviderID) + if cp == nil { + return + } + + if err := s.executePathCopy(ctx, s.gatewaySelector, w, r, cp); err != nil { + sublog.Error().Err(err).Str("depth", cp.depth.String()).Msg("error executing path copy") + w.WriteHeader(http.StatusInternalServerError) + } + w.WriteHeader(cp.successCode) +} + +func (s *svc) executePathCopy(ctx context.Context, selector pool.Selectable[gateway.GatewayAPIClient], w http.ResponseWriter, r *http.Request, cp *copy) error { + log := appctx.GetLogger(ctx) + log.Debug().Str("src", cp.sourceInfo.Path).Str("dst", cp.destination.Path).Msg("descending") + + client, err := selector.Next() + if err != nil { + return err + } + + var fileid string + if cp.sourceInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + // create dir + createReq := &provider.CreateContainerRequest{ + Ref: cp.destination, + } + createRes, err := client.CreateContainer(ctx, createReq) + if err != nil { + log.Error().Err(err).Msg("error performing create container grpc request") + return err + } + if createRes.Status.Code != rpc.Code_CODE_OK { + if createRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + w.WriteHeader(http.StatusForbidden) + m := fmt.Sprintf("Permission denied to create %v", createReq.Ref.Path) + b, err := errors.Marshal(http.StatusForbidden, m, "", "") + errors.HandleWebdavError(log, w, b, err) + } + return nil + } + + // TODO: also copy properties: 
https://tools.ietf.org/html/rfc4918#section-9.8.2 + + if cp.depth != net.DepthInfinity { + return nil + } + + // descend for children + listReq := &provider.ListContainerRequest{ + Ref: cp.source, + } + res, err := client.ListContainer(ctx, listReq) + if err != nil { + return err + } + if res.Status.Code != rpc.Code_CODE_OK { + w.WriteHeader(http.StatusInternalServerError) + return nil + } + + for i := range res.Infos { + child := filepath.Base(res.Infos[i].Path) + src := &provider.Reference{ + ResourceId: cp.source.ResourceId, + Path: utils.MakeRelativePath(filepath.Join(cp.source.Path, child)), + } + childDst := &provider.Reference{ + ResourceId: cp.destination.ResourceId, + Path: utils.MakeRelativePath(filepath.Join(cp.destination.Path, child)), + } + err := s.executePathCopy(ctx, selector, w, r, ©{source: src, sourceInfo: res.Infos[i], destination: childDst, depth: cp.depth, successCode: cp.successCode}) + if err != nil { + return err + } + } + + // we need to stat again to get the fileid + r, err := client.Stat(ctx, &provider.StatRequest{Ref: cp.destination}) + if err != nil { + return err + } + + if r.GetStatus().GetCode() != rpc.Code_CODE_OK { + return fmt.Errorf("status code %d", r.GetStatus().GetCode()) + } + + fileid = storagespace.FormatResourceID(r.GetInfo().GetId()) + } else { + // copy file + + // 1. get download url + + dReq := &provider.InitiateFileDownloadRequest{ + Ref: cp.source, + } + + dRes, err := client.InitiateFileDownload(ctx, dReq) + if err != nil { + return err + } + + if dRes.Status.Code != rpc.Code_CODE_OK { + return fmt.Errorf("status code %d", dRes.Status.Code) + } + + var downloadEP, downloadToken string + for _, p := range dRes.Protocols { + if p.Protocol == "spaces" { + downloadEP, downloadToken = p.DownloadEndpoint, p.Token + } + } + + // 2. 
get upload url + + uReq := &provider.InitiateFileUploadRequest{ + Ref: cp.destination, + Opaque: &typespb.Opaque{ + Map: map[string]*typespb.OpaqueEntry{ + "Upload-Length": { + Decoder: "plain", + // TODO: handle case where size is not known in advance + Value: []byte(strconv.FormatUint(cp.sourceInfo.GetSize(), 10)), + }, + }, + }, + } + + uRes, err := client.InitiateFileUpload(ctx, uReq) + if err != nil { + return err + } + + if uRes.Status.Code != rpc.Code_CODE_OK { + if uRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + w.WriteHeader(http.StatusForbidden) + m := fmt.Sprintf("Permissions denied to create %v", uReq.Ref.Path) + b, err := errors.Marshal(http.StatusForbidden, m, "", "") + errors.HandleWebdavError(log, w, b, err) + return nil + } + errors.HandleErrorStatus(log, w, uRes.Status) + return nil + } + + var uploadEP, uploadToken string + for _, p := range uRes.Protocols { + if p.Protocol == "tus" { + uploadEP, uploadToken = p.UploadEndpoint, p.Token + } + } + + // 3. do download + + httpDownloadReq, err := rhttp.NewRequest(ctx, "GET", downloadEP, nil) + if err != nil { + return err + } + httpDownloadReq.Header.Set(TokenTransportHeader, downloadToken) + + httpDownloadRes, err := s.client.Do(httpDownloadReq) + if err != nil { + return err + } + defer httpDownloadRes.Body.Close() + if httpDownloadRes.StatusCode == http.StatusForbidden { + w.WriteHeader(http.StatusForbidden) + b, err := errors.Marshal(http.StatusForbidden, http.StatusText(http.StatusForbidden), "", strconv.Itoa(http.StatusForbidden)) + errors.HandleWebdavError(log, w, b, err) + return nil + } + if httpDownloadRes.StatusCode != http.StatusOK { + return fmt.Errorf("status code %d", httpDownloadRes.StatusCode) + } + + // 4. 
do upload
	fileid, err = s.tusUpload(ctx, uploadEP, uploadToken, httpDownloadRes.Body, int64(cp.sourceInfo.GetSize()))
	if err != nil {
		return err
	}
	}

	// expose the id of the copied resource to the client
	w.Header().Set(net.HeaderOCFileID, fileid)
	return nil
}

// handleSpacesCopy handles a COPY request against the spaces endpoint.
// It resolves the source from the space id and request path, the destination
// from the Destination header, validates the operation via prepareCopy and
// then copies recursively via executeSpacesCopy.
func (s *svc) handleSpacesCopy(w http.ResponseWriter, r *http.Request, spaceID string) {
	ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces_copy")
	defer span.End()

	// COPY requests must not carry a body
	if !isBodyEmpty(r) {
		w.WriteHeader(http.StatusUnsupportedMediaType)
		b, err := errors.Marshal(http.StatusUnsupportedMediaType, "body must be empty", "", "")
		errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err)
		return
	}

	dh := r.Header.Get(net.HeaderDestination)
	baseURI := r.Context().Value(net.CtxKeyBaseURI).(string)
	dst, err := net.ParseDestination(baseURI, dh)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Str("destination", dst).Logger()

	srcRef, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// the destination path starts with the (possibly different) target space id
	dstSpaceID, dstRelPath := router.ShiftPath(dst)

	dstRef, err := spacelookup.MakeStorageSpaceReference(dstSpaceID, dstRelPath)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	cp := s.prepareCopy(ctx, w, r, &srcRef, &dstRef, &sublog, dstRef.GetResourceId().GetStorageId() == utils.ShareStorageProviderID)
	if cp == nil {
		// prepareCopy has already written an error response
		return
	}

	err = s.executeSpacesCopy(ctx, w, s.gatewaySelector, cp)
	if err != nil {
		sublog.Error().Err(err).Str("depth", cp.depth.String()).Msg("error descending directory")
		w.WriteHeader(http.StatusInternalServerError)
		// FIX: return after reporting the error — the original fell through
		// and called WriteHeader a second time with cp.successCode, which is
		// a superfluous WriteHeader call once the 500 has been sent.
		return
	}
	w.WriteHeader(cp.successCode)
}

// executeSpacesCopy recursively copies cp.source to cp.destination using
// spaces references: containers are created and descended into, files are
// streamed through a download/upload round trip.
func (s *svc) executeSpacesCopy(ctx context.Context, w http.ResponseWriter, selector pool.Selectable[gateway.GatewayAPIClient], cp *copy) error {
	log := appctx.GetLogger(ctx)
log.Debug().Interface("src", cp.sourceInfo).Interface("dst", cp.destination).Msg("descending")

	client, err := selector.Next()
	if err != nil {
		return err
	}

	var fileid string
	if cp.sourceInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
		// create dir
		createReq := &provider.CreateContainerRequest{
			Ref: cp.destination,
		}
		createRes, err := client.CreateContainer(ctx, createReq)
		if err != nil {
			log.Error().Err(err).Msg("error performing create container grpc request")
			return err
		}
		if createRes.Status.Code != rpc.Code_CODE_OK {
			if createRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED {
				w.WriteHeader(http.StatusForbidden)
				// TODO path could be empty or relative...
				m := fmt.Sprintf("Permission denied to create %v", createReq.Ref.Path)
				b, err := errors.Marshal(http.StatusForbidden, m, "", "")
				errors.HandleWebdavError(log, w, b, err)
			}
			// non-OK status is reported via the response writer; nil tells the
			// caller not to write another status code
			return nil
		}

		// TODO: also copy properties: https://tools.ietf.org/html/rfc4918#section-9.8.2

		// only Depth: infinity descends into the container
		if cp.depth != net.DepthInfinity {
			return nil
		}

		// descend for children
		listReq := &provider.ListContainerRequest{Ref: &provider.Reference{ResourceId: cp.sourceInfo.Id, Path: "."}}
		res, err := client.ListContainer(ctx, listReq)
		if err != nil {
			return err
		}
		if res.Status.Code != rpc.Code_CODE_OK {
			w.WriteHeader(http.StatusInternalServerError)
			return nil
		}

		for i := range res.Infos {
			childRef := &provider.Reference{
				ResourceId: cp.destination.ResourceId,
				Path:       utils.MakeRelativePath(path.Join(cp.destination.Path, res.Infos[i].Path)),
			}
			// recurse with the child's info as the new source
			err := s.executeSpacesCopy(ctx, w, selector, &copy{sourceInfo: res.Infos[i], destination: childRef, depth: cp.depth, successCode: cp.successCode})
			if err != nil {
				return err
			}
		}

		// we need to stat again to get the fileid
		r, err := client.Stat(ctx, &provider.StatRequest{Ref: cp.destination})
		if err != nil {
			return err
		}

		if r.GetStatus().GetCode() != rpc.Code_CODE_OK {
			return fmt.Errorf("stat: status code %d", r.GetStatus().GetCode())
		}

		fileid = storagespace.FormatResourceID(r.GetInfo().GetId())
	} else {
		// copy file
		// 1. get download url
		dReq := &provider.InitiateFileDownloadRequest{Ref: &provider.Reference{ResourceId: cp.sourceInfo.Id, Path: "."}}
		dRes, err := client.InitiateFileDownload(ctx, dReq)
		if err != nil {
			return err
		}

		if dRes.Status.Code != rpc.Code_CODE_OK {
			return fmt.Errorf("status code %d", dRes.Status.Code)
		}

		// pick the spaces download protocol entry
		var downloadEP, downloadToken string
		for _, p := range dRes.Protocols {
			if p.Protocol == "spaces" {
				downloadEP, downloadToken = p.DownloadEndpoint, p.Token
			}
		}
		// 2. get upload url
		uReq := &provider.InitiateFileUploadRequest{
			Ref: cp.destination,
			Opaque: &typespb.Opaque{
				Map: map[string]*typespb.OpaqueEntry{
					net.HeaderUploadLength: {
						Decoder: "plain",
						// TODO: handle case where size is not known in advance
						Value: []byte(strconv.FormatUint(cp.sourceInfo.GetSize(), 10)),
					},
				},
			},
		}

		uRes, err := client.InitiateFileUpload(ctx, uReq)
		if err != nil {
			return err
		}

		if uRes.Status.Code != rpc.Code_CODE_OK {
			if uRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED {
				w.WriteHeader(http.StatusForbidden)
				// TODO path can be empty or relative
				m := fmt.Sprintf("Permissions denied to create %v", uReq.Ref.Path)
				b, err := errors.Marshal(http.StatusForbidden, m, "", "")
				errors.HandleWebdavError(log, w, b, err)
				return nil
			}
			errors.HandleErrorStatus(log, w, uRes.Status)
			return nil
		}

		// pick the tus upload protocol entry
		var uploadEP, uploadToken string
		for _, p := range uRes.Protocols {
			if p.Protocol == "tus" {
				uploadEP, uploadToken = p.UploadEndpoint, p.Token
			}
		}

		// 3.
do download
	httpDownloadReq, err := rhttp.NewRequest(ctx, http.MethodGet, downloadEP, nil)
	if err != nil {
		return err
	}
	if downloadToken != "" {
		httpDownloadReq.Header.Set(TokenTransportHeader, downloadToken)
	}

	httpDownloadRes, err := s.client.Do(httpDownloadReq)
	if err != nil {
		return err
	}
	defer httpDownloadRes.Body.Close()
	switch httpDownloadRes.StatusCode {
	case http.StatusForbidden, http.StatusTooEarly:
		// propagate the download status to the client as a webdav error
		w.WriteHeader(httpDownloadRes.StatusCode)
		b, err := errors.Marshal(http.StatusForbidden, http.StatusText(httpDownloadRes.StatusCode), "", strconv.Itoa(httpDownloadRes.StatusCode))
		errors.HandleWebdavError(log, w, b, err)
		return nil
	case http.StatusOK:
		// ok
	default:
		return fmt.Errorf("status code %d", httpDownloadRes.StatusCode)
	}

	// 4. do upload
	fileid, err = s.tusUpload(ctx, uploadEP, uploadToken, httpDownloadRes.Body, int64(cp.sourceInfo.GetSize()))
	if err != nil {
		return err
	}
	}

	w.Header().Set(net.HeaderOCFileID, fileid)
	return nil
}

// prepareCopy validates a COPY request (recursion, Overwrite and Depth
// headers, source/destination stats, mount point protection) and returns the
// parameters of the copy to execute. It returns nil after having written an
// appropriate error response itself.
func (s *svc) prepareCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, srcRef, dstRef *provider.Reference, log *zerolog.Logger, destInShareJail bool) *copy {
	// reject copying a folder into one of its own children
	isChild, err := s.referenceIsChildOf(ctx, s.gatewaySelector, dstRef, srcRef)
	if err != nil {
		switch err.(type) {
		case errtypes.IsNotSupported:
			log.Error().Err(err).Msg("can not detect recursive copy operation. missing machine auth configuration?")
			w.WriteHeader(http.StatusForbidden)
		default:
			log.Error().Err(err).Msg("error while trying to detect recursive copy operation")
			w.WriteHeader(http.StatusInternalServerError)
		}
	}
	// NOTE(review): unlike the isParent check below, this error branch does
	// not return, so execution continues after a status code has already been
	// written — confirm whether a `return nil` is missing here.
	if isChild {
		w.WriteHeader(http.StatusConflict)
		b, err := errors.Marshal(http.StatusBadRequest, "can not copy a folder into one of its children", "", "")
		errors.HandleWebdavError(log, w, b, err)
		return nil
	}

	// reject copying a folder into its own parent
	isParent, err := s.referenceIsChildOf(ctx, s.gatewaySelector, srcRef, dstRef)
	if err != nil {
		switch err.(type) {
		case errtypes.IsNotFound:
			isParent = false
		case errtypes.IsNotSupported:
			log.Error().Err(err).Msg("can not detect recursive copy operation. missing machine auth configuration?")
			w.WriteHeader(http.StatusForbidden)
			return nil
		default:
			log.Error().Err(err).Msg("error while trying to detect recursive copy operation")
			w.WriteHeader(http.StatusInternalServerError)
			return nil
		}
	}

	if isParent {
		w.WriteHeader(http.StatusConflict)
		b, err := errors.Marshal(http.StatusBadRequest, "can not copy a folder into its parent", "", "")
		errors.HandleWebdavError(log, w, b, err)
		return nil

	}

	// NOTE(review): ResourceId is a pointer here, so == compares pointer
	// identity, not the id values — verify this is intended.
	if srcRef.Path == dstRef.Path && srcRef.ResourceId == dstRef.ResourceId {
		w.WriteHeader(http.StatusConflict)
		b, err := errors.Marshal(http.StatusBadRequest, "source and destination are the same", "", "")
		errors.HandleWebdavError(log, w, b, err)
		return nil
	}

	oh := r.Header.Get(net.HeaderOverwrite)
	overwrite, err := net.ParseOverwrite(oh)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		m := fmt.Sprintf("Overwrite header is set to incorrect value %v", overwrite)
		b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
		errors.HandleWebdavError(log, w, b, err)
		return nil
	}
	dh := r.Header.Get(net.HeaderDepth)
	depth, err := net.ParseDepth(dh)

	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		m := fmt.Sprintf("Depth header is set to incorrect value %v", dh)
		b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
		errors.HandleWebdavError(log, w, b, err)
		return nil
	}
	if dh == "" {
		// net.ParseDepth returns "1" for an empty value but copy expects "infinity"
		// so we overwrite it here
		depth = net.DepthInfinity
	}

	log.Debug().Bool("overwrite", overwrite).Str("depth", depth.String()).Msg("copy")

	client, err := s.gatewaySelector.Next()
	if err != nil {
		log.Error().Err(err).Msg("error selecting next client")
		w.WriteHeader(http.StatusInternalServerError)
		return nil
	}

	// the source must exist
	srcStatReq := &provider.StatRequest{Ref: srcRef}
	srcStatRes, err := client.Stat(ctx, srcStatReq)
	switch {
	case err != nil:
		log.Error().Err(err).Msg("error sending grpc stat request")
		w.WriteHeader(http.StatusInternalServerError)
		return nil
	case srcStatRes.Status.Code == rpc.Code_CODE_NOT_FOUND:
		errors.HandleErrorStatus(log, w, srcStatRes.Status)
		m := fmt.Sprintf("Resource %v not found", srcStatReq.Ref.Path)
		b, err := errors.Marshal(http.StatusNotFound, m, "", "")
		errors.HandleWebdavError(log, w, b, err)
		return nil
	case srcStatRes.Status.Code != rpc.Code_CODE_OK:
		errors.HandleErrorStatus(log, w, srcStatRes.Status)
		return nil
	}
	// copying a whole space root is not allowed
	if utils.IsSpaceRoot(srcStatRes.GetInfo()) {
		log.Error().Msg("the source is disallowed")
		w.WriteHeader(http.StatusBadRequest)
		return nil
	}

	// the destination may or may not exist; NOT_FOUND is acceptable here
	dstStatReq := &provider.StatRequest{Ref: dstRef}
	dstStatRes, err := client.Stat(ctx, dstStatReq)
	switch {
	case err != nil:
		log.Error().Err(err).Msg("error sending grpc stat request")
		w.WriteHeader(http.StatusInternalServerError)
		return nil
	case dstStatRes.Status.Code != rpc.Code_CODE_OK && dstStatRes.Status.Code != rpc.Code_CODE_NOT_FOUND:
		errors.HandleErrorStatus(log, w, dstStatRes.Status)
		return nil
	}

	successCode := http.StatusCreated // 201 if new resource was created, see https://tools.ietf.org/html/rfc4918#section-9.8.5
	if dstStatRes.Status.Code == rpc.Code_CODE_OK {
		successCode = http.StatusNoContent // 204 if target already existed, see https://tools.ietf.org/html/rfc4918#section-9.8.5

		if !overwrite {
			log.Warn().Bool("overwrite", overwrite).Msg("dst already exists")
			w.WriteHeader(http.StatusPreconditionFailed)
			m := fmt.Sprintf("Could not overwrite Resource %v", dstRef.Path)
			b, err := errors.Marshal(http.StatusPreconditionFailed, m, "", "")
			errors.HandleWebdavError(log, w, b, err) // 412, see https://tools.ietf.org/html/rfc4918#section-9.8.5
			return nil
		}

		// overwriting a space root is not allowed either
		if utils.IsSpaceRoot(dstStatRes.GetInfo()) {
			log.Error().Msg("overwriting is not allowed")
			w.WriteHeader(http.StatusBadRequest)
			return nil
		}

		// delete existing tree when overwriting a directory or replacing a file with a directory
		if dstStatRes.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER ||
			(dstStatRes.Info.Type == provider.ResourceType_RESOURCE_TYPE_FILE &&
				srcStatRes.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER) {

			// we must not allow to override mountpoints - so we check if we have access to the parent. If not this is a mountpoint
			if destInShareJail {
				res, err := client.GetPath(ctx, &provider.GetPathRequest{ResourceId: dstStatRes.GetInfo().GetId()})
				if err != nil || res.GetStatus().GetCode() != rpc.Code_CODE_OK {
					log.Error().Err(err).Msg("error sending grpc get path request")
					w.WriteHeader(http.StatusInternalServerError)
					return nil
				}

				// a path directly below the share jail root is a mount point
				dir, file := filepath.Split(filepath.Clean(res.GetPath()))
				if dir == "/" || dir == "" || file == "" {
					log.Error().Msg("must not overwrite mount points")
					w.WriteHeader(http.StatusBadRequest)
					_, _ = w.Write([]byte("must not overwrite mount points"))
					return nil
				}
			}

			delReq := &provider.DeleteRequest{Ref: dstRef}
			delRes, err := client.Delete(ctx, delReq)
			if err != nil {
				log.Error().Err(err).Msg("error sending grpc delete request")
				w.WriteHeader(http.StatusInternalServerError)
				return nil
			}

			if delRes.Status.Code != rpc.Code_CODE_OK && delRes.Status.Code != rpc.Code_CODE_NOT_FOUND {
				errors.HandleErrorStatus(log, w, delRes.Status)
				return nil
			}
		}
	} else if p := path.Dir(dstRef.Path); p != "" {
		// check if an intermediate path / the parent exists
		pRef := &provider.Reference{
			ResourceId: dstRef.ResourceId,
			Path:       utils.MakeRelativePath(p),
		}
		intStatReq := &provider.StatRequest{Ref: pRef}
		intStatRes, err := client.Stat(ctx, intStatReq)
		if err != nil {
			log.Error().Err(err).Msg("error sending grpc stat request")
			w.WriteHeader(http.StatusInternalServerError)
			return nil
		}
		if intStatRes.Status.Code != rpc.Code_CODE_OK {
			if intStatRes.Status.Code == rpc.Code_CODE_NOT_FOUND {
				// 409 if intermediate dir is missing, see https://tools.ietf.org/html/rfc4918#section-9.8.5
				log.Debug().Interface("parent", pRef).Interface("status", intStatRes.Status).Msg("conflict")
				w.WriteHeader(http.StatusConflict)
			} else {
				errors.HandleErrorStatus(log, w, intStatRes.Status)
			}
			return nil
		}
		// TODO what if intermediate is a file?
+
	}

	return &copy{source: srcRef, sourceInfo: srcStatRes.Info, depth: depth, successCode: successCode, destination: dstRef}
}

// tusUpload streams size bytes from body to the given tus upload endpoint in
// chunks of up to 10 MB, using PATCH requests with the tus 1.0.0 headers.
// It returns the file id reported by the server via the OC-FileId response
// header, if any.
func (s *svc) tusUpload(ctx context.Context, uploadEP, uploadToken string, body io.Reader, size int64) (string, error) {
	chunkSize := int64(10000000)
	var offset int64
	var fileid string

	for offset < size {
		// clamp the last chunk to the remaining bytes
		n := chunkSize
		if offset+n > size {
			n = size - offset
		}

		req, err := rhttp.NewRequest(ctx, http.MethodPatch, uploadEP, io.LimitReader(body, n))
		if err != nil {
			return "", err
		}

		req.Header.Set(TokenTransportHeader, uploadToken)
		req.Header.Set(net.HeaderTusResumable, "1.0.0")
		req.Header.Set(net.HeaderUploadOffset, strconv.FormatInt(offset, 10))
		req.Header.Set(net.HeaderContentType, "application/offset+octet-stream")
		req.ContentLength = n

		res, err := s.client.Do(req)
		if err != nil {
			return "", err
		}

		if res.StatusCode != http.StatusNoContent && res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			res.Body.Close()
			return "", fmt.Errorf("unexpected status code during TUS upload: %d", res.StatusCode)
		}

		// remember the last file id the server reported
		if id := res.Header.Get(net.HeaderOCFileID); id != "" {
			fileid = id
		}

		newOffsetStr := res.Header.Get(net.HeaderUploadOffset)
		res.Body.Close()

		// prefer the server-reported offset; fall back to assuming the full
		// chunk was accepted
		if newOffsetStr != "" {
			newOffset, err := strconv.ParseInt(newOffsetStr, 10, 64)
			if err != nil {
				return "", fmt.Errorf("invalid Upload-Offset header: %v", err)
			}
			offset = newOffset
		} else {
			offset += n
		}
	}
	return fileid, nil
}
diff --git a/services/webdav/pkg/ocdav/dav.go b/services/webdav/pkg/ocdav/dav.go
new file mode 100644
index 0000000000..5711f21c97
--- /dev/null
+++ b/services/webdav/pkg/ocdav/dav.go
@@ -0,0 +1,454 @@
+// Copyright 2018-2021 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +import ( + "context" + "fmt" + "net/http" + "path" + "path/filepath" + "strings" + + gatewayv1beta1 "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "github.com/opencloud-eu/reva/v2/pkg/storage/utils/grants" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc/metadata" +) + +const ( + _trashbinPath = "trash-bin" + + // WwwAuthenticate captures the Www-Authenticate header string. 
	WwwAuthenticate = "Www-Authenticate"
)

// Machine-readable error codes returned in webdav error bodies.
const (
	ErrListingMembers     = "ERR_LISTING_MEMBERS_NOT_ALLOWED"
	ErrInvalidCredentials = "ERR_INVALID_CREDENTIALS"
	ErrMissingBasicAuth   = "ERR_MISSING_BASIC_AUTH"
	ErrMissingBearerAuth  = "ERR_MISSING_BEARER_AUTH"
	ErrFileNotFoundInRoot = "ERR_FILE_NOT_FOUND_IN_ROOT"
)

// DavHandler routes to the different sub handlers
type DavHandler struct {
	AvatarsHandler      *AvatarsHandler
	FilesHandler        *WebDavHandler
	FilesHomeHandler    *WebDavHandler
	MetaHandler         *MetaHandler
	TrashbinHandler     *TrashbinHandler
	SpacesHandler       *SpacesHandler
	PublicFolderHandler *WebDavHandler
	PublicFileHandler   *PublicFileHandler
	SharesHandler       *WebDavHandler
	OCMSharesHandler    *WebDavHandler
}

// Init initializes every sub handler from the service configuration and
// returns the first initialization error encountered.
func (h *DavHandler) Init(c *config.Config) error {
	h.AvatarsHandler = new(AvatarsHandler)
	if err := h.AvatarsHandler.Init(c); err != nil {
		return err
	}
	h.FilesHandler = new(WebDavHandler)
	if err := h.FilesHandler.Init(c.FilesNamespace, false); err != nil {
		return err
	}
	h.FilesHomeHandler = new(WebDavHandler)
	if err := h.FilesHomeHandler.Init(c.WebdavNamespace, true); err != nil {
		return err
	}
	h.MetaHandler = new(MetaHandler)
	if err := h.MetaHandler.Init(c); err != nil {
		return err
	}
	h.TrashbinHandler = new(TrashbinHandler)
	if err := h.TrashbinHandler.Init(c); err != nil {
		return err
	}

	h.SpacesHandler = new(SpacesHandler)
	if err := h.SpacesHandler.Init(c); err != nil {
		return err
	}

	h.PublicFolderHandler = new(WebDavHandler)
	if err := h.PublicFolderHandler.Init("public", true); err != nil { // jail public file requests to /public/ prefix
		return err
	}

	h.PublicFileHandler = new(PublicFileHandler)
	if err := h.PublicFileHandler.Init("public"); err != nil { // jail public file requests to /public/ prefix
		return err
	}

	h.OCMSharesHandler = new(WebDavHandler)
	if err := h.OCMSharesHandler.Init(c.OCMNamespace, true); err != nil {
		return err
	}

	return nil
}

func
isOwner(userIDorName string, user *userv1beta1.User) bool { + return userIDorName != "" && (userIDorName == user.Id.OpaqueId || strings.EqualFold(userIDorName, user.Username)) +} + +// Handler handles requests +func (h *DavHandler) Handler(s *svc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + log := appctx.GetLogger(ctx) + + // if there is no file in the request url we assume the request url is: "/remote.php/dav/files" + // https://github.com/owncloud/core/blob/18475dac812064b21dabcc50f25ef3ffe55691a5/tests/acceptance/features/apiWebdavOperations/propfind.feature + if r.URL.Path == "/files" { + log.Debug().Str("path", r.URL.Path).Msg("method not allowed") + contextUser, ok := ctxpkg.ContextGetUser(ctx) + if ok { + r.URL.Path = path.Join(r.URL.Path, contextUser.Username) + } + + if r.Header.Get(net.HeaderDepth) == "" { + w.WriteHeader(http.StatusMethodNotAllowed) + b, err := errors.Marshal(http.StatusMethodNotAllowed, "Listing members of this collection is disabled", "", ErrListingMembers) + if err != nil { + log.Error().Msgf("error marshaling xml response: %s", b) + w.WriteHeader(http.StatusInternalServerError) + return + } + _, err = w.Write(b) + if err != nil { + log.Error().Msgf("error writing xml response: %s", b) + w.WriteHeader(http.StatusInternalServerError) + return + } + return + } + } + + var head string + head, r.URL.Path = router.ShiftPath(r.URL.Path) + + switch head { + case "avatars": + h.AvatarsHandler.Handler(s).ServeHTTP(w, r) + case "files": + var requestUserID string + var oldPath = r.URL.Path + + // detect and check current user in URL + requestUserID, r.URL.Path = router.ShiftPath(r.URL.Path) + + // note: some requests like OPTIONS don't forward the user + contextUser, ok := ctxpkg.ContextGetUser(ctx) + if ok && isOwner(requestUserID, contextUser) { + // use home storage handler when user was detected + base := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), "files", 
requestUserID) + ctx := context.WithValue(ctx, net.CtxKeyBaseURI, base) + r = r.WithContext(ctx) + + h.FilesHomeHandler.Handler(s).ServeHTTP(w, r) + } else { + r.URL.Path = oldPath + base := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), "files") + ctx := context.WithValue(ctx, net.CtxKeyBaseURI, base) + r = r.WithContext(ctx) + + h.FilesHandler.Handler(s).ServeHTTP(w, r) + } + case "meta": + base := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), "meta") + ctx = context.WithValue(ctx, net.CtxKeyBaseURI, base) + r = r.WithContext(ctx) + h.MetaHandler.Handler(s).ServeHTTP(w, r) + case "ocm": + base := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), "ocm") + ctx := context.WithValue(ctx, net.CtxKeyBaseURI, base) + c, err := s.gatewaySelector.Next() + if err != nil { + w.WriteHeader(http.StatusNotFound) + return + } + + // OC10 and Nextcloud (OCM 1.0) are using basic auth for carrying the + // ocm share id. + var ocmshare, sharedSecret string + username, _, ok := r.BasicAuth() + if ok { + // OCM 1.0 + ocmshare = username + sharedSecret = username + r.URL.Path = filepath.Join("/", ocmshare, r.URL.Path) + } else { + ocmshare, _ = router.ShiftPath(r.URL.Path) + sharedSecret = strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ") + } + + authRes, err := handleOCMAuth(ctx, c, ocmshare, sharedSecret) + switch { + case err != nil: + log.Error().Err(err).Msg("error during ocm authentication") + w.WriteHeader(http.StatusInternalServerError) + return + case authRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED: + log.Debug().Str("ocmshare", ocmshare).Msg("permission denied") + fallthrough + case authRes.Status.Code == rpc.Code_CODE_UNAUTHENTICATED: + log.Debug().Str("ocmshare", ocmshare).Msg("unauthorized") + w.WriteHeader(http.StatusUnauthorized) + return + case authRes.Status.Code == rpc.Code_CODE_NOT_FOUND: + log.Debug().Str("ocmshare", ocmshare).Msg("not found") + w.WriteHeader(http.StatusNotFound) + return + case authRes.Status.Code != rpc.Code_CODE_OK: + 
log.Error().Str("ocmshare", ocmshare).Interface("status", authRes.Status).Msg("grpc auth request failed") + w.WriteHeader(http.StatusInternalServerError) + return + } + + ctx = ctxpkg.ContextSetToken(ctx, authRes.Token) + ctx = ctxpkg.ContextSetUser(ctx, authRes.User) + ctx = metadata.AppendToOutgoingContext(ctx, ctxpkg.TokenHeader, authRes.Token) + + log.Debug().Str("ocmshare", ocmshare).Interface("user", authRes.User).Msg("OCM user authenticated") + + r = r.WithContext(ctx) + h.OCMSharesHandler.Handler(s).ServeHTTP(w, r) + case "trash-bin": + base := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), "trash-bin") + ctx := context.WithValue(ctx, net.CtxKeyBaseURI, base) + r = r.WithContext(ctx) + h.TrashbinHandler.Handler(s).ServeHTTP(w, r) + case "spaces": + base := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), "spaces") + ctx := context.WithValue(ctx, net.CtxKeyBaseURI, base) + r = r.WithContext(ctx) + h.SpacesHandler.Handler(s, h.TrashbinHandler).ServeHTTP(w, r) + case "public-files": + base := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), "public-files") + ctx = context.WithValue(ctx, net.CtxKeyBaseURI, base) + + var res *gatewayv1beta1.AuthenticateResponse + token, _ := router.ShiftPath(r.URL.Path) + var hasValidBasicAuthHeader bool + var pass string + var err error + // If user is authenticated + _, userExists := ctxpkg.ContextGetUser(ctx) + if userExists { + client, err := s.gatewaySelector.Next() + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + psRes, err := client.GetPublicShare(ctx, &link.GetPublicShareRequest{ + Ref: &link.PublicShareReference{ + Spec: &link.PublicShareReference_Token{ + Token: token, + }, + }}) + if err != nil && !strings.Contains(err.Error(), "core access token not found") { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + // If the link is internal then 307 
redirect + if psRes.Status.Code == rpc.Code_CODE_OK && grants.PermissionsEqual(psRes.Share.Permissions.GetPermissions(), &provider.ResourcePermissions{}) { + if psRes.GetShare().GetResourceId() != nil { + rUrl := path.Join("/dav/spaces", storagespace.FormatResourceID(psRes.GetShare().GetResourceId())) + http.Redirect(w, r, rUrl, http.StatusTemporaryRedirect) + return + } + log.Debug().Str("token", token).Interface("status", psRes.Status).Msg("resource id not found") + w.WriteHeader(http.StatusNotFound) + return + } + } + + if _, pass, hasValidBasicAuthHeader = r.BasicAuth(); hasValidBasicAuthHeader { + res, err = handleBasicAuth(r.Context(), s.gatewaySelector, token, pass) + } else { + q := r.URL.Query() + sig := q.Get("signature") + expiration := q.Get("expiration") + // We restrict the pre-signed urls to downloads. + if sig != "" && expiration != "" && !(r.Method == http.MethodGet || r.Method == http.MethodHead) { + w.WriteHeader(http.StatusUnauthorized) + return + } + res, err = handleSignatureAuth(r.Context(), s.gatewaySelector, token, sig, expiration) + } + + switch { + case err != nil: + w.WriteHeader(http.StatusInternalServerError) + return + case res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED: + fallthrough + case res.Status.Code == rpc.Code_CODE_UNAUTHENTICATED: + w.WriteHeader(http.StatusUnauthorized) + if hasValidBasicAuthHeader { + b, err := errors.Marshal(http.StatusUnauthorized, "Username or password was incorrect", "", ErrInvalidCredentials) + errors.HandleWebdavError(log, w, b, err) + return + } + b, err := errors.Marshal(http.StatusUnauthorized, "No 'Authorization: Basic' header found", "", ErrMissingBasicAuth) + errors.HandleWebdavError(log, w, b, err) + return + case res.Status.Code == rpc.Code_CODE_NOT_FOUND: + w.WriteHeader(http.StatusNotFound) + return + case res.Status.Code != rpc.Code_CODE_OK: + w.WriteHeader(http.StatusInternalServerError) + return + } + + if userExists { + // Build new context without an authenticated user. 
+ // the public link should be resolved by the 'publicshares' authenticated user + baseURI := ctx.Value(net.CtxKeyBaseURI).(string) + logger := appctx.GetLogger(ctx) + span := trace.SpanFromContext(ctx) + span.End() + ctx = trace.ContextWithSpan(context.Background(), span) + ctx = appctx.WithLogger(ctx, logger) + ctx = context.WithValue(ctx, net.CtxKeyBaseURI, baseURI) + } + ctx = ctxpkg.ContextSetToken(ctx, res.Token) + ctx = ctxpkg.ContextSetUser(ctx, res.User) + ctx = metadata.AppendToOutgoingContext(ctx, ctxpkg.TokenHeader, res.Token) + + r = r.WithContext(ctx) + + // the public share manager knew the token, but does the referenced target still exist? + sRes, err := getTokenStatInfo(ctx, s.gatewaySelector, token) + switch { + case err != nil: + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + case sRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED: + fallthrough + case sRes.Status.Code == rpc.Code_CODE_OK && grants.PermissionsEqual(sRes.GetInfo().GetPermissionSet(), &provider.ResourcePermissions{}): + // If the link is internal + if !userExists { + w.Header().Add(WwwAuthenticate, fmt.Sprintf("Bearer realm=\"%s\", charset=\"UTF-8\"", r.Host)) + w.WriteHeader(http.StatusUnauthorized) + b, err := errors.Marshal(http.StatusUnauthorized, "No 'Authorization: Bearer' header found", "", ErrMissingBearerAuth) + errors.HandleWebdavError(log, w, b, err) + return + } + fallthrough + case sRes.Status.Code == rpc.Code_CODE_NOT_FOUND: + log.Debug().Str("token", token).Interface("status", res.Status).Msg("resource not found") + w.WriteHeader(http.StatusNotFound) // log the difference + return + case sRes.Status.Code == rpc.Code_CODE_UNAUTHENTICATED: + log.Debug().Str("token", token).Interface("status", res.Status).Msg("unauthorized") + w.WriteHeader(http.StatusUnauthorized) + return + case sRes.Status.Code != rpc.Code_CODE_OK: + log.Error().Str("token", token).Interface("status", res.Status).Msg("grpc stat 
request failed") + w.WriteHeader(http.StatusInternalServerError) + return + } + log.Debug().Interface("statInfo", sRes.Info).Msg("Stat info from public link token path") + + ctx := ContextWithTokenStatInfo(ctx, sRes.Info) + r = r.WithContext(ctx) + if sRes.Info.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER { + h.PublicFileHandler.Handler(s).ServeHTTP(w, r) + } else { + h.PublicFolderHandler.Handler(s).ServeHTTP(w, r) + } + + default: + w.WriteHeader(http.StatusNotFound) + b, err := errors.Marshal(http.StatusNotFound, "File not found in root", "", ErrFileNotFoundInRoot) + errors.HandleWebdavError(log, w, b, err) + } + }) +} + +func getTokenStatInfo(ctx context.Context, selector pool.Selectable[gatewayv1beta1.GatewayAPIClient], token string) (*provider.StatResponse, error) { + client, err := selector.Next() + if err != nil { + return nil, err + } + + return client.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ + ResourceId: &provider.ResourceId{ + StorageId: utils.PublicStorageProviderID, + SpaceId: utils.PublicStorageSpaceID, + OpaqueId: token, + }, + }}) +} + +func handleBasicAuth(ctx context.Context, selector pool.Selectable[gatewayv1beta1.GatewayAPIClient], token, pw string) (*gatewayv1beta1.AuthenticateResponse, error) { + c, err := selector.Next() + if err != nil { + return nil, err + } + authenticateRequest := gatewayv1beta1.AuthenticateRequest{ + Type: "publicshares", + ClientId: token, + ClientSecret: "password|" + pw, + } + + return c.Authenticate(ctx, &authenticateRequest) +} + +func handleSignatureAuth(ctx context.Context, selector pool.Selectable[gatewayv1beta1.GatewayAPIClient], token, sig, expiration string) (*gatewayv1beta1.AuthenticateResponse, error) { + c, err := selector.Next() + if err != nil { + return nil, err + } + authenticateRequest := gatewayv1beta1.AuthenticateRequest{ + Type: "publicshares", + ClientId: token, + ClientSecret: "signature|" + sig + "|" + expiration, + } + + return c.Authenticate(ctx, &authenticateRequest) 
+} + +func handleOCMAuth(ctx context.Context, c gatewayv1beta1.GatewayAPIClient, ocmshare, sharedSecret string) (*gatewayv1beta1.AuthenticateResponse, error) { + return c.Authenticate(ctx, &gatewayv1beta1.AuthenticateRequest{ + Type: "ocmshares", + ClientId: ocmshare, + ClientSecret: sharedSecret, + }) +} diff --git a/services/webdav/pkg/ocdav/delete.go b/services/webdav/pkg/ocdav/delete.go new file mode 100644 index 0000000000..e44b219f16 --- /dev/null +++ b/services/webdav/pkg/ocdav/delete.go @@ -0,0 +1,149 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "errors" + "net/http" + "path" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + rstatus "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/utils" +) + +func (s *svc) handlePathDelete(w http.ResponseWriter, r *http.Request, ns string) (status int, err error) { + ctx := r.Context() + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(ctx, "path_delete") + defer span.End() + + if !isBodyEmpty(r) { + return http.StatusUnsupportedMediaType, errors.New("body must be empty") + } + + fn := path.Join(ns, r.URL.Path) + + space, rpcStatus, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, fn) + switch { + case err != nil: + span.RecordError(err) + return http.StatusInternalServerError, err + case rpcStatus.Code != rpc.Code_CODE_OK: + return rstatus.HTTPStatusFromCode(rpcStatus.Code), errtypes.NewErrtypeFromStatus(rpcStatus) + } + + return s.handleDelete(ctx, w, r, spacelookup.MakeRelativeReference(space, fn, false)) +} + +func (s *svc) handleDelete(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference) (status int, err error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(ctx, "delete") + defer span.End() + + req := &provider.DeleteRequest{ + Ref: ref, + LockId: requestLock(r), + } + + // FIXME the lock token is part of the application level protocol, it should be part of the DeleteRequest message not the opaque + ih, ok := parseIfHeader(r.Header.Get(net.HeaderIf)) + if ok { + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + req.Opaque = 
utils.AppendPlainToOpaque(req.Opaque, "lockid", ih.lists[0].conditions[0].Token) + } + } else if r.Header.Get(net.HeaderIf) != "" { + return http.StatusBadRequest, errtypes.BadRequest("invalid if header") + } + + client, err := s.gatewaySelector.Next() + if err != nil { + return http.StatusInternalServerError, errtypes.InternalError(err.Error()) + } + + res, err := client.Delete(ctx, req) + switch { + case err != nil: + span.RecordError(err) + return http.StatusInternalServerError, err + case res.Status.Code == rpc.Code_CODE_OK: + return http.StatusNoContent, nil + case res.Status.Code == rpc.Code_CODE_NOT_FOUND: + //lint:ignore ST1005 mimic the exact oc10 error message + return http.StatusNotFound, errors.New("Resource not found") + case res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED: + status = http.StatusForbidden + if lockID := utils.ReadPlainFromOpaque(res.Opaque, "lockid"); lockID != "" { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. + w.Header().Set("Lock-Token", "<"+lockID+">") + status = http.StatusLocked + } + // check if user has access to resource + sRes, err := client.Stat(ctx, &provider.StatRequest{Ref: ref}) + if err != nil { + span.RecordError(err) + return http.StatusInternalServerError, err + } + if sRes.Status.Code != rpc.Code_CODE_OK { + // return not found error so we do not leak existence of a file + // TODO hide permission failed for users without access in every kind of request + // TODO should this be done in the driver? 
+ //lint:ignore ST1005 mimic the exact oc10 error message + return http.StatusNotFound, errors.New("Resource not found") + } + return status, errors.New("") // mimic the oc10 error messages which have an empty message in this case + case res.Status.Code == rpc.Code_CODE_INTERNAL && res.Status.Message == "can't delete mount path": + // 405 must generate an Allow header + w.Header().Set("Allow", "PROPFIND, MOVE, COPY, POST, PROPPATCH, HEAD, GET, OPTIONS, LOCK, UNLOCK, REPORT, SEARCH, PUT") + return http.StatusMethodNotAllowed, errtypes.PermissionDenied(res.Status.Message) + } + return rstatus.HTTPStatusFromCode(res.Status.Code), errtypes.NewErrtypeFromStatus(res.Status) +} + +func (s *svc) handleSpacesDelete(w http.ResponseWriter, r *http.Request, spaceID string) (status int, err error) { + ctx := r.Context() + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(ctx, "spaces_delete") + defer span.End() + + if !isBodyEmpty(r) { + return http.StatusUnsupportedMediaType, errors.New("body must be empty") + } + + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + if err != nil { + return http.StatusBadRequest, err + } + + // do not allow deleting spaces via dav endpoint - use graph endpoint instead + // we get a relative reference coming from the space root + // so if the path is "empty" and no opaque id is present or the opaque id equals + // the space id, we are referencing the space + rid := ref.GetResourceId() + if ref.GetPath() == "." 
&& + (rid.GetOpaqueId() == "" || rid.GetOpaqueId() == rid.GetSpaceId()) { + return http.StatusMethodNotAllowed, errors.New("deleting spaces via dav is not allowed") + } + + return s.handleDelete(ctx, w, r, &ref) +} diff --git a/services/webdav/pkg/ocdav/errors/error.go b/services/webdav/pkg/ocdav/errors/error.go new file mode 100644 index 0000000000..437539cafc --- /dev/null +++ b/services/webdav/pkg/ocdav/errors/error.go @@ -0,0 +1,214 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package errors + +import ( + "bytes" + "encoding/xml" + "net/http" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/pkg/errors" + "github.com/rs/zerolog" +) + +var sabreException = map[int]string{ + + // the commented states have no corresponding exception in sabre/dav, + // see https://github.com/sabre-io/dav/tree/master/lib/DAV/Exception + + // http.StatusMultipleChoices: "Multiple Choices", + // http.StatusMovedPermanently: "Moved Permanently", + // http.StatusFound: "Found", + // http.StatusSeeOther: "See Other", + // http.StatusNotModified: "Not Modified", + // http.StatusUseProxy: "Use Proxy", + // http.StatusTemporaryRedirect: "Temporary Redirect", + // http.StatusPermanentRedirect: "Permanent Redirect", + + http.StatusBadRequest: "Sabre\\DAV\\Exception\\BadRequest", + http.StatusUnauthorized: "Sabre\\DAV\\Exception\\NotAuthenticated", + http.StatusPaymentRequired: "Sabre\\DAV\\Exception\\PaymentRequired", + http.StatusForbidden: "Sabre\\DAV\\Exception\\Forbidden", // InvalidResourceType, InvalidSyncToken, TooManyMatches + http.StatusNotFound: "Sabre\\DAV\\Exception\\NotFound", + http.StatusMethodNotAllowed: "Sabre\\DAV\\Exception\\MethodNotAllowed", + // http.StatusNotAcceptable: "Not Acceptable", + // http.StatusProxyAuthRequired: "Proxy Authentication Required", + // http.StatusRequestTimeout: "Request Timeout", + http.StatusConflict: "Sabre\\DAV\\Exception\\Conflict", // LockTokenMatchesRequestUri + // http.StatusGone: "Gone", + http.StatusLengthRequired: "Sabre\\DAV\\Exception\\LengthRequired", + http.StatusPreconditionFailed: "Sabre\\DAV\\Exception\\PreconditionFailed", + // http.StatusRequestEntityTooLarge: "Request Entity Too Large", + // http.StatusRequestURITooLong: "Request URI Too Long", + http.StatusUnsupportedMediaType: "Sabre\\DAV\\Exception\\UnsupportedMediaType", // ReportNotSupported + http.StatusRequestedRangeNotSatisfiable: 
"Sabre\\DAV\\Exception\\RequestedRangeNotSatisfiable", + // http.StatusExpectationFailed: "Expectation Failed", + // http.StatusTeapot: "I'm a teapot", + // http.StatusMisdirectedRequest: "Misdirected Request", + // http.StatusUnprocessableEntity: "Unprocessable Entity", + http.StatusLocked: "Sabre\\DAV\\Exception\\Locked", // ConflictingLock + // http.StatusFailedDependency: "Failed Dependency", + // http.StatusTooEarly: "Too Early", + // http.StatusUpgradeRequired: "Upgrade Required", + // http.StatusPreconditionRequired: "Precondition Required", + // http.StatusTooManyRequests: "Too Many Requests", + // http.StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", + // http.StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons", + + // http.StatusInternalServerError: "Internal Server Error", + http.StatusNotImplemented: "Sabre\\DAV\\Exception\\NotImplemented", + // http.StatusBadGateway: "Bad Gateway", + http.StatusServiceUnavailable: "Sabre\\DAV\\Exception\\ServiceUnavailable", + // http.StatusGatewayTimeout: "Gateway Timeout", + // http.StatusHTTPVersionNotSupported: "HTTP Version Not Supported", + // http.StatusVariantAlsoNegotiates: "Variant Also Negotiates", + http.StatusInsufficientStorage: "Sabre\\DAV\\Exception\\InsufficientStorage", + // http.StatusLoopDetected: "Loop Detected", + // http.StatusNotExtended: "Not Extended", + // http.StatusNetworkAuthenticationRequired: "Network Authentication Required", +} + +// SabreException returns a sabre exception text for the HTTP status code. It returns the empty +// string if the code is unknown. +func SabreException(code int) string { + return sabreException[code] +} + +// Exception represents a ocdav exception +type Exception struct { + Code int + Message string + Header string +} + +// Marshal just calls the xml marshaller for a given exception. 
+func Marshal(code int, message string, header string, errorCode string) ([]byte, error) { + xmlstring, err := xml.Marshal(&ErrorXML{ + Xmlnsd: "DAV", + Xmlnss: "http://sabredav.org/ns", + Exception: sabreException[code], + Message: message, + Header: header, + ErrorCode: errorCode, + }) + if err != nil { + return nil, err + } + var buf bytes.Buffer + buf.WriteString(xml.Header) + buf.Write(xmlstring) + return buf.Bytes(), err +} + +// ErrorXML holds the xml representation of an error +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +type ErrorXML struct { + XMLName xml.Name `xml:"d:error"` + Xmlnsd string `xml:"xmlns:d,attr"` + Xmlnss string `xml:"xmlns:s,attr"` + Exception string `xml:"s:exception"` + Message string `xml:"s:message"` + ErrorCode string `xml:"s:errorcode"` + InnerXML []byte `xml:",innerxml"` + // Header is used to indicate the conflicting request header + Header string `xml:"s:header,omitempty"` +} + +var ( + // ErrInvalidDepth is an invalid depth header error + ErrInvalidDepth = errors.New("webdav: invalid depth") + // ErrInvalidPropfind is an invalid propfind error + ErrInvalidPropfind = errors.New("webdav: invalid propfind") + // ErrInvalidProppatch is an invalid proppatch error + ErrInvalidProppatch = errors.New("webdav: invalid proppatch") + // ErrInvalidLockInfo is an invalid lock error + ErrInvalidLockInfo = errors.New("webdav: invalid lock info") + // ErrUnsupportedLockInfo is an unsupported lock error + ErrUnsupportedLockInfo = errors.New("webdav: unsupported lock info") + // ErrInvalidTimeout is an invalid timeout error + ErrInvalidTimeout = errors.New("webdav: invalid timeout") + // ErrInvalidIfHeader is an invalid if header error + ErrInvalidIfHeader = errors.New("webdav: invalid If header") + // ErrUnsupportedMethod is an unsupported method error + ErrUnsupportedMethod = errors.New("webdav: unsupported method") + // ErrInvalidLockToken is an invalid lock token error + ErrInvalidLockToken = errors.New("webdav: invalid lock 
token") + // ErrConfirmationFailed is returned by a LockSystem's Confirm method. + ErrConfirmationFailed = errors.New("webdav: confirmation failed") + // ErrForbidden is returned by a LockSystem's Unlock method. + ErrForbidden = errors.New("webdav: forbidden") + // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. + ErrLocked = errors.New("webdav: locked") + // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. + ErrNoSuchLock = errors.New("webdav: no such lock") + // ErrNotImplemented is returned when hitting not implemented code paths + ErrNotImplemented = errors.New("webdav: not implemented") + // ErrTokenNotFound is returned when a token is not found + ErrTokenStatInfoMissing = errors.New("webdav: token stat info missing") +) + +// HandleErrorStatus checks the status code, logs a Debug or Error level message +// and writes an appropriate http status +func HandleErrorStatus(log *zerolog.Logger, w http.ResponseWriter, s *rpc.Status) { + hsc := status.HTTPStatusFromCode(s.Code) + if s.Code == rpc.Code_CODE_ABORTED { + // aborted is used for etag an lock mismatches, which translates to 412 + // in case a real Conflict response is needed, the calling code needs to send the header + hsc = http.StatusPreconditionFailed + } + if hsc == http.StatusInternalServerError { + log.Error().Interface("status", s).Int("code", hsc).Msg(http.StatusText(hsc)) + } else { + log.Debug().Interface("status", s).Int("code", hsc).Msg(http.StatusText(hsc)) + } + w.WriteHeader(hsc) +} + +// HandleWebdavError checks the status code, logs an error and creates a webdav response body +// if needed +func HandleWebdavError(log *zerolog.Logger, w http.ResponseWriter, b []byte, err error) { + if err != nil { + log.Error().Msgf("error marshaling xml response: %s", b) + w.WriteHeader(http.StatusInternalServerError) + return + } + _, err = w.Write(b) + if err != nil { + log.Err(err).Msg("error writing response") + } +} + +func NewErrFromStatus(s 
*rpc.Status) error { + switch s.GetCode() { + case rpc.Code_CODE_OK: + return nil + case rpc.Code_CODE_DEADLINE_EXCEEDED: + return ErrInvalidTimeout + case rpc.Code_CODE_PERMISSION_DENIED: + return ErrForbidden + case rpc.Code_CODE_LOCKED, rpc.Code_CODE_FAILED_PRECONDITION: + return ErrLocked + case rpc.Code_CODE_UNIMPLEMENTED: + return ErrNotImplemented + default: + return errors.New(s.GetMessage()) + } +} diff --git a/services/webdav/pkg/ocdav/filedrop.go b/services/webdav/pkg/ocdav/filedrop.go new file mode 100644 index 0000000000..53cb41799f --- /dev/null +++ b/services/webdav/pkg/ocdav/filedrop.go @@ -0,0 +1,47 @@ +package ocdav + +import ( + "context" + "errors" + "path/filepath" + "strconv" + "strings" + + gatewayv1beta1 "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" +) + +// FindName returns the next filename available when the current +func FindName(ctx context.Context, client gatewayv1beta1.GatewayAPIClient, name string, parentid *provider.ResourceId) (string, *rpc.Status, error) { + lReq := &provider.ListContainerRequest{ + Ref: &provider.Reference{ + ResourceId: parentid, + }, + } + lRes, err := client.ListContainer(ctx, lReq) + if err != nil { + return "", nil, err + } + if lRes.Status.Code != rpc.Code_CODE_OK { + return "", lRes.Status, nil + } + // iterate over the listing to determine next suffix + var itemMap = make(map[string]struct{}) + for _, fi := range lRes.Infos { + itemMap[fi.GetName()] = struct{}{} + } + ext := filepath.Ext(name) + fileName := strings.TrimSuffix(name, ext) + if strings.HasSuffix(fileName, ".tar") { + fileName = strings.TrimSuffix(fileName, ".tar") + ext = filepath.Ext(fileName) + "." 
+ ext + } + // starts with two because "normal" humans begin counting with 1 and we say the existing file is the first one + for i := 2; i < len(itemMap)+3; i++ { + if _, ok := itemMap[fileName+" ("+strconv.Itoa(i)+")"+ext]; !ok { + return fileName + " (" + strconv.Itoa(i) + ")" + ext, lRes.GetStatus(), nil + } + } + return "", nil, errors.New("could not determine new filename") +} diff --git a/services/webdav/pkg/ocdav/get.go b/services/webdav/pkg/ocdav/get.go new file mode 100644 index 0000000000..78cf751ea7 --- /dev/null +++ b/services/webdav/pkg/ocdav/get.go @@ -0,0 +1,189 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "io" + "net/http" + "path" + "strconv" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/rhttp" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" +) + +func (s *svc) handlePathGet(w http.ResponseWriter, r *http.Request, ns string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "get") + defer span.End() + + fn := path.Join(ns, r.URL.Path) + + sublog := appctx.GetLogger(ctx).With().Str("path", fn).Str("svc", "ocdav").Str("handler", "get").Logger() + + space, status, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, fn) + if err != nil { + sublog.Error().Err(err).Str("path", fn).Msg("failed to look up storage space") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, status) + return + } + + s.handleGet(ctx, w, r, spacelookup.MakeRelativeReference(space, fn, false), "spaces", sublog) +} + +func (s *svc) handleGet(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, dlProtocol string, log zerolog.Logger) { + client, err := s.gatewaySelector.Next() + if err != nil { + log.Error().Err(err).Msg("error selecting next client") + w.WriteHeader(http.StatusInternalServerError) + return + } + sReq := &provider.StatRequest{ + Ref: ref, + } + sRes, err := client.Stat(ctx, sReq) + if err != nil { + log.Error().Err(err).Msg("error stat resource") + w.WriteHeader(http.StatusInternalServerError) + return + } else if sRes.Status.Code != rpc.Code_CODE_OK { + 
errors.HandleErrorStatus(&log, w, sRes.Status) + return + } + + if sRes.Info.Type != provider.ResourceType_RESOURCE_TYPE_FILE { + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusOK) + return + } + + if status := utils.ReadPlainFromOpaque(sRes.GetInfo().GetOpaque(), "status"); status == "processing" { + w.WriteHeader(http.StatusTooEarly) + return + } + + dReq := &provider.InitiateFileDownloadRequest{Ref: ref} + dRes, err := client.InitiateFileDownload(ctx, dReq) + switch { + case err != nil: + log.Error().Err(err).Msg("error initiating file download") + w.WriteHeader(http.StatusInternalServerError) + return + case dRes.Status.Code != rpc.Code_CODE_OK: + errors.HandleErrorStatus(&log, w, dRes.Status) + return + } + + var ep, token string + for _, p := range dRes.Protocols { + if p.Protocol == dlProtocol { + ep, token = p.DownloadEndpoint, p.Token + } + } + + httpReq, err := rhttp.NewRequest(ctx, http.MethodGet, ep, nil) + if err != nil { + log.Error().Err(err).Msg("error creating http request") + w.WriteHeader(http.StatusInternalServerError) + return + } + httpReq.Header.Set(TokenTransportHeader, token) + + if r.Header.Get(net.HeaderRange) != "" { + httpReq.Header.Set(net.HeaderRange, r.Header.Get(net.HeaderRange)) + } + + httpClient := s.client + + httpRes, err := httpClient.Do(httpReq) + if err != nil { + log.Error().Err(err).Msg("error performing http request") + w.WriteHeader(http.StatusInternalServerError) + return + } + defer httpRes.Body.Close() + + // copy only the headers relevant for the content served by the datagateway + // more headers are already present from the GET request + copyHeader(w.Header(), httpRes.Header, net.HeaderContentType) + copyHeader(w.Header(), httpRes.Header, net.HeaderContentLength) + copyHeader(w.Header(), httpRes.Header, net.HeaderContentRange) + copyHeader(w.Header(), httpRes.Header, net.HeaderOCFileID) + copyHeader(w.Header(), httpRes.Header, net.HeaderOCETag) + copyHeader(w.Header(), httpRes.Header, 
net.HeaderOCChecksum) + copyHeader(w.Header(), httpRes.Header, net.HeaderETag) + copyHeader(w.Header(), httpRes.Header, net.HeaderLastModified) + copyHeader(w.Header(), httpRes.Header, net.HeaderAcceptRanges) + copyHeader(w.Header(), httpRes.Header, net.HeaderContentDisposistion) + + w.WriteHeader(httpRes.StatusCode) + + if httpRes.StatusCode != http.StatusOK && httpRes.StatusCode != http.StatusPartialContent { + // swallow the body and set content-length to 0 to prevent reverse proxies from trying to read from it + w.Header().Set("Content-Length", "0") + return + } + + var c int64 + if c, err = io.Copy(w, httpRes.Body); err != nil { + log.Error().Err(err).Msg("error finishing copying data to response") + } + if httpRes.Header.Get(net.HeaderContentLength) != "" { + i, err := strconv.ParseInt(httpRes.Header.Get(net.HeaderContentLength), 10, 64) + if err != nil { + log.Error().Err(err).Str("content-length", httpRes.Header.Get(net.HeaderContentLength)).Msg("invalid content length in datagateway response") + } + if i != c { + log.Error().Int64("content-length", i).Int64("transferred-bytes", c).Msg("content length vs transferred bytes mismatch") + } + } + // TODO we need to send the If-Match etag in the GET to the datagateway to prevent race conditions between stating and reading the file +} + +func copyHeader(dist, src http.Header, header string) { + if src.Get(header) != "" { + dist.Set(header, src.Get(header)) + } +} + +func (s *svc) handleSpacesGet(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces_get") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Str("handler", "get").Logger() + + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + s.handleGet(ctx, w, r, &ref, "spaces", sublog) +} diff --git 
a/services/webdav/pkg/ocdav/head.go b/services/webdav/pkg/ocdav/head.go new file mode 100644 index 0000000000..de773fa07c --- /dev/null +++ b/services/webdav/pkg/ocdav/head.go @@ -0,0 +1,120 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +import ( + "context" + "fmt" + "net/http" + "path" + "strconv" + "strings" + "time" + + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/xs" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" +) + +func (s *svc) handlePathHead(w http.ResponseWriter, r *http.Request, ns string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "head") + defer span.End() + + fn := path.Join(ns, r.URL.Path) + + sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() + + space, 
status, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, fn) + if err != nil { + sublog.Error().Err(err).Str("path", fn).Msg("failed to look up storage space") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, status) + return + } + + s.handleHead(ctx, w, r, spacelookup.MakeRelativeReference(space, fn, false), sublog) +} + +func (s *svc) handleHead(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, log zerolog.Logger) { + client, err := s.gatewaySelector.Next() + if err != nil { + log.Error().Err(err).Msg("error selecting next client") + w.WriteHeader(http.StatusInternalServerError) + return + } + req := &provider.StatRequest{Ref: ref} + res, err := client.Stat(ctx, req) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if res.Status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&log, w, res.Status) + return + } + + info := res.Info + w.Header().Set(net.HeaderContentType, info.MimeType) + w.Header().Set(net.HeaderETag, info.Etag) + w.Header().Set(net.HeaderOCFileID, storagespace.FormatResourceID(info.Id)) + w.Header().Set(net.HeaderOCETag, info.Etag) + if info.Checksum != nil { + w.Header().Set(net.HeaderOCChecksum, fmt.Sprintf("%s:%s", strings.ToUpper(string(xs.GRPC2PKGXS(info.Checksum.Type))), info.Checksum.Sum)) + } + t := utils.TSToTime(info.Mtime).UTC() + lastModifiedString := t.Format(time.RFC1123Z) + w.Header().Set(net.HeaderLastModified, lastModifiedString) + w.Header().Set(net.HeaderContentLength, strconv.FormatUint(info.Size, 10)) + if info.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER { + w.Header().Set(net.HeaderAcceptRanges, "bytes") + } + if utils.ReadPlainFromOpaque(res.GetInfo().GetOpaque(), "status") == "processing" { + w.WriteHeader(http.StatusTooEarly) + return + } + w.WriteHeader(http.StatusOK) 
+} + +func (s *svc) handleSpacesHead(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces_head") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Logger() + + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + s.handleHead(ctx, w, r, &ref, sublog) +} diff --git a/services/webdav/pkg/ocdav/if.go b/services/webdav/pkg/ocdav/if.go new file mode 100644 index 0000000000..c331fcbc98 --- /dev/null +++ b/services/webdav/pkg/ocdav/if.go @@ -0,0 +1,193 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ocdav + +// copy of https://github.com/golang/net/blob/master/webdav/if.go + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. 
+type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. +func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func 
parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/services/webdav/pkg/ocdav/if_test.go b/services/webdav/pkg/ocdav/if_test.go new file mode 100644 index 0000000000..fcdcb6780e --- /dev/null +++ b/services/webdav/pkg/ocdav/if_test.go @@ -0,0 +1,338 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may 
not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +// copy of https://github.com/golang/net/blob/master/webdav/if_test.go + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseIfHeader(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + want ifHeader + }{{ + "bad: empty", + ``, + ifHeader{}, + }, { + "bad: no parens", + `foobar`, + ifHeader{}, + }, { + "bad: empty list #1", + `()`, + ifHeader{}, + }, { + "bad: empty list #2", + `(a) (b c) () (d)`, + ifHeader{}, + }, { + "bad: no list after resource #1", + ``, + ifHeader{}, + }, { + "bad: no list after resource #2", + ` (a)`, + ifHeader{}, + }, { + "bad: no list after resource #3", + ` (a) (b) `, + ifHeader{}, + }, { + "bad: no-tag-list followed by tagged-list", + `(a) (b) (c)`, + ifHeader{}, + }, { + "bad: unfinished list", + `(a`, + ifHeader{}, + }, { + "bad: unfinished ETag", + `([b`, + ifHeader{}, + }, { + "bad: unfinished Notted list", + `(Not a`, + ifHeader{}, + }, { + "bad: double Not", + `(Not Not a)`, + ifHeader{}, + }, { + "good: one list with a Token", + `(a)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }}, + }, + }, { + "good: one list with an ETag", + `([a])`, + 
ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + ETag: `a`, + }}, + }}, + }, + }, { + "good: one list with three Nots", + `(Not a Not b Not [d])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }, { + Not: true, + Token: `b`, + }, { + Not: true, + ETag: `d`, + }}, + }}, + }, + }, { + "good: two lists", + `(a) (b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Token: `b`, + }}, + }}, + }, + }, { + "good: two Notted lists", + `(Not a) (Not b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `b`, + }}, + }}, + }, + }, { + "section 7.5.1", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/users/f/fielding/index.html`, + conditions: []Condition{{ + Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`, + }}, + }}, + }, + }, { + "section 7.5.2 #1", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #2", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #3", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/member`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 9.9.6", + `() + ()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`, + }}, + }, { + conditions: []Condition{{ + Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`, + }}, + }}, + }, + }, { + "section 9.10.8", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: 
`urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`, + }}, + }}, + }, + }, { + "section 10.4.6", + `( + ["I am an ETag"]) + (["I am another ETag"])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `"I am an ETag"`, + }}, + }, { + conditions: []Condition{{ + ETag: `"I am another ETag"`, + }}, + }}, + }, + }, { + "section 10.4.7", + `(Not + )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`, + }}, + }}, + }, + }, { + "section 10.4.8", + `() + (Not )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `DAV:no-lock`, + }}, + }}, + }, + }, { + "section 10.4.9", + ` + ( + [W/"A weak ETag"]) (["strong ETag"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/resource1`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `W/"A weak ETag"`, + }}, + }, { + resourceTag: `/resource1`, + conditions: []Condition{{ + ETag: `"strong ETag"`, + }}, + }}, + }, + }, { + "section 10.4.10", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/specs/`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }}, + }, + }, { + "section 10.4.11 #1", + ` (["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + ETag: `"4217"`, + }}, + }}, + }, + }, { + "section 10.4.11 #2", + ` (Not ["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + Not: true, + ETag: `"4217"`, + }}, + }}, + }, + }} + + for _, tc := range testCases { + got, ok := parseIfHeader(strings.ReplaceAll(tc.input, "\n", "")) + if gotEmpty := 
reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok { + t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want) + continue + } + } +} diff --git a/services/webdav/pkg/ocdav/locks.go b/services/webdav/pkg/ocdav/locks.go new file mode 100644 index 0000000000..37e8cb00a5 --- /dev/null +++ b/services/webdav/pkg/ocdav/locks.go @@ -0,0 +1,707 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "path" + "regexp" + "strconv" + "strings" + "time" + + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/google/uuid" + ocdavErrors "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/prop" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "go.opentelemetry.io/otel/attribute" +) + +// Most of this is taken from https://github.com/golang/net/blob/master/webdav/lock.go + +// From RFC4918 http://www.webdav.org/specs/rfc4918.html#lock-tokens +// This specification encourages servers to create Universally Unique Identifiers (UUIDs) for lock tokens, +// and to use the URI form defined by "A Universally Unique Identifier (UUID) URN Namespace" ([RFC4122]). +// However, servers are free to use any URI (e.g., from another scheme) so long as it meets the uniqueness +// requirements. For example, a valid lock token might be constructed using the "opaquelocktoken" scheme +// defined in Appendix C. 
+// +// Example: "urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6" +// +// we stick to the recommendation and use the URN Namespace +const lockTokenPrefix = "urn:uuid:" + +var requestLockRegex = regexp.MustCompile(`\(<(urn:uuid:[0-9a-fA-F-]+)>\)`) + +// TODO(jfd) implement lock +// see Web Distributed Authoring and Versioning (WebDAV) Locking Protocol: +// https://www.greenbytes.de/tech/webdav/draft-reschke-webdav-locking-latest.html +// Webdav supports a Depth: infinity lock, wopi only needs locks on files + +// https://www.greenbytes.de/tech/webdav/draft-reschke-webdav-locking-latest.html#write.locks.and.the.if.request.header +// [...] a lock token MUST be submitted in the If header for all locked resources +// that a method may interact with or the method MUST fail. [...] +/* + COPY /~fielding/index.html HTTP/1.1 + Host: example.com + Destination: http://example.com/users/f/fielding/index.html + If: + () +*/ + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo +type lockInfo struct { + XMLName xml.Name `xml:"lockinfo"` + Exclusive *struct{} `xml:"lockscope>exclusive"` + Shared *struct{} `xml:"lockscope>shared"` + Write *struct{} `xml:"locktype>write"` + Owner owner `xml:"owner"` + LockID string `xml:"locktoken>href"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner +type owner struct { + InnerXML string `xml:",innerxml"` +} + +// Condition can match a WebDAV resource, based on a token or ETag. +// Exactly one of Token and ETag should be non-empty. +type Condition struct { + Not bool + Token string + ETag string +} + +// LockSystem manages access to a collection of named resources. The elements +// in a lock name are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. 
+type LockSystem interface { + // Confirm confirms that the caller can claim all of the locks specified by + // the given conditions, and that holding the union of all of those locks + // gives exclusive access to all of the named resources. Up to two resources + // can be named. Empty names are ignored. + // + // Exactly one of release and err will be non-nil. If release is non-nil, + // all of the requested locks are held until release is called. Calling + // release does not unlock the lock, in the WebDAV UNLOCK sense, but once + // Confirm has confirmed that a lock claim is valid, that lock cannot be + // Confirmed again until it has been released. + // + // If Confirm returns ErrConfirmationFailed then the Handler will continue + // to try any other set of locks presented (a WebDAV HTTP request can + // present more than one set of locks). If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + Confirm(ctx context.Context, now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) + + // Create creates a lock with the given depth, duration, owner and root + // (name). The depth will either be negative (meaning infinite) or zero. + // + // If Create returns ErrLocked then the Handler will write a "423 Locked" + // HTTP status. If it returns any other non-nil error, the Handler will + // write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + // + // The token returned identifies the created lock. It should be an absolute + // URI as defined by RFC 3986, Section 4.3. In particular, it should not + // contain whitespace. + Create(ctx context.Context, now time.Time, details LockDetails) (token string, err error) + + // Refresh refreshes the lock with the given token. + // + // If Refresh returns ErrLocked then the Handler will write a "423 Locked" + // HTTP Status. 
If Refresh returns ErrNoSuchLock then the Handler will write + // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + Refresh(ctx context.Context, now time.Time, ref *provider.Reference, token string) error + + // Unlock unlocks the lock with the given token. + // + // If Unlock returns ErrForbidden then the Handler will write a "403 + // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler + // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock + // then the Handler will write a "409 Conflict" HTTP Status. If it returns + // any other non-nil error, the Handler will write a "500 Internal Server + // Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for + // when to use each error. + Unlock(ctx context.Context, now time.Time, ref *provider.Reference, token string) error +} + +// NewCS3LS returns a new CS3 based LockSystem. +func NewCS3LS(s pool.Selectable[gateway.GatewayAPIClient]) LockSystem { + return &cs3LS{ + selector: s, + } +} + +type cs3LS struct { + selector pool.Selectable[gateway.GatewayAPIClient] +} + +func (cls *cs3LS) Confirm(ctx context.Context, now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { + return nil, ocdavErrors.ErrNotImplemented +} + +func (cls *cs3LS) Create(ctx context.Context, now time.Time, details LockDetails) (string, error) { + // always assume depth infinity? 
+ /* + if !details.ZeroDepth { + The CS3 Lock api currently has no depth property, it only locks single resources + return "", ocdavErrors.ErrUnsupportedLockInfo + } + */ + + u := ctxpkg.ContextMustGetUser(ctx) + + // add metadata via opaque + // TODO: update cs3api: https://github.com/cs3org/cs3apis/issues/213 + o := utils.AppendPlainToOpaque(nil, "lockownername", u.GetDisplayName()) + o = utils.AppendPlainToOpaque(o, "locktime", now.Format(time.RFC3339)) + + lockid := details.LockID + if lockid == "" { + // Having a lock token provides no special access rights. Anyone can find out anyone + // else's lock token by performing lock discovery. Locks must be enforced based upon + // whatever authentication mechanism is used by the server, not based on the secrecy + // of the token values. + // see: http://www.webdav.org/specs/rfc2518.html#n-lock-tokens + token := uuid.New() + + lockid = lockTokenPrefix + token.String() + } + r := &provider.SetLockRequest{ + Ref: details.Root, + Lock: &provider.Lock{ + Opaque: o, + Type: provider.LockType_LOCK_TYPE_EXCL, + User: details.UserID, // no way to set an app lock? TODO maybe via the ownerxml + //AppName: , // TODO use a urn scheme? 
+ LockId: lockid, + }, + } + if details.Duration > 0 { + expiration := time.Now().UTC().Add(details.Duration) + r.Lock.Expiration = &types.Timestamp{ + Seconds: uint64(expiration.Unix()), + Nanos: uint32(expiration.Nanosecond()), + } + } + + client, err := cls.selector.Next() + if err != nil { + return "", err + } + + res, err := client.SetLock(ctx, r) + if err != nil { + return "", err + } + switch res.GetStatus().GetCode() { + case rpc.Code_CODE_OK: + return lockid, nil + default: + return "", ocdavErrors.NewErrFromStatus(res.GetStatus()) + } + +} + +func (cls *cs3LS) Refresh(ctx context.Context, now time.Time, ref *provider.Reference, token string) error { + u := ctxpkg.ContextMustGetUser(ctx) + + // add metadata via opaque + // TODO: update cs3api: https://github.com/cs3org/cs3apis/issues/213 + o := utils.AppendPlainToOpaque(nil, "lockownername", u.GetDisplayName()) + o = utils.AppendPlainToOpaque(o, "locktime", now.Format(time.RFC3339)) + + if token == "" { + return errors.New("token is empty") + } + + r := &provider.RefreshLockRequest{ + Ref: ref, + Lock: &provider.Lock{ + Opaque: o, + Type: provider.LockType_LOCK_TYPE_EXCL, + //AppName: , // TODO use a urn scheme? 
+ LockId: token, + User: u.GetId(), + }, + } + + client, err := cls.selector.Next() + if err != nil { + return err + } + + res, err := client.RefreshLock(ctx, r) + if err != nil { + return err + } + switch res.GetStatus().GetCode() { + case rpc.Code_CODE_OK: + return nil + + default: + return ocdavErrors.NewErrFromStatus(res.GetStatus()) + } +} + +func (cls *cs3LS) Unlock(ctx context.Context, now time.Time, ref *provider.Reference, token string) error { + u := ctxpkg.ContextMustGetUser(ctx) + + r := &provider.UnlockRequest{ + Ref: ref, + Lock: &provider.Lock{ + LockId: token, // can be a token or a Coded-URL + User: u.Id, + }, + } + + client, err := cls.selector.Next() + if err != nil { + return err + } + + res, err := client.Unlock(ctx, r) + if err != nil { + return err + } + + newErr := ocdavErrors.NewErrFromStatus(res.GetStatus()) + if newErr != nil { + appctx.GetLogger(ctx).Error().Str("token", token).Interface("unlock", ref).Msg("could not unlock " + res.GetStatus().GetMessage()) + } + return newErr +} + +// LockDetails are a lock's metadata. +type LockDetails struct { + // Root is the root resource name being locked. For a zero-depth lock, the + // root is the only resource being locked. + Root *provider.Reference + // Duration is the lock timeout. A negative duration means infinite. + Duration time.Duration + // OwnerXML is the verbatim XML given in a LOCK HTTP request. + // + // TODO: does the "verbatim" nature play well with XML namespaces? + // Does the OwnerXML field need to have more structure? See + // https://codereview.appspot.com/175140043/#msg2 + OwnerXML string + UserID *userpb.UserId + // ZeroDepth is whether the lock has zero depth. If it does not have zero + // depth, it has infinite depth. 
+ ZeroDepth bool + // OwnerName is the name of the lock owner + OwnerName string + // Locktime is the time the lock was created + Locktime time.Time + // LockID is the lock token + LockID string +} + +func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { + c := &countingReader{r: r} + if err = xml.NewDecoder(c).Decode(&li); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to refresh the lock. + // http://www.webdav.org/specs/rfc4918.html#refreshing-locks + return lockInfo{}, 0, nil + } + err = ocdavErrors.ErrInvalidLockInfo + } + return lockInfo{}, http.StatusBadRequest, err + } + // We only support exclusive (non-shared) write locks. In practice, these are + // the only types of locks that seem to matter. + // We are ignoring any properties in the lock details and assume an exclusive write lock is requested. + // https://datatracker.ietf.org/doc/html/rfc4918#section-7 only describes write locks + // + // if li.Exclusive == nil || li.Shared != nil { + // return lockInfo{}, http.StatusNotImplemented, errors.ErrUnsupportedLockInfo + // } + // what should we return if the user requests a shared lock? or leaves out the locktype? the testsuite will only send the property lockscope, not locktype + // the oc tests cover both shared and exclusive locks. What is the WOPI lock? a shared or an exclusive lock? + // since it is issued by a service it seems to be an exclusive lock. + // the owner could be a link to the collaborative app ... to join the session + return li, 0, nil +} + +type countingReader struct { + n int + r io.Reader +} + +func (c *countingReader) Read(p []byte) (int, error) { + n, err := c.r.Read(p) + c.n += n + return n, err +} + +const infiniteTimeout = -1 + +// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is +// empty, an infiniteTimeout is returned. 
+func parseTimeout(s string) (time.Duration, error) { + if s == "" { + return infiniteTimeout, nil + } + if i := strings.IndexByte(s, ','); i >= 0 { + s = s[:i] + } + s = strings.TrimSpace(s) + if s == "Infinite" { + return infiniteTimeout, nil + } + const pre = "Second-" + if !strings.HasPrefix(s, pre) { + return 0, ocdavErrors.ErrInvalidTimeout + } + s = s[len(pre):] + if s == "" || s[0] < '0' || '9' < s[0] { + return 0, ocdavErrors.ErrInvalidTimeout + } + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || 1<<32-1 < n { + return 0, ocdavErrors.ErrInvalidTimeout + } + return time.Duration(n) * time.Second, nil +} + +const ( + infiniteDepth = -1 + invalidDepth = -2 +) + +// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and +// infiniteDepth. Parsing any other string returns invalidDepth. +// +// Different WebDAV methods have further constraints on valid depths: +// - PROPFIND has no further restrictions, as per section 9.1. +// - COPY accepts only "0" or "infinity", as per section 9.8.3. +// - MOVE accepts only "infinity", as per section 9.9.2. +// - LOCK accepts only "0" or "infinity", as per section 9.10.3. +// +// These constraints are enforced by the handleXxx methods. +func parseDepth(s string) int { + switch s { + case "0": + return 0 + case "1": + return 1 + case "infinity": + return infiniteDepth + } + return invalidDepth +} + +/* +the oc 10 wopi app code locks like this: + + $storage->lockNodePersistent($file->getInternalPath(), [ + 'token' => $wopiLock, + 'owner' => "{$user->getDisplayName()} via Office Online" + ]); + +if owner is empty it defaults to '{displayname} ({email})', which is not a url ... but ... 
shrug + +The LockManager also defaults to exclusive locks: + + $scope = ILock::LOCK_SCOPE_EXCLUSIVE; + if (isset($lockInfo['scope'])) { + $scope = $lockInfo['scope']; + } +*/ +func (s *svc) handleLock(w http.ResponseWriter, r *http.Request, ns string) (retStatus int, retErr error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), fmt.Sprintf("%s %v", r.Method, r.URL.Path)) + defer span.End() + + span.SetAttributes(attribute.String("component", "ocdav")) + + fn := path.Join(ns, r.URL.Path) // TODO do we still need to jail if we query the registry about the spaces? + + // TODO instead of using a string namespace ns pass in the space with the request? + ref, cs3Status, err := spacelookup.LookupReferenceForPath(ctx, s.gatewaySelector, fn) + if err != nil { + return http.StatusInternalServerError, err + } + if cs3Status.Code != rpc.Code_CODE_OK { + return http.StatusInternalServerError, ocdavErrors.NewErrFromStatus(cs3Status) + } + + return s.lockReference(ctx, w, r, ref) +} + +func (s *svc) handleSpacesLock(w http.ResponseWriter, r *http.Request, spaceID string) (retStatus int, retErr error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), fmt.Sprintf("%s %v", r.Method, r.URL.Path)) + defer span.End() + + span.SetAttributes(attribute.String("component", "ocdav")) + + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + if err != nil { + return http.StatusBadRequest, fmt.Errorf("invalid space id") + } + + return s.lockReference(ctx, w, r, &ref) +} + +func (s *svc) lockReference(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference) (retStatus int, retErr error) { + sublog := appctx.GetLogger(ctx).With().Interface("ref", ref).Logger() + duration, err := parseTimeout(r.Header.Get(net.HeaderTimeout)) + if err != nil { + return http.StatusBadRequest, ocdavErrors.ErrInvalidTimeout + } + + li, status, err := readLockInfo(r.Body) + if err != 
nil { + return status, ocdavErrors.ErrInvalidLockInfo + } + + u := ctxpkg.ContextMustGetUser(ctx) + token, now, created := "", time.Now(), false + ld := LockDetails{UserID: u.Id, Root: ref, Duration: duration, OwnerName: u.GetDisplayName(), Locktime: now, LockID: li.LockID} + if li == (lockInfo{}) { + // An empty lockInfo means to refresh the lock. + ih, ok := parseIfHeader(r.Header.Get(net.HeaderIf)) + if !ok { + return http.StatusBadRequest, ocdavErrors.ErrInvalidIfHeader + } + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + token = ih.lists[0].conditions[0].Token + } + if token == "" { + return http.StatusBadRequest, ocdavErrors.ErrInvalidLockToken + } + err = s.LockSystem.Refresh(ctx, now, ref, token) + if err != nil { + if err == ocdavErrors.ErrNoSuchLock { + return http.StatusPreconditionFailed, err + } + return http.StatusInternalServerError, err + } + + ld.LockID = token + + } else { + // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, + // then the request MUST act as if a "Depth:infinity" had been submitted." + depth := infiniteDepth + if hdr := r.Header.Get(net.HeaderDepth); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.10.3 says that "Values other than 0 or infinity must not be + // used with the Depth header on a LOCK method". + return http.StatusBadRequest, ocdavErrors.ErrInvalidDepth + } + } + /* our url path has been shifted, so we don't need to do this? 
+ reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + */ + // TODO look up username and email + // if li.Owner.InnerXML == "" { + // // PHP version: 'owner' => "{$user->getDisplayName()} via Office Online" + // ld.OwnerXML = ld.UserID.OpaqueId + // } + ld.OwnerXML = li.Owner.InnerXML // TODO optional, should be a URL + ld.ZeroDepth = depth == 0 + + //TODO: @jfd the code tries to create a lock for a file that may not even exist, + // should we do that in the decomposedfs as well? the node does not exist + // this actually is a name based lock ... ugh + token, err = s.LockSystem.Create(ctx, now, ld) + + // + if err != nil { + switch { + case errors.Is(err, ocdavErrors.ErrLocked): + return http.StatusLocked, err + case errors.Is(err, ocdavErrors.ErrForbidden): + return http.StatusForbidden, err + default: + return http.StatusInternalServerError, err + } + } + + defer func() { + if retErr != nil { + if err := s.LockSystem.Unlock(ctx, now, ref, token); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("lock", ld).Msg("could not unlock after failed lock") + } + } + }() + + // Create the resource if it didn't previously exist. + // TODO use sdk to stat? + /* + if _, err := s.FileSystem.Stat(ctx, reqPath); err != nil { + f, err := s.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + // TODO: detect missing intermediate dirs and return http.StatusConflict? + return http.StatusInternalServerError, err + } + f.Close() + created = true + } + */ + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. 
+ w.Header().Set("Lock-Token", "<"+token+">") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + if created { + // This is "w.WriteHeader(http.StatusCreated)" and not "return + // http.StatusCreated, nil" because we write our own (XML) response to w + // and Handler.ServeHTTP would otherwise write "Created". + w.WriteHeader(http.StatusCreated) + } + n, err := writeLockInfo(w, token, ld) + if err != nil { + sublog.Err(err).Int("bytes_written", n).Msg("error writing response") + } + return 0, nil +} + +func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) { + depth := "infinity" + if ld.ZeroDepth { + depth = "0" + } + href := ld.Root.Path // FIXME add base url and space? + + lockdiscovery := strings.Builder{} + lockdiscovery.WriteString(xml.Header) + lockdiscovery.WriteString("\n") + lockdiscovery.WriteString(" \n") + lockdiscovery.WriteString(" \n") + lockdiscovery.WriteString(fmt.Sprintf(" %s\n", depth)) + if ld.OwnerXML != "" { + lockdiscovery.WriteString(fmt.Sprintf(" %s\n", ld.OwnerXML)) + } + if ld.Duration > 0 { + timeout := ld.Duration / time.Second + lockdiscovery.WriteString(fmt.Sprintf(" Second-%d\n", timeout)) + } else { + lockdiscovery.WriteString(" Infinite\n") + } + if token != "" { + lockdiscovery.WriteString(fmt.Sprintf(" %s\n", prop.Escape(token))) + } + if href != "" { + lockdiscovery.WriteString(fmt.Sprintf(" %s\n", prop.Escape(href))) + } + if ld.OwnerName != "" { + lockdiscovery.WriteString(fmt.Sprintf(" %s\n", prop.Escape(ld.OwnerName))) + } + if !ld.Locktime.IsZero() { + lockdiscovery.WriteString(fmt.Sprintf(" %s\n", prop.Escape(ld.Locktime.Format(time.RFC3339)))) + } + + lockdiscovery.WriteString("") + + return fmt.Fprint(w, lockdiscovery.String()) +} + +func (s *svc) handleUnlock(w http.ResponseWriter, r *http.Request, ns string) (status int, err error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), fmt.Sprintf("%s %v", r.Method, r.URL.Path)) + defer 
span.End() + + span.SetAttributes(attribute.String("component", "ocdav")) + + fn := path.Join(ns, r.URL.Path) // TODO do we still need to jail if we query the registry about the spaces? + + // TODO instead of using a string namespace ns pass in the space with the request? + ref, cs3Status, err := spacelookup.LookupReferenceForPath(ctx, s.gatewaySelector, fn) + if err != nil { + return http.StatusInternalServerError, err + } + if cs3Status.Code != rpc.Code_CODE_OK { + return http.StatusInternalServerError, ocdavErrors.NewErrFromStatus(cs3Status) + } + + return s.unlockReference(ctx, w, r, ref) +} + +func (s *svc) handleSpaceUnlock(w http.ResponseWriter, r *http.Request, spaceID string) (status int, err error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), fmt.Sprintf("%s %v", r.Method, r.URL.Path)) + defer span.End() + + span.SetAttributes(attribute.String("component", "ocdav")) + + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + if err != nil { + return http.StatusBadRequest, fmt.Errorf("invalid space id") + } + + return s.unlockReference(ctx, w, r, &ref) +} + +func (s *svc) unlockReference(ctx context.Context, _ http.ResponseWriter, r *http.Request, ref *provider.Reference) (retStatus int, retErr error) { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value should be a Coded-URL OR a token. We strip its angle brackets. 
+ t := r.Header.Get(net.HeaderLockToken) + if len(t) > 2 && t[0] == '<' && t[len(t)-1] == '>' { + t = t[1 : len(t)-1] + } + + err := s.LockSystem.Unlock(ctx, time.Now(), ref, t) + switch { + case err == nil: + return http.StatusNoContent, nil + case errors.Is(err, ocdavErrors.ErrLocked): + return http.StatusLocked, err + case errors.Is(err, ocdavErrors.ErrForbidden): + return http.StatusForbidden, err + } + return http.StatusInternalServerError, err +} + +func requestLock(r *http.Request) string { + matches := requestLockRegex.FindStringSubmatch(r.Header.Get(net.HeaderIf)) + if len(matches) < 2 { + return "" + } + + return matches[1] // the first match is the whole string, the second is the token +} diff --git a/services/webdav/pkg/ocdav/meta.go b/services/webdav/pkg/ocdav/meta.go new file mode 100644 index 0000000000..77fa966cc5 --- /dev/null +++ b/services/webdav/pkg/ocdav/meta.go @@ -0,0 +1,245 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "encoding/xml" + "fmt" + "net/http" + "path" + "strings" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/prop" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" +) + +// MetaHandler handles meta requests +type MetaHandler struct { + VersionsHandler *VersionsHandler +} + +func (h *MetaHandler) Init(c *config.Config) error { + h.VersionsHandler = new(VersionsHandler) + return h.VersionsHandler.Init(c) +} + +// Handler handles requests +func (h *MetaHandler) Handler(s *svc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + var id string + id, r.URL.Path = router.ShiftPath(r.URL.Path) + if id == "" { + if r.Method != MethodPropfind { + w.WriteHeader(http.StatusBadRequest) + return + } + h.handleEmptyID(w, r) + return + } + + did, err := storagespace.ParseID(id) + if err != nil { + logger := appctx.GetLogger(r.Context()) + logger.Debug().Str("prop", net.PropOcMetaPathForUser).Msg("invalid resource id") + w.WriteHeader(http.StatusBadRequest) + m := fmt.Sprintf("Invalid resource id %v", id) + b, err := errors.Marshal(http.StatusBadRequest, m, "", "") + errors.HandleWebdavError(logger, w, b, err) + return + } + if did.StorageId == "" && did.OpaqueId == "" && strings.Count(id, ":") >= 2 { + logger := appctx.GetLogger(r.Context()) + logger.Warn().Str("id", id).Msg("detected invalid : separated resourceid id, trying to split it ... 
but fix the client that made the request") + // try splitting with : + parts := strings.SplitN(id, ":", 3) + did.StorageId = parts[0] + did.SpaceId = parts[1] + did.OpaqueId = parts[2] + } + + var head string + head, r.URL.Path = router.ShiftPath(r.URL.Path) + switch head { + case "": + if r.Method != MethodPropfind { + w.WriteHeader(http.StatusBadRequest) + return + } + h.handlePathForUser(w, r, s, &did) + case "v": + h.VersionsHandler.Handler(s, &did).ServeHTTP(w, r) + default: + w.WriteHeader(http.StatusNotFound) + } + + }) +} + +func (h *MetaHandler) handlePathForUser(w http.ResponseWriter, r *http.Request, s *svc, rid *provider.ResourceId) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "meta_propfind") + defer span.End() + + id := storagespace.FormatResourceID(rid) + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("resourceid", id).Logger() + sublog.Info().Msg("calling get path for user") + + pf, status, err := propfind.ReadPropfind(r.Body) + if err != nil { + sublog.Debug().Err(err).Msg("error reading propfind request") + w.WriteHeader(status) + return + } + + if ok := hasProp(&pf, net.PropOcMetaPathForUser); !ok { + sublog.Debug().Str("prop", net.PropOcMetaPathForUser).Msg("error finding prop in request") + w.WriteHeader(http.StatusBadRequest) + return + } + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next client") + w.WriteHeader(http.StatusInternalServerError) + return + } + pathReq := &provider.GetPathRequest{ResourceId: rid} + pathRes, err := client.GetPath(ctx, pathReq) + if err != nil { + sublog.Error().Err(err).Msg("could not send GetPath grpc request: transport error") + w.WriteHeader(http.StatusInternalServerError) + return + } + + switch pathRes.Status.Code { + case rpc.Code_CODE_NOT_FOUND: + sublog.Debug().Str("code", string(pathRes.Status.Code)).Msg("resource not found") + w.WriteHeader(http.StatusNotFound) + m := 
fmt.Sprintf("Resource %s not found", id) + b, err := errors.Marshal(http.StatusNotFound, m, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + case rpc.Code_CODE_PERMISSION_DENIED: + // raise StatusNotFound so that resources can't be enumerated + sublog.Debug().Str("code", string(pathRes.Status.Code)).Msg("resource access denied") + w.WriteHeader(http.StatusNotFound) + m := fmt.Sprintf("Resource %s not found", id) + b, err := errors.Marshal(http.StatusNotFound, m, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + + propstatOK := propfind.PropstatXML{ + Status: "HTTP/1.1 200 OK", + Prop: []prop.PropertyXML{ + prop.Escaped("oc:meta-path-for-user", pathRes.Path), + prop.Escaped("oc:id", id), + prop.Escaped("oc:fileid", id), + prop.Escaped("oc:spaceid", storagespace.FormatStorageID(rid.GetStorageId(), rid.GetSpaceId())), + }, + } + baseURI := ctx.Value(net.CtxKeyBaseURI).(string) + msr := propfind.NewMultiStatusResponseXML() + msr.Responses = []*propfind.ResponseXML{ + { + Href: net.EncodePath(path.Join(baseURI, id) + "/"), + Propstat: []propfind.PropstatXML{ + propstatOK, + }, + }, + } + propRes, err := xml.Marshal(msr) + if err != nil { + sublog.Error().Err(err).Msg("error marshalling propfind response xml") + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8") + w.WriteHeader(http.StatusMultiStatus) + if _, err := w.Write(propRes); err != nil { + sublog.Error().Err(err).Msg("error writing propfind response") + return + } +} + +func (h *MetaHandler) handleEmptyID(w http.ResponseWriter, r *http.Request) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "meta_propfind") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Logger() + pf, status, err := propfind.ReadPropfind(r.Body) + if err != nil { + 
sublog.Debug().Err(err).Msg("error reading propfind request") + w.WriteHeader(status) + return + } + + if ok := hasProp(&pf, net.PropOcMetaPathForUser); !ok { + sublog.Debug().Str("prop", net.PropOcMetaPathForUser).Msg("error finding prop in request") + w.WriteHeader(http.StatusBadRequest) + return + } + + propstatNotFound := propfind.PropstatXML{ + Status: "HTTP/1.1 404 Not Found", + } + baseURI := ctx.Value(net.CtxKeyBaseURI).(string) + msr := propfind.NewMultiStatusResponseXML() + msr.Responses = []*propfind.ResponseXML{ + { + Href: net.EncodePath(baseURI + "/"), + Propstat: []propfind.PropstatXML{ + propstatNotFound, + }, + }, + } + propRes, err := xml.Marshal(msr) + if err != nil { + sublog.Error().Err(err).Msg("error marshalling propfind response xml") + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8") + w.WriteHeader(http.StatusMultiStatus) + if _, err := w.Write(propRes); err != nil { + sublog.Error().Err(err).Msg("error writing propfind response") + return + } +} + +func hasProp(pf *propfind.XML, key string) bool { + for i := range pf.Prop { + k := fmt.Sprintf("%s/%s", pf.Prop[i].Space, pf.Prop[i].Local) + if k == key { + return true + } + } + return false +} diff --git a/services/webdav/pkg/ocdav/mkcol.go b/services/webdav/pkg/ocdav/mkcol.go new file mode 100644 index 0000000000..9c5666c4b6 --- /dev/null +++ b/services/webdav/pkg/ocdav/mkcol.go @@ -0,0 +1,169 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +import ( + "context" + "errors" + "fmt" + "net/http" + "path" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + rstatus "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" +) + +func (s *svc) handlePathMkcol(w http.ResponseWriter, r *http.Request, ns string) (status int, err error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "mkcol") + defer span.End() + + if err := ValidateName(filename(r.URL.Path), s.nameValidators); err != nil { + return http.StatusBadRequest, err + } + fn := path.Join(ns, r.URL.Path) + sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() + + client, err := s.gatewaySelector.Next() + if err != nil { + return http.StatusInternalServerError, errtypes.InternalError(err.Error()) + } + + // stat requested path to make sure it isn't existing yet + // NOTE: It could be on another storage provider than the 'parent' of it + sr, err := client.Stat(ctx, &provider.StatRequest{ + Ref: &provider.Reference{ + Path: fn, + }, + }) + switch { + case 
err != nil: + return http.StatusInternalServerError, err + case sr.Status.Code == rpc.Code_CODE_OK: + // https://www.rfc-editor.org/rfc/rfc4918#section-9.3.1: + // 405 (Method Not Allowed) - MKCOL can only be executed on an unmapped URL. + return http.StatusMethodNotAllowed, fmt.Errorf("The resource you tried to create already exists") + case sr.Status.Code == rpc.Code_CODE_ABORTED: + return http.StatusPreconditionFailed, errtypes.NewErrtypeFromStatus(sr.Status) + case sr.Status.Code != rpc.Code_CODE_NOT_FOUND: + return rstatus.HTTPStatusFromCode(sr.Status.Code), errtypes.NewErrtypeFromStatus(sr.Status) + } + + parentPath := path.Dir(fn) + + space, rpcStatus, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, parentPath) + switch { + case err != nil: + return http.StatusInternalServerError, err + case rpcStatus.Code == rpc.Code_CODE_NOT_FOUND: + // https://www.rfc-editor.org/rfc/rfc4918#section-9.3.1: + // 409 (Conflict) - A collection cannot be made at the Request-URI until + // one or more intermediate collections have been created. The server + // MUST NOT create those intermediate collections automatically. 
+ return http.StatusConflict, fmt.Errorf("intermediate collection does not exist") + case rpcStatus.Code == rpc.Code_CODE_ABORTED: + return http.StatusPreconditionFailed, errtypes.NewErrtypeFromStatus(rpcStatus) + case rpcStatus.Code != rpc.Code_CODE_OK: + return rstatus.HTTPStatusFromCode(rpcStatus.Code), errtypes.NewErrtypeFromStatus(rpcStatus) + } + + return s.handleMkcol(ctx, w, r, spacelookup.MakeRelativeReference(space, parentPath, false), spacelookup.MakeRelativeReference(space, fn, false), sublog) +} + +func (s *svc) handleSpacesMkCol(w http.ResponseWriter, r *http.Request, spaceID string) (status int, err error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces_mkcol") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Str("handler", "mkcol").Logger() + + if err := ValidateName(filename(r.URL.Path), s.nameValidators); err != nil { + return http.StatusBadRequest, err + } + + parentRef, err := spacelookup.MakeStorageSpaceReference(spaceID, path.Dir(r.URL.Path)) + if err != nil { + return http.StatusBadRequest, fmt.Errorf("invalid space id") + } + childRef, _ := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + + return s.handleMkcol(ctx, w, r, &parentRef, &childRef, sublog) +} + +func (s *svc) handleMkcol(ctx context.Context, w http.ResponseWriter, r *http.Request, parentRef, childRef *provider.Reference, log zerolog.Logger) (status int, err error) { + if !isBodyEmpty(r) { + // We currently do not support extended mkcol https://datatracker.ietf.org/doc/rfc5689/ + // TODO let clients send a body with properties to set on the new resource + return http.StatusUnsupportedMediaType, fmt.Errorf("extended-mkcol not supported") + } + + client, err := s.gatewaySelector.Next() + if err != nil { + return http.StatusInternalServerError, errtypes.InternalError(err.Error()) + } + req := &provider.CreateContainerRequest{Ref: childRef} + res, err := 
client.CreateContainer(ctx, req) + switch { + case err != nil: + return http.StatusInternalServerError, err + case res.Status.Code == rpc.Code_CODE_OK: + w.WriteHeader(http.StatusCreated) + return 0, nil + case res.Status.Code == rpc.Code_CODE_NOT_FOUND: + // This should never happen because if the parent collection does not exist we should + // get a Code_CODE_FAILED_PRECONDITION. We play stupid and return what the response gave us + //lint:ignore ST1005 mimic the exact oc10 error message + return http.StatusNotFound, errors.New("Resource not found") + case res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED: + // check if user has access to parent + sRes, err := client.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ + ResourceId: childRef.GetResourceId(), + Path: utils.MakeRelativePath(path.Dir(childRef.Path)), + }}) + if err != nil { + return http.StatusInternalServerError, err + } + if sRes.Status.Code != rpc.Code_CODE_OK { + // return not found error so we do not leak existence of a file + // TODO hide permission failed for users without access in every kind of request + // TODO should this be done in the driver? + //lint:ignore ST1005 mimic the exact oc10 error message + return http.StatusNotFound, errors.New("Resource not found") + } + return http.StatusForbidden, errors.New(sRes.Status.Message) + case res.Status.Code == rpc.Code_CODE_ABORTED: + return http.StatusPreconditionFailed, errors.New(res.Status.Message) + case res.Status.Code == rpc.Code_CODE_FAILED_PRECONDITION: + // https://www.rfc-editor.org/rfc/rfc4918#section-9.3.1: + // 409 (Conflict) - A collection cannot be made at the Request-URI until + // one or more intermediate collections have been created. The server + // MUST NOT create those intermediate collections automatically. 
+ return http.StatusConflict, errors.New(res.Status.Message) + case res.Status.Code == rpc.Code_CODE_ALREADY_EXISTS: + // https://www.rfc-editor.org/rfc/rfc4918#section-9.3.1: + // 405 (Method Not Allowed) - MKCOL can only be executed on an unmapped URL. + //lint:ignore ST1005 mimic the exact oc10 error message + return http.StatusMethodNotAllowed, errors.New("The resource you tried to create already exists") + } + return rstatus.HTTPStatusFromCode(res.Status.Code), errtypes.NewErrtypeFromStatus(res.Status) +} diff --git a/services/webdav/pkg/ocdav/move.go b/services/webdav/pkg/ocdav/move.go new file mode 100644 index 0000000000..a00de726cc --- /dev/null +++ b/services/webdav/pkg/ocdav/move.go @@ -0,0 +1,361 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "fmt" + "net/http" + "path" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + rstatus "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" +) + +func (s *svc) handlePathMove(w http.ResponseWriter, r *http.Request, ns string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "move") + defer span.End() + + if !isBodyEmpty(r) { + w.WriteHeader(http.StatusUnsupportedMediaType) + b, err := errors.Marshal(http.StatusUnsupportedMediaType, "body must be empty", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + srcPath := path.Join(ns, r.URL.Path) + dh := r.Header.Get(net.HeaderDestination) + baseURI := r.Context().Value(net.CtxKeyBaseURI).(string) + dstPath, err := net.ParseDestination(baseURI, dh) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "failed to extract destination", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + if err := ValidateName(filename(srcPath), s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "source failed naming rules", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + if err := ValidateDestination(filename(dstPath), s.nameValidators); err != nil { + 
w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "destination naming rules", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + dstPath = path.Join(ns, dstPath) + + sublog := s.log.With().Str("src", srcPath).Str("dst", dstPath).Logger() + + srcSpace, status, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, srcPath) + if err != nil { + sublog.Error().Err(err).Str("path", srcPath).Msg("failed to look up source storage space") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, status) + return + } + dstSpace, status, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, dstPath) + if err != nil { + sublog.Error().Err(err).Str("path", dstPath).Msg("failed to look up destination storage space") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, status) + return + } + + s.handleMove(ctx, w, r, spacelookup.MakeRelativeReference(srcSpace, srcPath, false), spacelookup.MakeRelativeReference(dstSpace, dstPath, false), sublog) +} + +func (s *svc) handleSpacesMove(w http.ResponseWriter, r *http.Request, srcSpaceID string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces_move") + defer span.End() + + if !isBodyEmpty(r) { + w.WriteHeader(http.StatusUnsupportedMediaType) + b, err := errors.Marshal(http.StatusUnsupportedMediaType, "body must be empty", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + dh := r.Header.Get(net.HeaderDestination) + baseURI := r.Context().Value(net.CtxKeyBaseURI).(string) + dst, err := net.ParseDestination(baseURI, dh) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + sublog := s.log.With().Str("spaceid", srcSpaceID).Str("path", r.URL.Path).Logger() + + srcRef, err 
:= spacelookup.MakeStorageSpaceReference(srcSpaceID, r.URL.Path) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + dstSpaceID, dstRelPath := router.ShiftPath(dst) + + if dstRelPath != "" && dstRelPath != "." && dstRelPath != "/" { + err := ValidateDestination(filename(dstRelPath), s.nameValidators) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "destination naming rules", "", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + } + + dstRef, err := spacelookup.MakeStorageSpaceReference(dstSpaceID, dstRelPath) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + s.handleMove(ctx, w, r, &srcRef, &dstRef, sublog) +} + +func (s *svc) handleMove(ctx context.Context, w http.ResponseWriter, r *http.Request, src, dst *provider.Reference, log zerolog.Logger) { + isChild, err := s.referenceIsChildOf(ctx, s.gatewaySelector, dst, src) + if err != nil { + switch err.(type) { + case errtypes.IsNotFound: + w.WriteHeader(http.StatusNotFound) + case errtypes.IsNotSupported: + log.Error().Err(err).Msg("can not detect recursive move operation. missing machine auth configuration?") + w.WriteHeader(http.StatusForbidden) + default: + log.Error().Err(err).Msg("error while trying to detect recursive move operation") + w.WriteHeader(http.StatusInternalServerError) + } + return + } + if isChild { + w.WriteHeader(http.StatusConflict) + b, err := errors.Marshal(http.StatusBadRequest, "can not move a folder into one of its children", "", "") + errors.HandleWebdavError(&log, w, b, err) + return + } + + isParent, err := s.referenceIsChildOf(ctx, s.gatewaySelector, src, dst) + if err != nil { + switch err.(type) { + case errtypes.IsNotFound: + isParent = false + case errtypes.IsNotSupported: + log.Error().Err(err).Msg("can not detect recursive move operation. 
missing machine auth configuration?") + w.WriteHeader(http.StatusForbidden) + return + default: + log.Error().Err(err).Msg("error while trying to detect recursive move operation") + w.WriteHeader(http.StatusInternalServerError) + return + } + } + if isParent { + w.WriteHeader(http.StatusConflict) + b, err := errors.Marshal(http.StatusBadRequest, "can not move a folder into its parent", "", "") + errors.HandleWebdavError(&log, w, b, err) + return + + } + + oh := r.Header.Get(net.HeaderOverwrite) + log.Debug().Str("overwrite", oh).Msg("move") + + overwrite, err := net.ParseOverwrite(oh) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + client, err := s.gatewaySelector.Next() + if err != nil { + log.Error().Err(err).Msg("error selecting next client") + w.WriteHeader(http.StatusInternalServerError) + return + } + + // check src exists + srcStatReq := &provider.StatRequest{Ref: src} + srcStatRes, err := client.Stat(ctx, srcStatReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if srcStatRes.Status.Code != rpc.Code_CODE_OK { + if srcStatRes.Status.Code == rpc.Code_CODE_NOT_FOUND { + w.WriteHeader(http.StatusNotFound) + m := fmt.Sprintf("Resource %v not found", srcStatReq.Ref.Path) + b, err := errors.Marshal(http.StatusNotFound, m, "", "") + errors.HandleWebdavError(&log, w, b, err) + } + errors.HandleErrorStatus(&log, w, srcStatRes.Status) + return + } + if utils.IsSpaceRoot(srcStatRes.GetInfo()) { + log.Error().Msg("the source is disallowed") + w.WriteHeader(http.StatusBadRequest) + return + } + + // check dst exists + dstStatReq := &provider.StatRequest{Ref: dst} + dstStatRes, err := client.Stat(ctx, dstStatReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if dstStatRes.Status.Code != rpc.Code_CODE_OK && dstStatRes.Status.Code != rpc.Code_CODE_NOT_FOUND 
{ + errors.HandleErrorStatus(&log, w, dstStatRes.Status) + return + } + + successCode := http.StatusCreated // 201 if new resource was created, see https://tools.ietf.org/html/rfc4918#section-9.9.4 + if dstStatRes.Status.Code == rpc.Code_CODE_OK { + successCode = http.StatusNoContent // 204 if target already existed, see https://tools.ietf.org/html/rfc4918#section-9.9.4 + + if utils.IsSpaceRoot(dstStatRes.GetInfo()) { + log.Error().Msg("overwriting is not allowed") + w.WriteHeader(http.StatusBadRequest) + return + } + if !overwrite { + log.Warn().Bool("overwrite", overwrite).Msg("dst already exists") + w.WriteHeader(http.StatusPreconditionFailed) // 412, see https://tools.ietf.org/html/rfc4918#section-9.9.4 + return + } + // delete existing tree + delReq := &provider.DeleteRequest{Ref: dst} + delRes, err := client.Delete(ctx, delReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc delete request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if delRes.Status.Code != rpc.Code_CODE_OK && delRes.Status.Code != rpc.Code_CODE_NOT_FOUND { + errors.HandleErrorStatus(&log, w, delRes.Status) + return + } + } else { + // check if an intermediate path / the parent exists + intStatReq := &provider.StatRequest{Ref: &provider.Reference{ + ResourceId: dst.ResourceId, + Path: utils.MakeRelativePath(path.Dir(dst.Path)), + }} + intStatRes, err := client.Stat(ctx, intStatReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if intStatRes.Status.Code != rpc.Code_CODE_OK { + if intStatRes.Status.Code == rpc.Code_CODE_NOT_FOUND { + // 409 if intermediate dir is missing, see https://tools.ietf.org/html/rfc4918#section-9.8.5 + log.Debug().Interface("parent", dst).Interface("status", intStatRes.Status).Msg("conflict") + w.WriteHeader(http.StatusConflict) + } else { + errors.HandleErrorStatus(&log, w, intStatRes.Status) + } + return + } + // TODO what if 
intermediate is a file? + } + // resolve the destination path + if dst.Path == "." { + dst.Path = utils.MakeRelativePath(dstStatRes.GetInfo().GetName()) + dst.ResourceId = dstStatRes.GetInfo().GetParentId() + } + mReq := &provider.MoveRequest{ + Source: src, + Destination: dst, + LockId: requestLock(r), + } + mRes, err := client.Move(ctx, mReq) + if err != nil { + log.Error().Err(err).Msg("error sending move grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if mRes.Status.Code != rpc.Code_CODE_OK { + status := rstatus.HTTPStatusFromCode(mRes.Status.Code) + m := mRes.Status.Message + switch mRes.Status.Code { + case rpc.Code_CODE_ABORTED: + status = http.StatusPreconditionFailed + case rpc.Code_CODE_PERMISSION_DENIED: + status = http.StatusForbidden + case rpc.Code_CODE_UNIMPLEMENTED: + // We translate this into a Bad Gateway error as per https://www.rfc-editor.org/rfc/rfc4918#section-9.9.4 + // > 502 (Bad Gateway) - This may occur when the destination is on another + // > server and the destination server refuses to accept the resource. + // > This could also occur when the destination is on another sub-section + // > of the same server namespace. 
+ status = http.StatusBadGateway + } + + w.WriteHeader(status) + + b, err := errors.Marshal(status, m, "", "") + errors.HandleWebdavError(&log, w, b, err) + return + } + + dstStatRes, err = client.Stat(ctx, dstStatReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if dstStatRes.Status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&log, w, dstStatRes.Status) + return + } + + info := dstStatRes.Info + w.Header().Set(net.HeaderContentType, info.MimeType) + w.Header().Set(net.HeaderETag, info.Etag) + w.Header().Set(net.HeaderOCFileID, storagespace.FormatResourceID(info.Id)) + w.Header().Set(net.HeaderOCETag, info.Etag) + w.WriteHeader(successCode) +} diff --git a/services/webdav/pkg/ocdav/net/builders.go b/services/webdav/pkg/ocdav/net/builders.go new file mode 100644 index 0000000000..70a4946716 --- /dev/null +++ b/services/webdav/pkg/ocdav/net/builders.go @@ -0,0 +1,38 @@ +// Copyright 2018-2022 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package net + +import ( + "net/url" + "time" + + cs3types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/opencloud-eu/reva/v2/pkg/utils" +) + +// ContentDispositionAttachment builds a ContentDisposition Attachment header with various filename encodings +func ContentDispositionAttachment(filename string) string { + return "attachment; filename*=UTF-8''" + url.PathEscape(filename) + "; filename=\"" + filename + "\"" +} + +// RFC1123Z formats a CS3 Timestamp to be used in HTTP headers like Last-Modified +func RFC1123Z(ts *cs3types.Timestamp) string { + t := utils.TSToTime(ts).UTC() + return t.Format(time.RFC1123Z) +} diff --git a/services/webdav/pkg/ocdav/net/context.go b/services/webdav/pkg/ocdav/net/context.go new file mode 100644 index 0000000000..23884c2325 --- /dev/null +++ b/services/webdav/pkg/ocdav/net/context.go @@ -0,0 +1,43 @@ +// Copyright 2018-2022 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package net + +import ( + "context" + + userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" +) + +// IsCurrentUserOwnerOrManager returns whether the context user is the given owner or not +func IsCurrentUserOwnerOrManager(ctx context.Context, owner *userv1beta1.UserId, md *provider.ResourceInfo) bool { + contextUser, ok := ctxpkg.ContextGetUser(ctx) + // personal spaces have owners + if ok && contextUser.Id != nil && owner != nil && + contextUser.Id.Idp == owner.Idp && + contextUser.Id.OpaqueId == owner.OpaqueId { + return true + } + // check if the user is space manager + if md != nil && md.Owner != nil && md.Owner.GetType() == userv1beta1.UserType_USER_TYPE_SPACE_OWNER { + return md.GetPermissionSet().AddGrant + } + return false +} diff --git a/services/webdav/pkg/ocdav/net/context_test.go b/services/webdav/pkg/ocdav/net/context_test.go new file mode 100644 index 0000000000..a81a0f4551 --- /dev/null +++ b/services/webdav/pkg/ocdav/net/context_test.go @@ -0,0 +1,93 @@ +// Copyright 2018-2022 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
package net_test

import (
	"context"

	userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
	"github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net"
	ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Specs for net.IsCurrentUserOwnerOrManager covering the two paths of the
// function: direct ownership (context user id equals owner id) and project
// spaces owned by a pseudo "space owner" user, where manager status is derived
// from the AddGrant permission.
var _ = Describe("Net", func() {
	var (
		// alice and bob are plain personal-space users, identified by opaque id only
		alice = &userpb.User{
			Id: &userpb.UserId{
				OpaqueId: "alice",
			},
			Username: "alice",
		}
		bob = &userpb.User{
			Id: &userpb.UserId{
				OpaqueId: "bob",
			},
			Username: "bob",
		}
		// spaceManager models the pseudo owner of a project space
		spaceManager = &userpb.User{
			Id: &userpb.UserId{
				OpaqueId: "space-id",
			},
			Username: "virtual",
		}
		// resource in a project space where the permission set grants AddGrant
		// (i.e. the caller is a manager of the space)
		mdSpaceManager = &provider.ResourceInfo{
			Owner: &userpb.UserId{
				OpaqueId: "user-1",
				Type:     userpb.UserType_USER_TYPE_SPACE_OWNER,
			},
			PermissionSet: &provider.ResourcePermissions{
				AddGrant: true,
			},
		}

		// resource in a project space with read-only permissions (no AddGrant)
		mdSpaceViewer = &provider.ResourceInfo{
			Owner: &userpb.UserId{
				OpaqueId: "user-1",
				Type:     userpb.UserType_USER_TYPE_SPACE_OWNER,
			},
			PermissionSet: &provider.ResourcePermissions{
				ListContainer: true,
			},
		}
		aliceCtx = ctxpkg.ContextSetUser(context.Background(), alice)
		bobCtx   = ctxpkg.ContextSetUser(context.Background(), bob)
	)

	Describe("IsCurrentUserOwnerOrManager", func() {
		It("returns true", func() {
			// context user == owner
			Expect(net.IsCurrentUserOwnerOrManager(aliceCtx, alice.Id, nil)).To(BeTrue())
		})

		It("returns false", func() {
			// context user != owner, no resource info to fall back to
			Expect(net.IsCurrentUserOwnerOrManager(bobCtx, alice.Id, nil)).To(BeFalse())
		})

		It("user is space manager", func() {
			// space-owner owned resource with AddGrant -> manager
			Expect(net.IsCurrentUserOwnerOrManager(bobCtx, spaceManager.Id, mdSpaceManager)).To(BeTrue())
		})

		It("user is space viewer", func() {
			// space-owner owned resource without AddGrant -> not a manager
			Expect(net.IsCurrentUserOwnerOrManager(bobCtx, spaceManager.Id, mdSpaceViewer)).To(BeFalse())
		})
	})
})
// Common HTTP headers.
const (
	HeaderAcceptRanges               = "Accept-Ranges"
	HeaderAccessControlAllowHeaders  = "Access-Control-Allow-Headers"
	HeaderAccessControlExposeHeaders = "Access-Control-Expose-Headers"
	// HeaderContentDisposistion is kept for backwards compatibility.
	//
	// Deprecated: the constant name contains a typo; use
	// HeaderContentDisposition instead.
	HeaderContentDisposistion = "Content-Disposition"
	// HeaderContentDisposition is the correctly spelled Content-Disposition
	// header key.
	HeaderContentDisposition  = "Content-Disposition"
	HeaderContentEncoding     = "Content-Encoding"
	HeaderContentLength       = "Content-Length"
	HeaderContentRange        = "Content-Range"
	HeaderContentType         = "Content-Type"
	HeaderETag                = "ETag"
	HeaderLastModified        = "Last-Modified"
	HeaderLocation            = "Location"
	HeaderRange               = "Range"
	HeaderIfMatch             = "If-Match"
	HeaderIfNoneMatch         = "If-None-Match"
	HeaderPrefer              = "Prefer"
	HeaderPreferenceApplied   = "Preference-Applied"
	HeaderVary                = "Vary"
)

// webdav headers
const (
	HeaderDav         = "DAV"         // https://datatracker.ietf.org/doc/html/rfc4918#section-10.1
	HeaderDepth       = "Depth"       // https://datatracker.ietf.org/doc/html/rfc4918#section-10.2
	HeaderDestination = "Destination" // https://datatracker.ietf.org/doc/html/rfc4918#section-10.3
	HeaderIf          = "If"          // https://datatracker.ietf.org/doc/html/rfc4918#section-10.4
	HeaderLockToken   = "Lock-Token"  // https://datatracker.ietf.org/doc/html/rfc4918#section-10.5
	HeaderOverwrite   = "Overwrite"   // https://datatracker.ietf.org/doc/html/rfc4918#section-10.6
	HeaderTimeout     = "Timeout"     // https://datatracker.ietf.org/doc/html/rfc4918#section-10.7
)

// Non standard HTTP headers.
const (
	HeaderOCFileID             = "OC-FileId"
	HeaderOCETag               = "OC-ETag"
	HeaderOCChecksum           = "OC-Checksum"
	HeaderOCPermissions        = "OC-Perm"
	HeaderTusResumable         = "Tus-Resumable"
	HeaderTusVersion           = "Tus-Version"
	HeaderTusExtension         = "Tus-Extension"
	HeaderTusChecksumAlgorithm = "Tus-Checksum-Algorithm"
	HeaderTusUploadExpires     = "Upload-Expires"
	HeaderUploadChecksum       = "Upload-Checksum"
	HeaderUploadLength         = "Upload-Length"
	HeaderUploadMetadata       = "Upload-Metadata"
	HeaderUploadOffset         = "Upload-Offset"
	HeaderOCMtime              = "X-OC-Mtime"
	HeaderExpectedEntityLength = "X-Expected-Entity-Length"
	HeaderLitmus               = "X-Litmus"
	HeaderTransferAuth         = "TransferHeaderAuthorization"
)

// HTTP Prefer header values
const (
	HeaderPreferReturn = "return" // eg. return=representation / return=minimal, depth-noroot
)
import (
	"errors"
	"fmt"
	"net/url"
	"strings"
)

var (
	// ErrInvalidHeaderValue defines an error which can occur when trying to parse a header value.
	// Callers match it with errors.Is.
	ErrInvalidHeaderValue = errors.New("invalid value")
)

type ctxKey int

const (
	// CtxKeyBaseURI is the key of the base URI context field
	CtxKeyBaseURI ctxKey = iota

	// NsDav is the Dav ns
	NsDav = "DAV:"
	// NsOwncloud is the owncloud ns
	NsOwncloud = "http://owncloud.org/ns"
	// NsOCS is the OCS ns
	NsOCS = "http://open-collaboration-services.org/ns"

	// RFC1123 time that mimics oc10. time.RFC1123 would end in "UTC", see https://github.com/golang/go/issues/13781
	RFC1123 = "Mon, 02 Jan 2006 15:04:05 GMT"

	// PropQuotaUnknown is the quota unknown property
	PropQuotaUnknown = "-2"
	// PropOcFavorite is the favorite ns property
	PropOcFavorite = "http://owncloud.org/ns/favorite"
	// PropOcMetaPathForUser is the meta-path-for-user ns property
	PropOcMetaPathForUser = "http://owncloud.org/ns/meta-path-for-user"

	// DepthZero represents the webdav zero depth value
	DepthZero Depth = "0"
	// DepthOne represents the webdav one depth value
	DepthOne Depth = "1"
	// DepthInfinity represents the webdav infinity depth value
	DepthInfinity Depth = "infinity"
)

// Depth is a type representing the webdav depth header value
type Depth string

// String returns the string representation of the webdav depth value
func (d Depth) String() string {
	return string(d)
}

// EncodePath encodes the path of a url.
//
// slashes (/) are treated as path-separators.
func EncodePath(path string) string {
	return (&url.URL{Path: path}).EscapedPath()
}

// ParseDepth parses the depth header value defined in https://tools.ietf.org/html/rfc4918#section-9.1
// Valid values are "0", "1" and "infinity" (case-insensitive). An empty string will be parsed to "1".
// For all other values this method returns an error wrapping ErrInvalidHeaderValue.
func ParseDepth(s string) (Depth, error) {
	if s == "" {
		return DepthOne, nil
	}

	switch strings.ToLower(s) {
	case DepthZero.String():
		return DepthZero, nil
	case DepthOne.String():
		return DepthOne, nil
	case DepthInfinity.String():
		return DepthInfinity, nil
	default:
		return "", fmt.Errorf("invalid depth: %s: %w", s, ErrInvalidHeaderValue)
	}
}

// ParseOverwrite parses the overwrite header value defined in https://datatracker.ietf.org/doc/html/rfc4918#section-10.6
// Valid values are "T" and "F". An empty string will be parsed to true.
func ParseOverwrite(s string) (bool, error) {
	if s == "" {
		s = "T"
	}
	if s != "T" && s != "F" {
		return false, fmt.Errorf("invalid overwrite: %s: %w", s, ErrInvalidHeaderValue)
	}
	return s == "T", nil
}

// ParseDestination parses the destination header value defined in https://datatracker.ietf.org/doc/html/rfc4918#section-10.3
// The returned path will be relative to the given baseURI.
func ParseDestination(baseURI, s string) (string, error) {
	if s == "" {
		return "", fmt.Errorf("destination header is empty: %w", ErrInvalidHeaderValue)
	}
	dstURL, err := url.ParseRequestURI(s)
	if err != nil {
		return "", fmt.Errorf("%v: %w", err, ErrInvalidHeaderValue)
	}

	// TODO check if path is on same storage, return 502 on problems, see https://tools.ietf.org/html/rfc4918#section-9.9.4
	// TODO make request.php optional in destination header
	// Strip the base URI from the destination. The destination might contain redirection prefixes which need to be handled
	urlSplit := strings.Split(dstURL.Path, baseURI)
	if len(urlSplit) != 2 {
		return "", fmt.Errorf("destination path does not contain base URI: %w", ErrInvalidHeaderValue)
	}

	return urlSplit[1], nil
}

// ParsePrefer parses the prefer header value defined in https://datatracker.ietf.org/doc/html/rfc8144
// Preferences without a value are mapped to "1" to mark them as set.
func ParsePrefer(s string) map[string]string {
	parts := strings.Split(s, ",")
	m := make(map[string]string, len(parts))
	for _, part := range parts {
		kv := strings.SplitN(strings.ToLower(strings.Trim(part, " ")), "=", 2)
		if len(kv) == 2 {
			m[kv[0]] = kv[1]
		} else {
			m[kv[0]] = "1" // mark it as set
		}
	}
	return m
}
"github.com/onsi/gomega" +) + +func TestNet(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Net Suite") +} diff --git a/services/webdav/pkg/ocdav/net/net_test.go b/services/webdav/pkg/ocdav/net/net_test.go new file mode 100644 index 0000000000..bb3218aaed --- /dev/null +++ b/services/webdav/pkg/ocdav/net/net_test.go @@ -0,0 +1,116 @@ +// Copyright 2018-2022 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package net_test + +import ( + "time" + + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gmeasure" +) + +var _ = Describe("Net", func() { + DescribeTable("TestParseDepth", + func(v string, expectSuccess bool, expectedValue net.Depth) { + parsed, err := net.ParseDepth(v) + Expect(err == nil).To(Equal(expectSuccess)) + Expect(parsed).To(Equal(expectedValue)) + }, + Entry("default", "", true, net.DepthOne), + Entry("0", "0", true, net.DepthZero), + Entry("1", "1", true, net.DepthOne), + Entry("infinity", "infinity", true, net.DepthInfinity), + Entry("invalid", "invalid", false, net.Depth(""))) + + Describe("ParseDepth", func() { + It("is reasonably fast", func() { + experiment := NewExperiment("Parsing depth headers") + AddReportEntry(experiment.Name, experiment) + + inputs := []string{"", "0", "1", "infinity", "INFINITY"} + size := len(inputs) + experiment.Sample(func(i int) { + experiment.MeasureDuration("parsing", func() { + _, _ = net.ParseDepth(inputs[i%size]) + }) + }, SamplingConfig{Duration: time.Second}) + + encodingStats := experiment.GetStats("parsing") + medianDuration := encodingStats.DurationFor(StatMedian) + + Expect(medianDuration).To(BeNumerically("<", 3*time.Millisecond)) + }) + }) + + Describe("EncodePath", func() { + It("encodes paths", func() { + Expect(net.EncodePath("foo")).To(Equal("foo")) + Expect(net.EncodePath("/some/path/Folder %^*(#1)")).To(Equal("/some/path/Folder%20%25%5E%2A%28%231%29")) + }) + + /* + The encodePath method as it is implemented currently is terribly inefficient. + As soon as there are a few special characters which need to be escaped the allocation count rises and the time spent too. + Adding more special characters increases the allocations and the time spent can rise up to a few milliseconds. + Granted this is not a lot on it's own but when a user has tens or hundreds of paths which need to be escaped and contain a few special characters + then this method alone will cost a huge amount of time. 
+ */ + It("is reasonably fast", func() { + experiment := NewExperiment("Encoding paths") + AddReportEntry(experiment.Name, experiment) + + experiment.Sample(func(idx int) { + experiment.MeasureDuration("encoding", func() { + _ = net.EncodePath("/some/path/Folder %^*(#1)") + }) + }, SamplingConfig{Duration: time.Second}) + + encodingStats := experiment.GetStats("encoding") + medianDuration := encodingStats.DurationFor(StatMedian) + + Expect(medianDuration).To(BeNumerically("<", 10*time.Millisecond)) + }) + }) + + DescribeTable("TestParseOverwrite", + func(v string, expectSuccess bool, expectedValue bool) { + parsed, err := net.ParseOverwrite(v) + Expect(err == nil).To(Equal(expectSuccess)) + Expect(parsed).To(Equal(expectedValue)) + }, + Entry("default", "", true, true), + Entry("T", "T", true, true), + Entry("F", "F", true, false), + Entry("invalid", "invalid", false, false)) + + DescribeTable("TestParseDestination", + func(baseURI, v string, expectSuccess bool, expectedValue string) { + parsed, err := net.ParseDestination(baseURI, v) + Expect(err == nil).To(Equal(expectSuccess)) + Expect(parsed).To(Equal(expectedValue)) + }, + Entry("invalid1", "", "", false, ""), + Entry("invalid2", "baseURI", "", false, ""), + Entry("invalid3", "", "/dest/path", false, ""), + Entry("invalid4", "/foo", "/dest/path", false, ""), + Entry("valid", "/foo", "https://example.com/foo/dest/path", true, "/dest/path")) +}) diff --git a/services/webdav/pkg/ocdav/ocdav.go b/services/webdav/pkg/ocdav/ocdav.go new file mode 100644 index 0000000000..e6b2942131 --- /dev/null +++ b/services/webdav/pkg/ocdav/ocdav.go @@ -0,0 +1,436 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +import ( + "context" + "fmt" + "io" + "net/http" + "path" + "strings" + "time" + + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/jellydator/ttlcache/v2" + "github.com/mitchellh/mapstructure" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/rhttp" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/global" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "github.com/opencloud-eu/reva/v2/pkg/signedurl" + "github.com/opencloud-eu/reva/v2/pkg/storage/favorite" + "github.com/opencloud-eu/reva/v2/pkg/storage/favorite/registry" + "github.com/opencloud-eu/reva/v2/pkg/storage/utils/templates" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// name is the Tracer name 
used to identify this instrumentation library. +const tracerName = "ocdav" + +func init() { + global.Register("ocdav", New) +} + +const ( + // TokenTransportHeader holds the header key for the reva transfer token + TokenTransportHeader = "X-Reva-Transfer" +) + +type svc struct { + c *config.Config + webDavHandler *WebDavHandler + davHandler *DavHandler + favoritesManager favorite.Manager + client *http.Client + gatewaySelector pool.Selectable[gateway.GatewayAPIClient] + // LockSystem is the lock management system. + LockSystem LockSystem + userIdentifierCache *ttlcache.Cache + nameValidators []Validator + urlSigner signedurl.Signer + log zerolog.Logger +} + +func (s *svc) Config() *config.Config { + return s.c +} + +func getFavoritesManager(c *config.Config) (favorite.Manager, error) { + if f, ok := registry.NewFuncs[c.FavoriteStorageDriver]; ok { + return f(c.FavoriteStorageDrivers[c.FavoriteStorageDriver]) + } + return nil, errtypes.NotFound("driver not found: " + c.FavoriteStorageDriver) +} +func getLockSystem(c *config.Config) (LockSystem, error) { + // TODO in memory implementation + selector, err := pool.GatewaySelector(c.GatewaySvc) + if err != nil { + return nil, err + } + return NewCS3LS(selector), nil +} + +// New returns a new ocdav service +func New(m map[string]interface{}, log *zerolog.Logger) (global.Service, error) { + conf := &config.Config{} + if err := mapstructure.Decode(m, conf); err != nil { + return nil, err + } + + conf.Init() + + fm, err := getFavoritesManager(conf) + if err != nil { + return nil, err + } + ls, err := getLockSystem(conf) + if err != nil { + return nil, err + } + + return NewWith(conf, fm, ls, log, nil) +} + +// NewWith returns a new ocdav service +func NewWith(conf *config.Config, fm favorite.Manager, ls LockSystem, log *zerolog.Logger, selector pool.Selectable[gateway.GatewayAPIClient]) (global.Service, error) { + // be safe - init the conf again + conf.Init() + + var signer signedurl.Signer + if 
conf.URLSigningSharedSecret != "" { + var err error + signer, err = signedurl.NewJWTSignedURL(signedurl.WithSecret(conf.URLSigningSharedSecret)) + if err != nil { + return nil, fmt.Errorf("failed to initialize URL signer: %w", err) + } + } + + s := &svc{ + c: conf, + webDavHandler: new(WebDavHandler), + davHandler: new(DavHandler), + client: rhttp.GetHTTPClient( + rhttp.Timeout(time.Duration(conf.Timeout*int64(time.Second))), + rhttp.Insecure(conf.Insecure), + ), + gatewaySelector: selector, + favoritesManager: fm, + LockSystem: ls, + userIdentifierCache: ttlcache.NewCache(), + nameValidators: ValidatorsFromConfig(conf), + urlSigner: signer, + log: *log, + } + _ = s.userIdentifierCache.SetTTL(60 * time.Second) + + // initialize handlers and set default configs + if err := s.webDavHandler.Init(conf.WebdavNamespace, true); err != nil { + return nil, err + } + if err := s.davHandler.Init(conf); err != nil { + return nil, err + } + if selector == nil { + var err error + s.gatewaySelector, err = pool.GatewaySelector(s.c.GatewaySvc) + if err != nil { + return nil, err + } + } + return s, nil +} + +func (s *svc) Prefix() string { + return s.c.Prefix +} + +func (s *svc) Close() error { + return nil +} + +func (s *svc) Unprotected() []string { + return []string{"/status.php", "/status", "/remote.php/dav/public-files/", "/apps/files/", "/index.php/f/", "/index.php/s/", "/remote.php/dav/ocm/", "/dav/ocm/"} +} + +func (s *svc) Handler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + log := appctx.GetLogger(ctx) + + // TODO(jfd): do we need this? 
+ // fake litmus testing for empty namespace: see https://github.com/golang/net/blob/e514e69ffb8bc3c76a71ae40de0118d794855992/webdav/litmus_test_server.go#L58-L89 + if r.Header.Get(net.HeaderLitmus) == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + + // to build correct href prop urls we need to keep track of the base path + // always starts with / + base := path.Join("/", s.Prefix()) + + var head string + head, r.URL.Path = router.ShiftPath(r.URL.Path) + log.Debug().Str("method", r.Method).Str("head", head).Str("tail", r.URL.Path).Msg("http routing") + switch head { + case "status.php", "status": + s.doStatus(w, r) + return + case "remote.php": + // skip optional "remote.php" + head, r.URL.Path = router.ShiftPath(r.URL.Path) + + // yet, add it to baseURI + base = path.Join(base, "remote.php") + case "apps": + head, r.URL.Path = router.ShiftPath(r.URL.Path) + if head == "files" { + s.handleLegacyPath(w, r) + return + } + case "index.php": + head, r.URL.Path = router.ShiftPath(r.URL.Path) + if head == "s" { + token := r.URL.Path + rURL := s.c.PublicURL + path.Join(head, token) + + http.Redirect(w, r, rURL, http.StatusMovedPermanently) + return + } + } + switch head { + // the old `/webdav` endpoint uses remote.php/webdav/$path + case "webdav": + // for oc we need to prepend /home as the path that will be passed to the home storage provider + // will not contain the username + base = path.Join(base, "webdav") + ctx := context.WithValue(ctx, net.CtxKeyBaseURI, base) + r = r.WithContext(ctx) + s.webDavHandler.Handler(s).ServeHTTP(w, r) + return + case "dav": + // cern uses /dav/files/$namespace -> /$namespace/... + // oc uses /dav/files/$user -> /$home/$user/... + // for oc we need to prepend the path to user homes + // or we take the path starting at /dav and allow rewriting it? 
+ base = path.Join(base, "dav") + ctx := context.WithValue(ctx, net.CtxKeyBaseURI, base) + r = r.WithContext(ctx) + s.davHandler.Handler(s).ServeHTTP(w, r) + return + } + log.Warn().Msg("resource not found") + w.WriteHeader(http.StatusNotFound) + }) +} + +func (s *svc) ApplyLayout(ctx context.Context, ns string, useLoggedInUserNS bool, requestPath string) (string, string, error) { + // If useLoggedInUserNS is false, that implies that the request is coming from + // the FilesHandler method invoked by a /dav/files/fileOwner where fileOwner + // is not the same as the logged in user. In that case, we'll treat fileOwner + // as the username whose files are to be accessed and use that in the + // namespace template. + u, ok := ctxpkg.ContextGetUser(ctx) + if !ok || !useLoggedInUserNS { + var requestUsernameOrID string + requestUsernameOrID, requestPath = router.ShiftPath(requestPath) + + // Check if this is a Userid + client, err := s.gatewaySelector.Next() + if err != nil { + return "", "", err + } + + userRes, err := client.GetUser(ctx, &userpb.GetUserRequest{ + UserId: &userpb.UserId{OpaqueId: requestUsernameOrID}, + }) + if err != nil { + return "", "", err + } + + // If it's not a userid try if it is a user name + if userRes.Status.Code != rpc.Code_CODE_OK { + res, err := client.GetUserByClaim(ctx, &userpb.GetUserByClaimRequest{ + Claim: "username", + Value: requestUsernameOrID, + }) + if err != nil { + return "", "", err + } + userRes.Status = res.Status + userRes.User = res.User + } + + // If still didn't find a user, fallback + if userRes.Status.Code != rpc.Code_CODE_OK { + userRes.User = &userpb.User{ + Username: requestUsernameOrID, + Id: &userpb.UserId{OpaqueId: requestUsernameOrID}, + } + } + + u = userRes.User + } + + return templates.WithUser(u, ns), requestPath, nil +} + +func authContextForUser(client gateway.GatewayAPIClient, userID *userpb.UserId, machineAuthAPIKey string) (context.Context, error) { + if machineAuthAPIKey == "" { + return nil, 
errtypes.NotSupported("machine auth not configured") + } + // Get auth + granteeCtx := ctxpkg.ContextSetUser(context.Background(), &userpb.User{Id: userID}) + + authRes, err := client.Authenticate(granteeCtx, &gateway.AuthenticateRequest{ + Type: "machine", + ClientId: "userid:" + userID.OpaqueId, + ClientSecret: machineAuthAPIKey, + }) + if err != nil { + return nil, err + } + if authRes.GetStatus().GetCode() != rpc.Code_CODE_OK { + return nil, errtypes.NewErrtypeFromStatus(authRes.Status) + } + granteeCtx = metadata.AppendToOutgoingContext(granteeCtx, ctxpkg.TokenHeader, authRes.Token) + return granteeCtx, nil +} + +func (s *svc) sspReferenceIsChildOf(ctx context.Context, selector pool.Selectable[gateway.GatewayAPIClient], child, parent *provider.Reference) (bool, error) { + client, err := selector.Next() + if err != nil { + return false, err + } + parentStatRes, err := client.Stat(ctx, &provider.StatRequest{Ref: parent}) + if err != nil { + return false, err + } + if parentStatRes.GetStatus().GetCode() != rpc.Code_CODE_OK { + return false, errtypes.NewErrtypeFromStatus(parentStatRes.GetStatus()) + } + parentAuthCtx, err := authContextForUser(client, parentStatRes.GetInfo().GetOwner(), s.c.MachineAuthAPIKey) + if err != nil { + return false, err + } + parentPathRes, err := client.GetPath(parentAuthCtx, &provider.GetPathRequest{ResourceId: parentStatRes.GetInfo().GetId()}) + if err != nil { + return false, err + } + + childStatRes, err := client.Stat(ctx, &provider.StatRequest{Ref: child}) + if err != nil { + return false, err + } + if childStatRes.GetStatus().GetCode() == rpc.Code_CODE_NOT_FOUND && utils.IsRelativeReference(child) && child.Path != "." 
{ + childParentRef := &provider.Reference{ + ResourceId: child.ResourceId, + Path: utils.MakeRelativePath(path.Dir(child.Path)), + } + childStatRes, err = client.Stat(ctx, &provider.StatRequest{Ref: childParentRef}) + if err != nil { + return false, err + } + } + if childStatRes.GetStatus().GetCode() != rpc.Code_CODE_OK { + return false, errtypes.NewErrtypeFromStatus(parentStatRes.Status) + } + // TODO: this should use service accounts https://github.com/owncloud/ocis/issues/7597 + childAuthCtx, err := authContextForUser(client, childStatRes.GetInfo().GetOwner(), s.c.MachineAuthAPIKey) + if err != nil { + return false, err + } + childPathRes, err := client.GetPath(childAuthCtx, &provider.GetPathRequest{ResourceId: childStatRes.GetInfo().GetId()}) + if err != nil { + return false, err + } + + cp := childPathRes.Path + "/" + pp := parentPathRes.Path + "/" + return strings.HasPrefix(cp, pp), nil +} + +func (s *svc) referenceIsChildOf(ctx context.Context, selector pool.Selectable[gateway.GatewayAPIClient], child, parent *provider.Reference) (bool, error) { + if child.ResourceId.SpaceId != parent.ResourceId.SpaceId { + return false, nil // Not on the same storage -> not a child + } + + if utils.ResourceIDEqual(child.ResourceId, parent.ResourceId) { + return strings.HasPrefix(child.Path, parent.Path+"/"), nil // Relative to the same resource -> compare paths + } + + if child.ResourceId.SpaceId == utils.ShareStorageSpaceID || parent.ResourceId.SpaceId == utils.ShareStorageSpaceID { + // the sharesstorageprovider needs some special handling + return s.sspReferenceIsChildOf(ctx, selector, child, parent) + } + + client, err := selector.Next() + if err != nil { + return false, err + } + + // the references are on the same storage but relative to different resources + // -> we need to get the path for both resources + childPathRes, err := client.GetPath(ctx, &provider.GetPathRequest{ResourceId: child.ResourceId}) + if err != nil { + if st, ok := status.FromError(err); ok && 
st.Code() == codes.Unimplemented { + return false, nil // the storage provider doesn't support GetPath() -> rely on it taking care of recursion issues + } + return false, err + } + parentPathRes, err := client.GetPath(ctx, &provider.GetPathRequest{ResourceId: parent.ResourceId}) + if err != nil { + return false, err + } + + cp := path.Join(childPathRes.Path, child.Path) + "/" + pp := path.Join(parentPathRes.Path, parent.Path) + "/" + return strings.HasPrefix(cp, pp), nil +} + +// filename returns the base filename from a path and replaces any slashes with an empty string +func filename(p string) string { + return strings.Trim(path.Base(p), "/") +} + +// isBodyEmpty returns true when the Body of the request is Empty +func isBodyEmpty(r *http.Request) bool { + if r.Body != nil && r.Body != http.NoBody { + buf := make([]byte, 0) + _, err := r.Body.Read(buf) + if err != io.EOF { + // We currently do not support extended mkcol https://datatracker.ietf.org/doc/rfc5689/ + // TODO let clients send a body with properties to set on the new resource + return false + } + } + return true +} diff --git a/services/webdav/pkg/ocdav/ocdav_blackbox_test.go b/services/webdav/pkg/ocdav/ocdav_blackbox_test.go new file mode 100644 index 0000000000..f356de14d1 --- /dev/null +++ b/services/webdav/pkg/ocdav/ocdav_blackbox_test.go @@ -0,0 +1,1831 @@ +// Copyright 2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +package ocdav_test + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "path" + "strings" + + cs3gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + cs3user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + cs3rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" + cs3storageprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + cs3types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/global" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "google.golang.org/grpc" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type selector struct { + client gateway.GatewayAPIClient +} + +func (s selector) Next(opts ...pool.Option) (gateway.GatewayAPIClient, error) { + return s.client, nil +} + +// TODO for now we have to test all of ocdav. 
when this testsuite is complete we can move +// the handlers to dedicated packages to reduce the amount of complexity to get a test environment up +var _ = Describe("ocdav", func() { + var ( + handler global.Service + client *mocks.GatewayAPIClient + ctx context.Context + + userspace *cs3storageprovider.StorageSpace + user *cs3user.User + + dataSvr *httptest.Server + rr *httptest.ResponseRecorder + req *http.Request + err error + + basePath string + + // mockPathStat is used to by path based endpoints + mockPathStat = func(path string, s *cs3rpc.Status, info *cs3storageprovider.ResourceInfo) { + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return req.Ref.Path == path + })).Return(&cs3storageprovider.StatResponse{ + Status: s, + Info: info, + }, nil) + } + mockStat = func(ref *cs3storageprovider.Reference, s *cs3rpc.Status, info *cs3storageprovider.ResourceInfo) { + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return utils.ResourceIDEqual(req.Ref.ResourceId, ref.ResourceId) && + (ref.Path == "" || req.Ref.Path == ref.Path) + })).Return(&cs3storageprovider.StatResponse{ + Status: s, + Info: info, + }, nil) + } + mockStatOK = func(ref *cs3storageprovider.Reference, info *cs3storageprovider.ResourceInfo) { + mockStat(ref, status.NewOK(ctx), info) + } + // two mock helpers to build references and resource infos in the userspace of provider-1 + mockReference = func(id, path string) *cs3storageprovider.Reference { + return &cs3storageprovider.Reference{ + ResourceId: &cs3storageprovider.ResourceId{ + StorageId: "provider-1", + SpaceId: "userspace", + OpaqueId: id, + }, + Path: path, + } + } + mockInfo = func(m map[string]interface{}) *cs3storageprovider.ResourceInfo { + + if _, ok := m["storageid"]; !ok { + m["storageid"] = "provider-1" + } + if _, ok := m["spaceid"]; !ok { + m["spaceid"] = "userspace" + } + if _, ok := m["opaqueid"]; !ok { + m["opaqueid"] = "root" 
+ } + if _, ok := m["type"]; !ok { + m["type"] = cs3storageprovider.ResourceType_RESOURCE_TYPE_CONTAINER + } + if _, ok := m["size"]; !ok { + m["size"] = uint64(0) + } + + return &cs3storageprovider.ResourceInfo{ + Id: &cs3storageprovider.ResourceId{ + StorageId: m["storageid"].(string), + SpaceId: m["spaceid"].(string), + OpaqueId: m["opaqueid"].(string), + }, + Type: m["type"].(cs3storageprovider.ResourceType), + Size: m["size"].(uint64), + } + } + mReq *cs3storageprovider.MoveRequest + ) + + BeforeEach(func() { + user = &cs3user.User{Id: &cs3user.UserId{OpaqueId: "username"}, Username: "username"} + + dataSvr = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusCreated) + })) + + ctx = ctxpkg.ContextSetUser(context.Background(), user) + client = &mocks.GatewayAPIClient{} + + cfg := &config.Config{ + FilesNamespace: "/users/{{.Username}}", + WebdavNamespace: "/users/{{.Username}}", + NameValidation: config.NameValidation{ + MaxLength: 255, + InvalidChars: []string{"\f", "\r", "\n", "\\"}, + }, + URLSigningSharedSecret: "testsecret", + } + sel := selector{ + client: client, + } + handler, err = ocdav.NewWith(cfg, nil, ocdav.NewCS3LS(sel), &zerolog.Logger{}, sel) + Expect(err).ToNot(HaveOccurred()) + + userspace = &cs3storageprovider.StorageSpace{ + Opaque: &cs3types.Opaque{ + Map: map[string]*cs3types.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/users/username/"), + }, + }, + }, + Id: &cs3storageprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&cs3storageprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"})}, + Root: &cs3storageprovider.ResourceId{StorageId: "provider-1", SpaceId: "userspace", OpaqueId: "root"}, + Name: "username", + RootInfo: &cs3storageprovider.ResourceInfo{ + Name: "username", + Path: "/users/username", + }, + } + + client.On("GetPublicShare", mock.Anything, mock.Anything).Return(&link.GetPublicShareResponse{ + Status: 
status.NewNotFound(ctx, "not found")}, + nil) + client.On("GetUser", mock.Anything, mock.Anything).Return(&cs3user.GetUserResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + client.On("GetUserByClaim", mock.Anything, mock.Anything).Return(&cs3user.GetUserByClaimResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + + // for public access + client.On("Authenticate", mock.Anything, mock.MatchedBy(func(req *cs3gateway.AuthenticateRequest) bool { + return req.Type == "publicshares" && + strings.HasPrefix(req.ClientId, "tokenfor") && + strings.HasPrefix(req.ClientSecret, "signature||") + })).Return(&cs3gateway.AuthenticateResponse{ + Status: status.NewOK(ctx), + User: user, + Token: "jwt", + }, nil) + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return req.Ref.ResourceId.StorageId == utils.PublicStorageProviderID && + req.Ref.ResourceId.SpaceId == utils.PublicStorageSpaceID && + req.Ref.ResourceId.OpaqueId == "tokenforfile" + })).Return(&cs3storageprovider.StatResponse{ + Status: status.NewOK(ctx), + Info: &cs3storageprovider.ResourceInfo{ + Type: cs3storageprovider.ResourceType_RESOURCE_TYPE_FILE, + }, + }, nil) + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return req.Ref.ResourceId.StorageId == utils.PublicStorageProviderID && + req.Ref.ResourceId.SpaceId == utils.PublicStorageSpaceID && + req.Ref.ResourceId.OpaqueId == "tokenforfolder" + })).Return(&cs3storageprovider.StatResponse{ + Status: status.NewOK(ctx), + Info: &cs3storageprovider.ResourceInfo{ + Type: cs3storageprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + }, + }, nil) + }) + AfterEach(func() { + dataSvr.Close() + }) + + Describe("NewHandler", func() { + It("returns a handler", func() { + Expect(handler).ToNot(BeNil()) + }) + }) + + // TODO for every endpoint test the different WebDAV Methods + + // basic metadata + // PROPFIND + // MKCOL + // DELETE + + // basic 
data + // PUT + // GET + // HEAD + + // move & copy + // MOVE + // COPY + + // additional methods + // PROPPATCH + // LOCK + // UNLOCK + // REPORT + // POST (Tus) + // OPTIONS? + + Context("at the very legacy /webdav endpoint", func() { + + BeforeEach(func() { + // set the webdav endpoint to test + basePath = "/webdav" + + // path based requests at the /webdav endpoint first look up the storage space + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, "/users") + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + }) + + Describe("PROPFIND to root", func() { + + BeforeEach(func() { + // setup the request + rr = httptest.NewRecorder() + req, err = http.NewRequest("PROPFIND", basePath, strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + }) + When("the gateway returns a file list", func() { + It("returns a multistatus with the file info", func() { + + // the ocdav handler uses the space.rootinfo so we don't need to mock stat here + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusMultiStatus)) + Expect(rr).To(HaveHTTPBody(Not(BeEmpty())), "Body must not be empty") + // TODO test listing more thoroughly + }) + + }) + // TODO test when list storage space returns not found + // TODO test when list storage space dos not have a root info + + }) + Describe("PROPFIND to a file", func() { + + BeforeEach(func() { + // set the webdav endpoint to test + basePath = "/webdav/file" + + // setup the request + rr = httptest.NewRecorder() + req, err = http.NewRequest("PROPFIND", basePath, strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + }) + + When("the gateway returns the file info", func() { + It("returns 
a multistatus with the file properties", func() { + + mockStatOK(mockReference("root", "./file"), mockInfo(map[string]interface{}{"opaqueid": "file", "type": cs3storageprovider.ResourceType_RESOURCE_TYPE_FILE, "size": uint64(123)})) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusMultiStatus)) + Expect(rr).To(HaveHTTPBody( + And( + ContainSubstring("%s", basePath), + ContainSubstring("123"))), + "Body must contain resource href and properties") + // TODO test properties more thoroughly + }) + + }) + + When("the gateway returns not found", func() { + It("returns a not found status", func() { + + mockStat(mockReference("root", "./file"), status.NewNotFound(ctx, "not found"), nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNotFound)) + Expect(rr).To(HaveHTTPBody( + And( + ContainSubstring("Sabre\\DAV\\Exception\\NotFound"), + ContainSubstring("Resource not found"))), + "Body must contain sabredav exception and message") + }) + }) + }) + + Describe("MKCOL", func() { + + BeforeEach(func() { + // setup the request + rr = httptest.NewRecorder() + req, err = http.NewRequest("MKCOL", basePath+"/subfolder/newfolder", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + }) + + When("the gateway returns OK", func() { + It("returns a created status", func() { + + // MKCOL needs to check if the resource already exists to return the correct status + mockPathStat("/users/username/subfolder/newfolder", status.NewNotFound(ctx, "not found"), nil) + + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./subfolder/newfolder", + }) + })).Return(&cs3storageprovider.CreateContainerResponse{ + Status: status.NewOK(ctx), + }, nil) + + handler.Handler().ServeHTTP(rr, req) + 
Expect(rr).To(HaveHTTPStatus(http.StatusCreated)) + Expect(rr).To(HaveHTTPBody(BeEmpty()), "Body must be empty") + // TODO expect fileid and etag header? + }) + + }) + + When("the gateway aborts the stat", func() { + // eg when an if match etag header was sent and mismatches + // TODO send lock id + It("returns a precondition failed status", func() { + + // MKCOL needs to check if the resource already exists to return the correct status + // TODO check the etag is forwarded to make the request conditional + // TODO should be part of the CS3 api? + mockPathStat("/users/username/subfolder/newfolder", status.NewAborted(ctx, errors.New("etag mismatch"), "etag mismatch"), nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusPreconditionFailed)) + + Expect(rr).To(HaveHTTPBody( + ContainSubstring("Sabre\\DAV\\Exception\\PreconditionFailed"), + // TODO what message does oc10 return? "error: aborted:" is probably not it + // ContainSubstring("error: aborted: "), + ), + "Body must contain sabredav exception and message") + + }) + }) + + When("the resource already exists", func() { + It("returns a method not allowed status", func() { + + // MKCOL needs to check if the resource already exists to return the correct status + mockPathStat("/users/username/subfolder/newfolder", status.NewOK(ctx), &cs3storageprovider.ResourceInfo{}) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusMethodNotAllowed)) + + Expect(rr).To(HaveHTTPBody( + And( + ContainSubstring("Sabre\\DAV\\Exception\\MethodNotAllowed"), + ContainSubstring("The resource you tried to create already exists"))), + "Body must contain sabredav exception and message") + + }) + }) + + When("an intermediate collection does not exists", func() { + It("returns a conflict status", func() { + + // MKCOL needs to check if the resource already exists to return the correct status + mockPathStat("/users/username/subfolder/newfolder", status.NewNotFound(ctx, "not 
found"), nil) + + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./subfolder/newfolder", + }) + })).Return(&cs3storageprovider.CreateContainerResponse{ + Status: status.NewFailedPrecondition(ctx, errors.New("parent does not exist"), "parent does not exist"), + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusConflict)) + + Expect(rr).To(HaveHTTPBody( + And( + ContainSubstring("Sabre\\DAV\\Exception\\Conflict"), + ContainSubstring("parent does not exist"))), + "Body must contain sabredav exception and message") + + }) + }) + }) + + Describe("DELETE", func() { + + BeforeEach(func() { + // setup the request + rr = httptest.NewRecorder() + req, err = http.NewRequest("DELETE", basePath+"/existingfolder", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + }) + + When("the gateway returns OK", func() { + It("returns a no content status", func() { + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./existingfolder", + }) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewOK(ctx), + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNoContent)) + Expect(rr).To(HaveHTTPBody(BeEmpty()), "Body must be empty") + // TODO expect fileid and etag header? 
+ }) + + }) + + When("the gateway returns not found", func() { + It("returns a method not found status", func() { + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./existingfolder", + }) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNotFound)) + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\NotFoundResource not found"), "Body must have a not found sabredav exception") + + }) + }) + }) + + Describe("PUT", func() { + + BeforeEach(func() { + // setup the request + rr = httptest.NewRecorder() + req, err = http.NewRequest("PUT", basePath+"/newfile", strings.NewReader("new content")) + Expect(err).ToNot(HaveOccurred()) + req.Header.Set(net.HeaderContentLength, "11") + req = req.WithContext(ctx) + + }) + + When("the gateway returns OK", func() { + It("returns a created status", func() { + + client.On("InitiateFileUpload", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.InitiateFileUploadRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./newfile", + }) + })).Return(&cs3gateway.InitiateFileUploadResponse{ + Status: status.NewOK(ctx), + Protocols: []*cs3gateway.FileUploadProtocol{ + { + Protocol: "simple", + UploadEndpoint: dataSvr.URL, + }, + }, + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusCreated)) + Expect(rr).To(HaveHTTPBody(BeEmpty()), "Body must be empty") + // TODO expect fileid and etag header? 
+ }) + }) + + When("the gateway returns aborted", func() { + It("returns a precondition failed status", func() { + + client.On("InitiateFileUpload", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.InitiateFileUploadRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./newfile", + }) + })).Return(&cs3gateway.InitiateFileUploadResponse{ + Status: status.NewAborted(ctx, errors.New("parent does not exist"), "parent does not exist"), + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusPreconditionFailed)) + // TODO Expect(rr).To(HaveHTTPBody(BeEmpty()), "Body must be a sabredav exception") + }) + }) + + When("the resource already exists", func() { + It("returns a conflict status", func() { + + client.On("InitiateFileUpload", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.InitiateFileUploadRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./newfile", + }) + })).Return(&cs3gateway.InitiateFileUploadResponse{ + Status: status.NewFailedPrecondition(ctx, errors.New("precondition failed"), "precondition failed"), + }, nil) + + client.On("Stat", mock.Anything, mock.Anything).Return(&cs3storageprovider.StatResponse{ + Status: status.NewOK(ctx), + Info: &cs3storageprovider.ResourceInfo{ + Type: cs3storageprovider.ResourceType_RESOURCE_TYPE_FILE, + }, + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusConflict)) + // TODO Expect(rr).To(HaveHTTPBody(BeEmpty()), "Body must be a sabredav exception") + }) + }) + + When("the gateway returns not found", func() { + It("returns a not found", func() { + + client.On("InitiateFileUpload", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.InitiateFileUploadRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: 
"./newfile", + }) + })).Return(&cs3gateway.InitiateFileUploadResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNotFound)) + // TODO Expect(rr).To(HaveHTTPBody(BeEmpty()), "Body must be a sabredav exception") + }) + }) + + }) + + Describe("MOVE", func() { + + BeforeEach(func() { + // setup the request + rr = httptest.NewRecorder() + req, err = http.NewRequest("MOVE", basePath+"/file", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + req.Header.Set(net.HeaderDestination, basePath+"/newfile") + req.Header.Set("Overwrite", "T") + + mReq = &cs3storageprovider.MoveRequest{ + Source: &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./file", + }, + Destination: &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./newfile", + }, + } + }) + + When("the gateway returns OK when moving file", func() { + It("the source exists, the destination doesn't exists", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + client.On("Stat", mock.Anything, mock.Anything).Return(&cs3storageprovider.StatResponse{ + Status: status.NewNotFound(ctx, ""), + Info: &cs3storageprovider.ResourceInfo{}, + }, nil).Once() + mockPathStat(".", status.NewOK(ctx), nil) + + client.On("Move", mock.Anything, mReq).Return(&cs3storageprovider.MoveResponse{ + Status: status.NewOK(ctx), + }, nil) + + mockPathStat(mReq.Destination.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: &cs3storageprovider.ResourceId{}}) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusCreated)) + }) + + It("the source and the destination exist", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + mockPathStat(mReq.Destination.Path, status.NewOK(ctx), 
&cs3storageprovider.ResourceInfo{Id: mReq.Destination.ResourceId}) + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, mReq.Destination) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewOK(ctx), + }, nil) + + client.On("Move", mock.Anything, mReq).Return(&cs3storageprovider.MoveResponse{ + Status: status.NewOK(ctx), + }, nil) + + mockPathStat(mReq.Destination.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: &cs3storageprovider.ResourceId{}}) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNoContent)) + }) + }) + + When("the gateway returns error when moving file", func() { + It("the source Stat error", func() { + + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return utils.ResourceEqual(req.Ref, mReq.Source) + })).Return(nil, fmt.Errorf("unexpected io error")) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusInternalServerError)) + }) + + It("moves a file. 
the source not found", func() { + + mockPathStat(mReq.Source.Path, status.NewNotFound(ctx, ""), nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNotFound)) + }) + + It("the destination Stat error", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return utils.ResourceEqual(req.Ref, mReq.Destination) + })).Return(nil, fmt.Errorf("unexpected io error")) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusInternalServerError)) + }) + + It("error when the 'Overwrite' header is 'F'", func() { + + req.Header.Set("Overwrite", "F") + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), nil) + mockPathStat(mReq.Destination.Path, status.NewOK(ctx), nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusBadRequest)) + }) + + It("error when deleting an existing tree", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId, Path: "./file"}) + mockPathStat(mReq.Destination.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Destination.ResourceId, Path: "./newfile"}) + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, mReq.Destination) + })).Return(nil, fmt.Errorf("unexpected io error")) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusInternalServerError)) + }) + + It("error when destination Stat returns unexpected code", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + mockPathStat(mReq.Destination.Path, status.NewInternal(ctx, ""), nil) + + handler.Handler().ServeHTTP(rr, req) + 
Expect(rr).To(HaveHTTPStatus(http.StatusInternalServerError)) + }) + + It("error when Delete returns unexpected code", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId, Path: "./file"}) + mockPathStat(mReq.Destination.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Destination.ResourceId, Path: "./newfile"}) + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, mReq.Destination) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewInvalid(ctx, ""), + }, nil) + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusBadRequest)) + }) + + It("the destination Stat error", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + mockPathStat(mReq.Destination.Path, status.NewNotFound(ctx, ""), nil) + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: ".", + }) + })).Return(nil, fmt.Errorf("unexpected io error")).Once() + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusInternalServerError)) + }) + + It("error when destination Stat is not found", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + mockPathStat(mReq.Destination.Path, status.NewNotFound(ctx, ""), nil) + mockPathStat(".", status.NewNotFound(ctx, ""), nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusConflict)) + }) + + It("an unexpected error when destination Stat", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + mockPathStat(mReq.Destination.Path, 
status.NewNotFound(ctx, ""), nil) + mockPathStat(".", status.NewInvalid(ctx, ""), nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusBadRequest)) + }) + + It("error when removing", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + mockPathStat(mReq.Destination.Path, status.NewNotFound(ctx, ""), nil) + mockPathStat(".", status.NewOK(ctx), nil) + client.On("Move", mock.Anything, mReq).Return(nil, fmt.Errorf("unexpected io error")) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusInternalServerError)) + }) + + It("status 'Aborted' when removing", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), nil) + mockPathStat(mReq.Destination.Path, status.NewNotFound(ctx, ""), nil) + mockPathStat(".", status.NewOK(ctx), nil) + + client.On("Move", mock.Anything, mReq).Return(&cs3storageprovider.MoveResponse{ + Status: status.NewAborted(ctx, fmt.Errorf("aborted"), ""), + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusBadRequest)) + }) + + It("status 'Unimplemented' when removing", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + mockPathStat(mReq.Destination.Path, status.NewNotFound(ctx, ""), nil) + mockPathStat(".", status.NewOK(ctx), nil) + + client.On("Move", mock.Anything, mReq).Return(&cs3storageprovider.MoveResponse{ + Status: status.NewUnimplemented(ctx, fmt.Errorf("unimplemeted"), ""), + }, nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusBadGateway)) + }) + + It("the destination Stat error after moving", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + client.On("Stat", mock.Anything, mock.Anything).Return(&cs3storageprovider.StatResponse{ + Status: status.NewNotFound(ctx, ""), + 
Info: &cs3storageprovider.ResourceInfo{}, + }, nil).Once() + mockPathStat(".", status.NewOK(ctx), nil) + + client.On("Move", mock.Anything, mReq).Return(&cs3storageprovider.MoveResponse{ + Status: status.NewOK(ctx), + }, nil) + + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: mReq.Destination.Path, + }) + })).Return(nil, fmt.Errorf("unexpected io error")) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusInternalServerError)) + }) + + It("the destination Stat returned not OK status after moving", func() { + + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + mockPathStat(mReq.Destination.Path, status.NewNotFound(ctx, ""), nil) + mockPathStat(".", status.NewOK(ctx), nil) + + client.On("Move", mock.Anything, mReq).Return(&cs3storageprovider.MoveResponse{ + Status: status.NewOK(ctx), + }, nil) + + mockPathStat(mReq.Destination.Path, status.NewNotFound(ctx, ""), nil) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNotFound)) + }) + }) + }) + + Describe("MOVE validation failed", func() { + + BeforeEach(func() { + // setup the request + // set the webdav endpoint to test + basePath = "/webdav" + userspace.Id = &cs3storageprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&cs3storageprovider.ResourceId{StorageId: "provider-1", SpaceId: "userspace", OpaqueId: "userspace"})} + userspace.Root = &cs3storageprovider.ResourceId{StorageId: "provider-1", SpaceId: "userspace", OpaqueId: "userspace"} + + // path based requests at the /webdav endpoint first look up the storage space + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || 
strings.HasPrefix(p, "/users") + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + rr = httptest.NewRecorder() + req, err = http.NewRequest("MOVE", basePath+"/file", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + req.Header.Set(net.HeaderDestination, basePath+"/provider-1$userspace!userspace") + req.Header.Set("Overwrite", "T") + mReq = &cs3storageprovider.MoveRequest{ + Source: mockReference("userspace", "./file"), + Destination: mockReference("userspace", ""), + } + }) + + When("the gateway returns error when moving file", func() { + It("error when the source is a file and the destination is a folder", func() { + mockPathStat(mReq.Source.Path, status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}) + + mockStat(mockReference("userspace", ""), status.NewOK(ctx), &cs3storageprovider.ResourceInfo{ + Id: mReq.Destination.ResourceId, Path: mReq.Destination.Path, + Type: cs3storageprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Space: userspace, + }) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusBadRequest)) + }) + }) + }) + }) + + Context("at the /dav/avatars endpoint", func() { + + BeforeEach(func() { + basePath = "/dav/avatars" + }) + + }) + Context("at the legacy /dav/files endpoint", func() { + + BeforeEach(func() { + basePath = "/dav/files" + }) + + }) + Context("at the /dav/meta endpoint", func() { + + BeforeEach(func() { + basePath = "/dav/meta" + }) + + }) + Context("at the /dav/trash-bin endpoint", func() { + + BeforeEach(func() { + basePath = "/dav/trash-bin" + }) + + }) + Context("at the /dav/spaces endpoint", func() { + + BeforeEach(func() { + basePath = "/dav/spaces" + + userspace.Id = &cs3storageprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&cs3storageprovider.ResourceId{StorageId: "provider-1", SpaceId: "userspace", 
OpaqueId: "userspace"})} + userspace.Root = &cs3storageprovider.ResourceId{StorageId: "provider-1", SpaceId: "userspace", OpaqueId: "userspace"} + // path based requests at the /webdav endpoint first look up the storage space + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, "/users") + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + }) + + Describe("MOVE", func() { + // The variables that used in a JustBeforeEach must be defined in the BeforeEach + var reqPath, dstPath, dstFileName string + + JustBeforeEach(func() { + // setup the request + rr = httptest.NewRecorder() + req, err = http.NewRequest("MOVE", basePath+reqPath, strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + req.Header.Set(net.HeaderDestination, basePath+dstPath) + req.Header.Set("Overwrite", "T") + + client.On("GetPath", mock.Anything, mock.Anything).Return(func(ctx context.Context, req *cs3storageprovider.GetPathRequest, _ ...grpc.CallOption) (*cs3storageprovider.GetPathResponse, error) { + switch req.ResourceId.OpaqueId { + case "dstId": + return &cs3storageprovider.GetPathResponse{ + Status: status.NewOK(ctx), + Path: "/dstFileName", + }, nil + default: + return &cs3storageprovider.GetPathResponse{ + Status: status.NewOK(ctx), + Path: "/file", + }, nil + } + }) + + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return req.Ref.Path == mReq.Source.Path + })).Return(&cs3storageprovider.StatResponse{ + Status: status.NewOK(ctx), + Info: &cs3storageprovider.ResourceInfo{Id: mReq.Source.ResourceId}, + }, nil).Once() + + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.StatRequest) bool { + return req.Ref.Path == 
mReq.Destination.Path + })).Return(&cs3storageprovider.StatResponse{ + Status: status.NewOK(ctx), + Info: &cs3storageprovider.ResourceInfo{ + Id: mReq.Source.ResourceId, + ParentId: &cs3storageprovider.ResourceId{StorageId: "provider-1", OpaqueId: "dstId", SpaceId: "userspace"}, + Name: dstFileName, + }, + }, nil) + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, mReq.Destination) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewOK(ctx), + }, nil) + + }) + + When("use the id as a destination. the gateway returns OK when moving file", func() { + BeforeEach(func() { + reqPath = "/provider-1$userspace/file" + dstPath = "/provider-1$userspace!dstId" + dstFileName = "dstFileName" + + mReq = &cs3storageprovider.MoveRequest{ + Source: mockReference("userspace", "./file"), + Destination: mockReference("dstId", "."), + } + }) + It("the source and the destination exist", func() { + + expReq := &cs3storageprovider.MoveRequest{ + Source: &cs3storageprovider.Reference{ResourceId: &cs3storageprovider.ResourceId{ + StorageId: "provider-1", SpaceId: "userspace"}, Path: "./file"}, + Destination: &cs3storageprovider.Reference{ResourceId: &cs3storageprovider.ResourceId{ + StorageId: "provider-1", OpaqueId: "dstId", SpaceId: "userspace"}, Path: "./dstFileName"}, + } + + client.On("Move", mock.Anything, expReq).Return(&cs3storageprovider.MoveResponse{ + Status: status.NewOK(ctx), + }, nil) + + mockPathStat("./dstFileName", status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: &cs3storageprovider.ResourceId{}}) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNoContent)) + }) + }) + When("use the id as a source and destination. 
the gateway returns OK when moving file", func() { + BeforeEach(func() { + reqPath = "/provider-1$userspace!srcId" + dstPath = "/provider-1$userspace!dstId" + dstFileName = "" + + mReq = &cs3storageprovider.MoveRequest{ + Source: mockReference("srcId", "."), + Destination: mockReference("dstId", "."), + } + }) + It("the source and the destination exist", func() { + + expReq := &cs3storageprovider.MoveRequest{ + Source: &cs3storageprovider.Reference{ResourceId: &cs3storageprovider.ResourceId{ + StorageId: "provider-1", OpaqueId: "srcId", SpaceId: "userspace"}, Path: "."}, + Destination: &cs3storageprovider.Reference{ResourceId: &cs3storageprovider.ResourceId{ + StorageId: "provider-1", OpaqueId: "dstId", SpaceId: "userspace"}, Path: "."}, + } + + client.On("Move", mock.Anything, expReq).Return(&cs3storageprovider.MoveResponse{ + Status: status.NewOK(ctx), + }, nil) + + mockPathStat(".", status.NewOK(ctx), &cs3storageprovider.ResourceInfo{Id: &cs3storageprovider.ResourceId{}}) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusNoContent)) + }) + }) + }) + + }) + Context("at the /dav/public-files endpoint", func() { + + BeforeEach(func() { + basePath = "/dav/public-files" + }) + + }) + + // TODO restructure the tests and split them up by endpoint? + // - that should allow reusing the set up of expected requests to the gateway + + // listing spaces is a precondition for path based requests, what if listing spaces currently is broken? 
+ Context("bad requests", func() { + + It("to the /dav/spaces endpoint root return a method not allowed status ", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", "/dav/spaces", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusMethodNotAllowed)) + }) + It("when deleting a space at the /dav/spaces endpoint return method not allowed status", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", "/dav/spaces/trytodeleteme", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusMethodNotAllowed)) + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\MethodNotAlloweddeleting spaces via dav is not allowed"), "Body must have a sabredav exception") + }) + It("with invalid if header return bad request status", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", "/dav/spaces/somespace/foo", strings.NewReader("")) + req.Header.Set("If", "invalid") + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusBadRequest)) + }) + + DescribeTable("returns 415 when no body was expected", + func(method string, path string) { + // as per https://www.rfc-editor.org/rfc/rfc4918#section-8.4 + rr := httptest.NewRecorder() + req, err := http.NewRequest(method, path, strings.NewReader("should be empty")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(http.StatusUnsupportedMediaType)) + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\UnsupportedMediaTypebody must be empty"), "Body must have a sabredav exception") + }, + Entry("MOVE", "MOVE", "/webdav/source"), + Entry("COPY", "COPY", 
"/webdav/source"), + Entry("DELETE", "DELETE", "/webdav/source"), + PEntry("MKCOL", "MKCOL", "/webdav/source"), + ) + + DescribeTable("check naming rules", + func(method string, path string, expectedStatus int) { + rr := httptest.NewRecorder() + req, err := http.NewRequest(method, "", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req.URL.Path = path // we need to overwrite the path here to send invalid chars + + if method == "COPY" || method == "MOVE" { + req.Header.Set(net.HeaderDestination, path+".bak") + } + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + + Expect(rr).To(HaveHTTPBody(HavePrefix("\nSabre\\DAV\\Exception\\BadRequest")), "Body must have a sabredav exception") + }, + Entry("MKCOL no \\f", "MKCOL", "/webdav/forbidden \f char", http.StatusBadRequest), + Entry("MKCOL no \\r", "MKCOL", "/webdav/forbidden \r char", http.StatusBadRequest), + Entry("MKCOL no \\n", "MKCOL", "/webdav/forbidden \n char", http.StatusBadRequest), + Entry("MKCOL no \\\\", "MKCOL", "/webdav/forbidden \\ char", http.StatusBadRequest), + + // COPY source path + Entry("COPY no \\f", "COPY", "/webdav/forbidden \f char", http.StatusBadRequest), + Entry("COPY no \\r", "COPY", "/webdav/forbidden \r char", http.StatusBadRequest), + Entry("COPY no \\n", "COPY", "/webdav/forbidden \n char", http.StatusBadRequest), + Entry("COPY no \\\\", "COPY", "/webdav/forbidden \\ char", http.StatusBadRequest), + + // MOVE source path + Entry("MOVE no \\f", "MOVE", "/webdav/forbidden \f char", http.StatusBadRequest), + Entry("MOVE no \\r", "MOVE", "/webdav/forbidden \r char", http.StatusBadRequest), + Entry("MOVE no \\n", "MOVE", "/webdav/forbidden \n char", http.StatusBadRequest), + Entry("MOVE no \\\\", "MOVE", "/webdav/forbidden \\ char", http.StatusBadRequest), + ) + + DescribeTable("check naming rules", + func(method string, path string, expectedStatus int) { + rr := httptest.NewRecorder() + req, err := http.NewRequest(method, "/webdav/safe 
path", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + + req.Header.Set(net.HeaderDestination, path+".bak") + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + + Expect(rr).To(HaveHTTPBody(HavePrefix("\nSabre\\DAV\\Exception\\BadRequest")), "Body must have a sabredav exception") + }, + // COPY + Entry("COPY no \\f", "COPY", "/webdav/forbidden \f char", http.StatusBadRequest), + Entry("COPY no \\r", "COPY", "/webdav/forbidden \r char", http.StatusBadRequest), + Entry("COPY no \\n", "COPY", "/webdav/forbidden \n char", http.StatusBadRequest), + Entry("COPY no \\\\", "COPY", "/webdav/forbidden \\ char", http.StatusBadRequest), + + // MOVE + Entry("MOVE no \\f", "MOVE", "/webdav/forbidden \f char", http.StatusBadRequest), + Entry("MOVE no \\r", "MOVE", "/webdav/forbidden \r char", http.StatusBadRequest), + Entry("MOVE no \\n", "MOVE", "/webdav/forbidden \n char", http.StatusBadRequest), + Entry("MOVE no \\\\", "MOVE", "/webdav/forbidden \\ char", http.StatusBadRequest), + ) + + }) + + // listing spaces is a precondition for path based requests, what if listing spaces currently is broken? 
+ Context("When listing spaces fails with an error", func() { + + DescribeTable("HandleDelete", + func(endpoint string, expectedPathPrefix string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(nil, fmt.Errorf("unexpected io error")) + + // the spaces endpoint omits the list storage spaces call, it directly executes the delete call + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./foo", + }) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewOK(ctx), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + if expectedStatus == http.StatusInternalServerError { + Expect(rr).To(HaveHTTPBody("\nunexpected io error"), "Body must have a sabredav exception") + } else { + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + } + + }, + Entry("at the /webdav endpoint", "/webdav", "/users", http.StatusInternalServerError), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", http.StatusInternalServerError), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", http.StatusNoContent), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", http.StatusInternalServerError), + ) + + DescribeTable("HandleMkcol", 
+ func(endpoint string, expectedPathPrefix string, expectedStatPath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(nil, fmt.Errorf("unexpected io error")) + + // path based requests need to check if the resource already exists + mockPathStat(expectedStatPath, status.NewNotFound(ctx, "not found"), nil) + + // the spaces endpoint omits the list storage spaces call, it directly executes the create container call + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + return utils.ResourceEqual(req.Ref, &cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: "./foo", + }) + })).Return(&cs3storageprovider.CreateContainerResponse{ + Status: status.NewOK(ctx), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("MKCOL", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + if expectedStatus == http.StatusInternalServerError { + Expect(rr).To(HaveHTTPBody("\nunexpected io error"), "Body must have a sabredav exception") + } else { + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + } + + }, + Entry("at the /webdav endpoint", "/webdav", "/users", "/users/username/foo", http.StatusInternalServerError), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "/users/username/foo", http.StatusInternalServerError), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", http.StatusCreated), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", 
http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", http.StatusInternalServerError), + ) + }) + + Context("When calls fail with an error", func() { + + DescribeTable("HandleDelete", + func(endpoint string, expectedPathPrefix string, expectedPath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedPath, + } + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(nil, fmt.Errorf("unexpected io error")) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + if expectedStatus == http.StatusInternalServerError { + Expect(rr).To(HaveHTTPBody("\nunexpected io error"), "Body must have a sabredav exception") + } else { + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + } + + }, + Entry("at the /webdav endpoint", "/webdav", "/users", "./foo", http.StatusInternalServerError), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "./foo", http.StatusInternalServerError), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "./foo", http.StatusInternalServerError), + Entry("at the /dav/public-files endpoint 
for a file", "/dav/public-files/tokenforfile", "", "", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", ".", http.StatusInternalServerError), + ) + + DescribeTable("HandleMkcol", + func(endpoint string, expectedPathPrefix string, expectedStatPath string, expectedCreatePath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, // FIXME we may need to return the /public storage provider id and mock it + }, nil) + + // path based requests need to check if the resource already exists + mockPathStat(expectedStatPath, status.NewNotFound(ctx, "not found"), nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedCreatePath, + } + + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(nil, fmt.Errorf("unexpected io error")) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("MKCOL", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + if expectedStatus == http.StatusInternalServerError { + Expect(rr).To(HaveHTTPBody("\nunexpected io error"), "Body must have a sabredav exception") + } else { + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + } + + }, + Entry("at the /webdav endpoint", "/webdav", "/users", "/users/username/foo", "./foo", http.StatusInternalServerError), + Entry("at the /dav/files endpoint", 
"/dav/files/username", "/users/username", "/users/username/foo", "./foo", http.StatusInternalServerError), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", "./foo", http.StatusInternalServerError), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", "./foo", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", ".", http.StatusInternalServerError), + ) + + }) + + Context("When calls return ok", func() { + + DescribeTable("HandleDelete", + func(endpoint string, expectedPathPrefix string, expectedPath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedPath, + } + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewOK(ctx), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + + }, + Entry("at the /webdav endpoint", "/webdav", "/users", "./foo", http.StatusNoContent), + Entry("at the /dav/files endpoint", 
"/dav/files/username", "/users/username", "./foo", http.StatusNoContent), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "./foo", http.StatusNoContent), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", ".", http.StatusNoContent), + ) + + DescribeTable("HandleMkcol", + func(endpoint string, expectedPathPrefix string, expectedStatPath string, expectedCreatePath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, // FIXME we may need to return the /public storage provider id and mock it + }, nil) + + // path based requests need to check if the resource already exists + mockPathStat(expectedStatPath, status.NewNotFound(ctx, "not found"), nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedCreatePath, + } + + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.CreateContainerResponse{ + Status: status.NewOK(ctx), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("MKCOL", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + + }, + Entry("at the /webdav endpoint", 
"/webdav", "/users", "/users/username/foo", "./foo", http.StatusCreated), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "/users/username/foo", "./foo", http.StatusCreated), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", "./foo", http.StatusCreated), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", "./foo", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", ".", http.StatusCreated), + ) + + }) + + Context("When the resource is not found", func() { + + DescribeTable("HandleDelete", + func(endpoint string, expectedPathPrefix string, expectedPath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedPath, + } + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + if expectedStatus == http.StatusNotFound { + 
Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\NotFoundResource not found"), "Body must have a not found sabredav exception") + } else { + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + } + }, + Entry("at the /webdav endpoint", "/webdav", "/users", "./foo", http.StatusNotFound), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "./foo", http.StatusNotFound), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "./foo", http.StatusNotFound), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", ".", http.StatusNotFound), + ) + + DescribeTable("HandleMkcol", + func(endpoint string, expectedPathPrefix string, expectedStatPath string, expectedPath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + // path based requests need to check if the resource already exists + mockPathStat(expectedStatPath, status.NewNotFound(ctx, "not found"), nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedPath, + } + + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.CreateContainerResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("MKCOL", endpoint+"/foo", 
strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + if expectedStatus == http.StatusNotFound { + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\NotFoundResource not found"), "Body must have a not found sabredav exception") + } else { + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + } + }, + Entry("at the /webdav endpoint", "/webdav", "/users", "/users/username/foo", "./foo", http.StatusNotFound), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "/users/username/foo", "./foo", http.StatusNotFound), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", "./foo", http.StatusNotFound), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", "", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", ".", http.StatusNotFound), + ) + + }) + + Context("When the operation is forbidden", func() { + + DescribeTable("HandleDelete", + func(endpoint string, expectedPathPrefix string, expectedPath string, locked, userHasAccess bool, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedPath, + } + + if locked { + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { 
+ return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.DeleteResponse{ + Opaque: &cs3types.Opaque{Map: map[string]*cs3types.OpaqueEntry{ + "lockid": {Decoder: "plain", Value: []byte("somelockid")}, + }}, + Status: status.NewPermissionDenied(ctx, fmt.Errorf("permission denied error"), "permission denied message"), + }, nil) + } else { + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewPermissionDenied(ctx, fmt.Errorf("permission denied error"), "permission denied message"), + }, nil) + } + + if userHasAccess { + mockStatOK(&ref, mockInfo(map[string]interface{}{})) + } else { + mockStat(&ref, status.NewPermissionDenied(ctx, fmt.Errorf("permission denied error"), "permission denied message"), nil) + } + + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + if expectedStatus == http.StatusMethodNotAllowed { + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + } else { + if userHasAccess { + if locked { + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\Locked"), "Body must have a locked sabredav exception") + Expect(rr).To(HaveHTTPHeaderWithValue("Lock-Token", "")) + } else { + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\Forbidden"), "Body must have a forbidden sabredav exception") + } + } else { + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\NotFoundResource not found"), "Body must have a not found sabredav exception") + } + } + }, + + // without lock + + // when user has access he should see forbidden status + Entry("at the /webdav endpoint", "/webdav", "/users", "./foo", false, true, http.StatusForbidden), + Entry("at the /dav/files endpoint", 
"/dav/files/username", "/users/username", "./foo", false, true, http.StatusForbidden), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "./foo", false, true, http.StatusForbidden), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "", false, true, http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", ".", false, true, http.StatusForbidden), + // when user does not have access he should get not found status + Entry("at the /webdav endpoint", "/webdav", "/users", "./foo", false, false, http.StatusNotFound), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "./foo", false, false, http.StatusNotFound), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "./foo", false, false, http.StatusNotFound), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "", false, false, http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", ".", false, false, http.StatusNotFound), + + // With lock + + // when user has access he should see locked status + Entry("at the /webdav endpoint", "/webdav", "/users", "./foo", true, true, http.StatusLocked), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "./foo", true, true, http.StatusLocked), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "./foo", true, true, http.StatusLocked), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "", true, true, http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", ".", true, true, 
http.StatusLocked), + // when user does not have access he should get not found status + Entry("at the /webdav endpoint", "/webdav", "/users", "./foo", true, false, http.StatusNotFound), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "./foo", true, false, http.StatusNotFound), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "./foo", true, false, http.StatusNotFound), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "", true, false, http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", ".", true, false, http.StatusNotFound), + ) + + DescribeTable("HandleMkcol", + func(endpoint string, expectedPathPrefix string, expectedStatPath string, expectedPath string, locked, userHasAccess bool, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + // path based requests need to check if the resource already exists + mockPathStat(expectedStatPath, status.NewNotFound(ctx, "not found"), nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedPath, + } + + if locked { + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.CreateContainerResponse{ + Opaque: &cs3types.Opaque{Map: map[string]*cs3types.OpaqueEntry{ + "lockid": {Decoder: "plain", Value: []byte("somelockid")}, + }}, + Status: status.NewPermissionDenied(ctx, 
fmt.Errorf("permission denied error"), "permission denied message"), + }, nil) + } else { + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.CreateContainerResponse{ + Status: status.NewPermissionDenied(ctx, fmt.Errorf("permission denied error"), "permission denied message"), + }, nil) + } + + parentRef := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: utils.MakeRelativePath(path.Dir(expectedPath)), + } + + if userHasAccess { + mockStatOK(&parentRef, mockInfo(map[string]interface{}{})) + } else { + mockStat(&parentRef, status.NewPermissionDenied(ctx, fmt.Errorf("permission denied error"), "permission denied message"), nil) + } + + rr := httptest.NewRecorder() + req, err := http.NewRequest("MKCOL", endpoint+"/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + if expectedStatus == http.StatusMethodNotAllowed { + Expect(rr).To(HaveHTTPBody(""), "Body must be empty") + } else { + if userHasAccess { + if locked { + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\Locked"), "Body must have a locked sabredav exception") + Expect(rr).To(HaveHTTPHeaderWithValue("Lock-Token", "")) + } else { + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\Forbidden"), "Body must have a forbidden sabredav exception") + } + } else { + Expect(rr).To(HaveHTTPBody("\nSabre\\DAV\\Exception\\NotFoundResource not found"), "Body must have a not found sabredav exception") + } + } + }, + + // without lock + + // when user has access he should see forbidden status + Entry("at the /webdav endpoint", "/webdav", "/users", "/users/username/foo", "./foo", false, true, http.StatusForbidden), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "/users/username/foo", "./foo", 
false, true, http.StatusForbidden), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", "./foo", false, true, http.StatusForbidden), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", "", false, true, http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", ".", false, true, http.StatusForbidden), + // when user does not have access he should get not found status + Entry("at the /webdav endpoint", "/webdav", "/users", "/users/username/foo", "./foo", false, false, http.StatusNotFound), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "/users/username/foo", "./foo", false, false, http.StatusNotFound), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", "./foo", false, false, http.StatusNotFound), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", "", false, false, http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", ".", false, false, http.StatusNotFound), + + // With lock + + // when user has access he should see locked status + // FIXME currently the ocdav mkcol handler is not forwarding a lockid ... 
but decomposedfs at least cannot create locks for unmapped resources, yet + PEntry("at the /webdav endpoint", "/webdav", "/users", "/users/username/foo", "./foo", true, true, http.StatusLocked), + PEntry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "/users/username/foo", "./foo", true, true, http.StatusLocked), + PEntry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", "./foo", true, true, http.StatusLocked), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", "", true, true, http.StatusMethodNotAllowed), + PEntry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", ".", true, true, http.StatusLocked), + // when user does not have access he should get not found status + Entry("at the /webdav endpoint", "/webdav", "/users", "/users/username/foo", "./foo", true, false, http.StatusNotFound), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "/users/username/foo", "./foo", true, false, http.StatusNotFound), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", "./foo", true, false, http.StatusNotFound), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", "", true, false, http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", ".", true, false, http.StatusNotFound), + ) + + }) + // listing spaces is a precondition for path based requests, what if listing spaces currently is broken? 
+ Context("locks are forwarded", func() { + + DescribeTable("HandleDelete", + func(endpoint string, expectedPathPrefix string, expectedPath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedPath, + } + + client.On("Delete", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.DeleteRequest) bool { + Expect(utils.ReadPlainFromOpaque(req.Opaque, "lockid")).To(Equal("urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2")) + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.DeleteResponse{ + Status: status.NewOK(ctx), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("DELETE", endpoint+"/foo", strings.NewReader("")) + req.Header.Set("If", "()") + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + }, + Entry("at the /webdav endpoint", "/webdav", "/users", "./foo", http.StatusNoContent), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "./foo", http.StatusNoContent), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "./foo", http.StatusNoContent), + Entry("at the /dav/public-files endpoint for a file", "/dav/public-files/tokenforfile", "", "", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", ".", http.StatusNoContent), + ) + + // FIXME currently the ocdav mkcol handler is not forwarding a 
lockid ... but decomposedfs at least cannot create locks for unmapped resources, yet + PDescribeTable("HandleMkcol", + func(endpoint string, expectedPathPrefix string, expectedStatPath string, expectedPath string, expectedStatus int) { + + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, expectedPathPrefix) + })).Return(&cs3storageprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*cs3storageprovider.StorageSpace{userspace}, + }, nil) + + // path based requests need to check if the resource already exists + mockPathStat(expectedStatPath, status.NewNotFound(ctx, "not found"), nil) + + ref := cs3storageprovider.Reference{ + ResourceId: userspace.Root, + Path: expectedPath, + } + + client.On("CreateContainer", mock.Anything, mock.MatchedBy(func(req *cs3storageprovider.CreateContainerRequest) bool { + Expect(utils.ReadPlainFromOpaque(req.Opaque, "lockid")).To(Equal("urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2")) + return utils.ResourceEqual(req.Ref, &ref) + })).Return(&cs3storageprovider.CreateContainerResponse{ + Status: status.NewOK(ctx), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("MKCOL", endpoint+"/foo", strings.NewReader("")) + req.Header.Set("If", "()") + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.Handler().ServeHTTP(rr, req) + Expect(rr).To(HaveHTTPStatus(expectedStatus)) + }, + Entry("at the /webdav endpoint", "/webdav", "/users", "/users/username/foo", "./foo", http.StatusNoContent), + Entry("at the /dav/files endpoint", "/dav/files/username", "/users/username", "/users/username/foo", "./foo", http.StatusNoContent), + Entry("at the /dav/spaces endpoint", "/dav/spaces/provider-1$userspace!root", "/users/username", "/users/username/foo", "./foo", http.StatusNoContent), + Entry("at the /dav/public-files 
endpoint for a file", "/dav/public-files/tokenforfile", "", "/public/tokenforfolder/foo", "", http.StatusMethodNotAllowed), + Entry("at the /dav/public-files endpoint for a folder", "/dav/public-files/tokenforfolder", "/public/tokenforfolder", "/public/tokenforfolder/foo", ".", http.StatusNoContent), + ) + + }) + +}) diff --git a/services/webdav/pkg/ocdav/ocdav_suite_test.go b/services/webdav/pkg/ocdav/ocdav_suite_test.go new file mode 100644 index 0000000000..26639ca781 --- /dev/null +++ b/services/webdav/pkg/ocdav/ocdav_suite_test.go @@ -0,0 +1,31 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestOcdav(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ocdav Suite") +} diff --git a/services/webdav/pkg/ocdav/ocdav_whitebox_test.go b/services/webdav/pkg/ocdav/ocdav_whitebox_test.go new file mode 100644 index 0000000000..60ea9bd7f8 --- /dev/null +++ b/services/webdav/pkg/ocdav/ocdav_whitebox_test.go @@ -0,0 +1,99 @@ +// Copyright 2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package ocdav

import (
	"errors"
	"testing"

	sprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
	"github.com/opencloud-eu/reva/v2/pkg/storagespace"
	"github.com/test-go/testify/require"
)

// TestWrapResourceID verifies that storagespace.FormatResourceID renders a
// ResourceId as "<storageid>$<spaceid>!<opaqueid>".
func TestWrapResourceID(t *testing.T) {
	expected := "storageid" + "$" + "spaceid" + "!" + "opaqueid"
	wrapped := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "storageid", SpaceId: "spaceid", OpaqueId: "opaqueid"})

	if wrapped != expected {
		t.Errorf("wrapped id doesn't have the expected format: got %s expected %s", wrapped, expected)
	}
}

// TestNameNotEmpty checks the notEmpty validation rule (defined elsewhere in
// this package): names that are empty or whitespace-only are rejected,
// non-blank names pass.
func TestNameNotEmpty(t *testing.T) {
	expErr := errors.New("must not be empty")
	tests := map[string]error{
		"":      expErr,
		" ":     expErr,
		"\n":    expErr,
		"name":  nil,
		"empty": nil,
	}

	for name, expected := range tests {
		rule := notEmpty()
		require.Equal(t, expected, rule(name), name)
	}
}

// TestNameDoesNotContain checks the doesNotContain validation rule (defined
// elsewhere in this package). NOTE(review): for names containing several
// excluded characters the rule is expected to report the first excluded
// character from the configured list (e.g. "bar" with ["a","b"] reports "a").
func TestNameDoesNotContain(t *testing.T) {
	tests := []struct {
		excludedChars []string
		tests         map[string]error
	}{
		{
			[]string{"a"},
			map[string]error{
				"foo": nil,
				"bar": errors.New("must not contain a"),
			},
		},
		{
			[]string{"a", "b"},
			map[string]error{
				"foo": nil,
				"bar": errors.New("must not contain a"),
				"car": errors.New("must not contain a"),
				"bor": errors.New("must not contain b"),
			},
		},
	}

	for _, tt := range tests {
		rule := doesNotContain(tt.excludedChars)
		for name, expected := range tt.tests {
			require.Equal(t, expected, rule(name), name)
		}
	}
}

// TestNameMaxLength checks the isShorterThan validation rule (defined
// elsewhere in this package) against a fixed 9-character name.
func TestNameMaxLength(t *testing.T) {
	name := "123456789"
	tests := []struct {
		MaxLength int
		Error     error
	}{
		{12, nil},
		{8, errors.New("must be shorter than 8")},
		{4, errors.New("must be shorter than 4")},
	}
	for _, tt := range tests {
		rule := isShorterThan(tt.MaxLength)
		require.Equal(t, tt.Error, rule(name), tt.MaxLength)
	}
}
diff --git a/services/webdav/pkg/ocdav/options.go b/services/webdav/pkg/ocdav/options.go
new file mode 100644
index 0000000000..cc4f5a3879
--- /dev/null
+++ b/services/webdav/pkg/ocdav/options.go
@@ -0,0 +1,48 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

package ocdav

import (
	"net/http"
	"strings"

	"github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net"
)

// handleOptions answers WebDAV OPTIONS requests: it advertises the supported
// methods via the Allow header, DAV compliance classes 1 and 2, and — unless
// the request targets the public-files namespace (determined from the base
// URI stored in the request context) — the supported TUS upload extensions.
// It always responds with 204 No Content.
func (s *svc) handleOptions(w http.ResponseWriter, r *http.Request) {
	allow := "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY,"
	allow += " MOVE, UNLOCK, PROPFIND, MKCOL, REPORT, SEARCH,"
	allow += " PUT" // TODO(jfd): only for files ... but we cannot create the full path without a user ... which we only have when credentials are sent

	// NOTE(review): this type-asserts the context value without a comma-ok
	// check; it panics if CtxKeyBaseURI was never set — presumably guaranteed
	// by an upstream middleware, verify against the router setup.
	isPublic := strings.Contains(r.Context().Value(net.CtxKeyBaseURI).(string), "public-files")

	w.Header().Set(net.HeaderContentType, "application/xml")
	w.Header().Set("Allow", allow)
	w.Header().Set("DAV", "1, 2")
	w.Header().Set("MS-Author-Via", "DAV")
	if !isPublic {
		w.Header().Add(net.HeaderAccessControlAllowHeaders, net.HeaderTusResumable)
		w.Header().Add(net.HeaderAccessControlExposeHeaders, strings.Join([]string{net.HeaderTusResumable, net.HeaderTusVersion, net.HeaderTusExtension}, ","))
		w.Header().Set(net.HeaderTusResumable, "1.0.0") // TODO(jfd): only for dirs?
		w.Header().Set(net.HeaderTusVersion, "1.0.0")
		w.Header().Set(net.HeaderTusExtension, "creation,creation-with-upload,checksum,expiration")
		w.Header().Set(net.HeaderTusChecksumAlgorithm, "md5,sha1,crc32")
	}
	w.WriteHeader(http.StatusNoContent)
}
which we only have when credentials are sent + + isPublic := strings.Contains(r.Context().Value(net.CtxKeyBaseURI).(string), "public-files") + + w.Header().Set(net.HeaderContentType, "application/xml") + w.Header().Set("Allow", allow) + w.Header().Set("DAV", "1, 2") + w.Header().Set("MS-Author-Via", "DAV") + if !isPublic { + w.Header().Add(net.HeaderAccessControlAllowHeaders, net.HeaderTusResumable) + w.Header().Add(net.HeaderAccessControlExposeHeaders, strings.Join([]string{net.HeaderTusResumable, net.HeaderTusVersion, net.HeaderTusExtension}, ",")) + w.Header().Set(net.HeaderTusResumable, "1.0.0") // TODO(jfd): only for dirs? + w.Header().Set(net.HeaderTusVersion, "1.0.0") + w.Header().Set(net.HeaderTusExtension, "creation,creation-with-upload,checksum,expiration") + w.Header().Set(net.HeaderTusChecksumAlgorithm, "md5,sha1,crc32") + } + w.WriteHeader(http.StatusNoContent) +} diff --git a/services/webdav/pkg/ocdav/prop/prop.go b/services/webdav/pkg/ocdav/prop/prop.go new file mode 100644 index 0000000000..303c9123ec --- /dev/null +++ b/services/webdav/pkg/ocdav/prop/prop.go @@ -0,0 +1,212 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package prop + +import ( + "bytes" + "encoding/xml" + "unicode/utf8" +) + +// PropertyXML represents a single DAV resource property as defined in RFC 4918. +// http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties +type PropertyXML struct { + // XMLName is the fully qualified name that identifies this property. + XMLName xml.Name + + // Lang is an optional xml:lang attribute. + Lang string `xml:"xml:lang,attr,omitempty"` + + // InnerXML contains the XML representation of the property value. + // See http://www.webdav.org/specs/rfc4918.html#property_values + // + // Property values of complex type or mixed-content must have fully + // expanded XML namespaces or be self-contained with according + // XML namespace declarations. They must not rely on any XML + // namespace declarations within the scope of the XML document, + // even including the DAV: namespace. + InnerXML []byte `xml:",innerxml"` +} + +func xmlEscaped(val string) []byte { + buf := new(bytes.Buffer) + xml.Escape(buf, []byte(val)) + return buf.Bytes() +} + +// EscapedNS returns a new PropertyXML instance while xml-escaping the value +func EscapedNS(namespace string, local string, val string) PropertyXML { + return PropertyXML{ + XMLName: xml.Name{Space: namespace, Local: local}, + Lang: "", + InnerXML: xmlEscaped(val), + } +} + +var ( + escAmp = []byte("&") + escLT = []byte("<") + escGT = []byte(">") + escFFFD = []byte(string(utf8.RuneError)) // Unicode replacement character +) + +// Decide whether the given rune is in the XML Character Range, per +// the Char production of https://www.xml.com/axml/testaxml.htm, +// Section 2.2 Characters. 
func isInCharacterRange(r rune) (inrange bool) {
	return r == 0x09 ||
		r == 0x0A ||
		r == 0x0D ||
		r >= 0x20 && r <= 0xD7FF ||
		r >= 0xE000 && r <= utf8.RuneError || // utf8.RuneError == 0xFFFD, the top of the E000-FFFD range
		r >= 0x10000 && r <= 0x10FFFF
}

// Escaped returns a new PropertyXML instance while replacing only
// * `&` with `&amp;`
// * `<` with `&lt;`
// * `>` with `&gt;`
// as defined in https://www.w3.org/TR/REC-xml/#syntax:
//
// > The ampersand character (&) and the left angle bracket (<) must not appear
// > in their literal form, except when used as markup delimiters, or within a
// > comment, a processing instruction, or a CDATA section. If they are needed
// > elsewhere, they must be escaped using either numeric character references
// > or the strings "&amp;" and "&lt;" respectively. The right angle
// > bracket (>) may be represented using the string "&gt;", and must, for
// > compatibility, be escaped using either "&gt;" or a character reference
// > when it appears in the string "]]>" in content, when that string is not
// > marking the end of a CDATA section.
//
// Runes outside the XML character range, and invalid UTF-8 bytes, are
// replaced with the Unicode replacement character (escFFFD).
//
// The code ignores errors as the legacy Escaped() does
// TODO properly use the space
func Escaped(key, val string) PropertyXML {
	s := []byte(val)
	w := bytes.NewBuffer(make([]byte, 0, len(s)))
	var esc []byte
	last := 0
	for i := 0; i < len(s); {
		r, width := utf8.DecodeRune(s[i:])
		i += width
		switch r {
		case '&':
			esc = escAmp
		case '<':
			esc = escLT
		case '>':
			esc = escGT
		default:
			// width == 1 with RuneError means invalid UTF-8 input, which is
			// replaced; a genuine U+FFFD in the input (width 3) passes through.
			if !isInCharacterRange(r) || (r == utf8.RuneError && width == 1) {
				esc = escFFFD
				break
			}
			continue
		}
		// flush the unescaped run, then the replacement; errors are ignored
		// deliberately (bytes.Buffer writes cannot fail).
		if _, err := w.Write(s[last : i-width]); err != nil {
			break
		}
		if _, err := w.Write(esc); err != nil {
			break
		}
		last = i
	}
	_, _ = w.Write(s[last:])
	return PropertyXML{
		XMLName:  xml.Name{Space: "", Local: key},
		Lang:     "",
		InnerXML: w.Bytes(),
	}
}

// NotFound returns a new PropertyXML instance with an empty value
func NotFound(key string) PropertyXML {
	return PropertyXML{
		XMLName: xml.Name{Space: "", Local: key},
		Lang:    "",
	}
}

// NotFoundNS returns a new PropertyXML instance with the given namespace and an empty value
func NotFoundNS(namespace, key string) PropertyXML {
	return PropertyXML{
		XMLName: xml.Name{Space: namespace, Local: key},
		Lang:    "",
	}
}

// Raw returns a new PropertyXML instance for the given key/value pair
// TODO properly use the space
func Raw(key, val string) PropertyXML {
	return PropertyXML{
		XMLName:  xml.Name{Space: "", Local: key},
		Lang:     "",
		InnerXML: []byte(val),
	}
}

// Next returns the next token, if any, in the XML stream of d.
// RFC 4918 requires to ignore comments, processing instructions
// and directives.
// http://www.webdav.org/specs/rfc4918.html#property_values
// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
func Next(d *xml.Decoder) (xml.Token, error) {
	for {
		t, err := d.Token()
		if err != nil {
			return t, err
		}
		switch t.(type) {
		case xml.Comment, xml.Directive, xml.ProcInst:
			// skipped per RFC 4918 XML extensibility rules
			continue
		default:
			return t, nil
		}
	}
}

// ActiveLock holds active lock xml data
//
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_activelock
//
//	<!ELEMENT activelock (lockscope, locktype, depth, owner?, timeout?,
//	          locktoken?, lockroot)>
type ActiveLock struct {
	XMLName   xml.Name  `xml:"activelock"`
	Exclusive *struct{} `xml:"lockscope>exclusive,omitempty"`
	Shared    *struct{} `xml:"lockscope>shared,omitempty"`
	Write     *struct{} `xml:"locktype>write,omitempty"`
	Depth     string    `xml:"depth"`
	Owner     Owner     `xml:"owner,omitempty"`
	Timeout   string    `xml:"timeout,omitempty"`
	Locktoken string    `xml:"locktoken>href"`
	Lockroot  string    `xml:"lockroot>href,omitempty"`
}

// Owner captures the inner XML of a lock owner element http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
type Owner struct {
	InnerXML string `xml:",innerxml"`
}

// Escape replaces ", &, ', < and > (and, via xml.EscapeText, the control
// characters tab, newline and carriage return) with their xml representation
func Escape(s string) string {
	b := bytes.NewBuffer(nil)
	_ = xml.EscapeText(b, []byte(s))
	return b.String()
}
diff --git a/services/webdav/pkg/ocdav/propfind/propfind.go b/services/webdav/pkg/ocdav/propfind/propfind.go
new file mode 100644
index 0000000000..0cc2ea1eed
--- /dev/null
+++ b/services/webdav/pkg/ocdav/propfind/propfind.go
@@ -0,0 +1,1916 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package propfind + +import ( + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/iancoleman/strcase" + "github.com/opencloud-eu/opencloud/services/thumbnails/pkg/thumbnail" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/prop" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/xs" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/conversions" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" + "github.com/opencloud-eu/reva/v2/pkg/publicshare" + rstatus "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + 
"github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "github.com/opencloud-eu/reva/v2/pkg/signedurl" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "golang.org/x/sync/errgroup" + "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + tracerName = "ocdav" +) + +// these keys are used to lookup in ArbitraryMetadata, generated prop names are lowercased +var ( + audioKeys = []string{ + "album", + "albumArtist", + "artist", + "bitrate", + "composers", + "copyright", + "disc", + "discCount", + "duration", + "genre", + "hasDrm", + "isVariableBitrate", + "title", + "track", + "trackCount", + "year", + } + locationKeys = []string{ + "altitude", + "latitude", + "longitude", + } + imageKeys = []string{ + "width", + "height", + } + photoKeys = []string{ + "cameraMake", + "cameraModel", + "exposureDenominator", + "exposureNumerator", + "fNumber", + "focalLength", + "iso", + "orientation", + "takenDateTime", + } +) + +type countingReader struct { + n int + r io.Reader +} + +// Props represents properties related to a resource +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind) +type Props []xml.Name + +// XML holds the xml representation of a propfind +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind +type XML struct { + XMLName xml.Name `xml:"DAV: propfind"` + Allprop *struct{} `xml:"DAV: allprop"` + Propname *struct{} `xml:"DAV: propname"` + Prop Props `xml:"DAV: prop"` + Include Props `xml:"DAV: include"` +} + +// PropstatXML holds the xml representation of a propfind response +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +type PropstatXML struct { + // Prop requires DAV: to be the default namespace in the enclosing + // XML. 
This is due to the standard encoding/xml package currently + // not honoring namespace declarations inside a xmltag with a + // parent element for anonymous slice elements. + // Use of multistatusWriter takes care of this. + Prop []prop.PropertyXML `xml:"d:prop>_ignored_"` + Status string `xml:"d:status"` + Error *errors.ErrorXML `xml:"d:error"` + ResponseDescription string `xml:"d:responsedescription,omitempty"` +} + +// ResponseXML holds the xml representation of a propfind response +type ResponseXML struct { + XMLName xml.Name `xml:"d:response"` + Href string `xml:"d:href"` + Propstat []PropstatXML `xml:"d:propstat"` + Status string `xml:"d:status,omitempty"` + Error *errors.ErrorXML `xml:"d:error"` + ResponseDescription string `xml:"d:responsedescription,omitempty"` +} + +// MultiStatusResponseXML holds the xml representation of a multistatus propfind response +type MultiStatusResponseXML struct { + XMLName xml.Name `xml:"d:multistatus"` + XmlnsS string `xml:"xmlns:s,attr,omitempty"` + XmlnsD string `xml:"xmlns:d,attr,omitempty"` + XmlnsOC string `xml:"xmlns:oc,attr,omitempty"` + + Responses []*ResponseXML `xml:"d:response"` +} + +// ResponseUnmarshalXML is a workaround for https://github.com/golang/go/issues/13400 +type ResponseUnmarshalXML struct { + XMLName xml.Name `xml:"response"` + Href string `xml:"href"` + Propstat []PropstatUnmarshalXML `xml:"propstat"` + Status string `xml:"status,omitempty"` + Error *errors.ErrorXML `xml:"d:error"` + ResponseDescription string `xml:"responsedescription,omitempty"` +} + +// MultiStatusResponseUnmarshalXML is a workaround for https://github.com/golang/go/issues/13400 +type MultiStatusResponseUnmarshalXML struct { + XMLName xml.Name `xml:"multistatus"` + XmlnsS string `xml:"xmlns:s,attr,omitempty"` + XmlnsD string `xml:"xmlns:d,attr,omitempty"` + XmlnsOC string `xml:"xmlns:oc,attr,omitempty"` + + Responses []*ResponseUnmarshalXML `xml:"response"` +} + +// PropstatUnmarshalXML is a workaround for 
https://github.com/golang/go/issues/13400 +type PropstatUnmarshalXML struct { + // Prop requires DAV: to be the default namespace in the enclosing + // XML. This is due to the standard encoding/xml package currently + // not honoring namespace declarations inside a xmltag with a + // parent element for anonymous slice elements. + // Use of multistatusWriter takes care of this. + Prop []*prop.PropertyXML `xml:"prop"` + Status string `xml:"status"` + Error *errors.ErrorXML `xml:"d:error"` + ResponseDescription string `xml:"responsedescription,omitempty"` +} + +// spaceData is used to remember the space for a resource info +type spaceData struct { + Ref *provider.Reference + SpaceType string +} + +// NewMultiStatusResponseXML returns a preconfigured instance of MultiStatusResponseXML +func NewMultiStatusResponseXML() *MultiStatusResponseXML { + return &MultiStatusResponseXML{ + XmlnsD: "DAV:", + XmlnsS: "http://sabredav.org/ns", + XmlnsOC: "http://owncloud.org/ns", + } +} + +// Handler handles propfind requests +type Handler struct { + PublicURL string + selector pool.Selectable[gateway.GatewayAPIClient] + c *config.Config + urlSigner signedurl.Signer +} + +// NewHandler returns a new PropfindHandler instance +func NewHandler(publicURL string, selector pool.Selectable[gateway.GatewayAPIClient], signer signedurl.Signer, c *config.Config) *Handler { + return &Handler{ + PublicURL: publicURL, + selector: selector, + c: c, + urlSigner: signer, + } +} + +// HandlePathPropfind handles a path based propfind request +// ns is the namespace that is prefixed to the path in the cs3 namespace +func (p *Handler) HandlePathPropfind(w http.ResponseWriter, r *http.Request, ns string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), fmt.Sprintf("%s %v", r.Method, r.URL.Path)) + defer span.End() + + fn := path.Join(ns, r.URL.Path) // TODO do we still need to jail if we query the registry about the spaces? 
	sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger()
	dh := r.Header.Get(net.HeaderDepth)

	depth, err := net.ParseDepth(dh)
	if err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, "Invalid Depth header value")
		span.SetAttributes(semconv.HTTPResponseStatusCodeKey.Int(http.StatusBadRequest))
		sublog.Debug().Str("depth", dh).Msg(err.Error())
		w.WriteHeader(http.StatusBadRequest)
		m := fmt.Sprintf("Invalid Depth header value: %v", dh)
		b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
		errors.HandleWebdavError(&sublog, w, b, err)
		return
	}

	// NOTE(review): the config field name "AllowPropfindDepthInfinitiy" is misspelled
	// ("Infinitiy"); renaming it may break existing configuration — confirm before fixing.
	if depth == net.DepthInfinity && !p.c.AllowPropfindDepthInfinitiy {
		span.RecordError(errors.ErrInvalidDepth)
		span.SetStatus(codes.Error, "DEPTH: infinity is not supported")
		span.SetAttributes(semconv.HTTPResponseStatusCodeKey.Int(http.StatusBadRequest))
		sublog.Debug().Str("depth", dh).Msg(errors.ErrInvalidDepth.Error())
		w.WriteHeader(http.StatusBadRequest)
		m := fmt.Sprintf("Invalid Depth header value: %v", dh)
		b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
		errors.HandleWebdavError(&sublog, w, b, err)
		return
	}

	pf, status, err := ReadPropfind(r.Body)
	if err != nil {
		sublog.Debug().Err(err).Msg("error reading propfind request")
		w.WriteHeader(status)
		return
	}

	// retrieve a specific storage space
	client, err := p.selector.Next()
	if err != nil {
		sublog.Error().Err(err).Msg("error retrieving a gateway service client")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// TODO look up all spaces and request the root_info in the field mask
	spaces, rpcStatus, err := spacelookup.LookUpStorageSpacesForPathWithChildren(ctx, client, fn)
	if err != nil {
		sublog.Error().Err(err).Msg("error sending a grpc request")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	if rpcStatus.Code != rpc.Code_CODE_OK {
		errors.HandleErrorStatus(&sublog, w, rpcStatus)
		return
	}

	resourceInfos, sendTusHeaders, ok := p.getResourceInfos(ctx, w, r, pf, spaces, fn, depth, sublog)
	if !ok {
		// getResourceInfos handles responses in case of an error so we can just return here.
		return
	}
	p.propfindResponse(ctx, w, r, ns, pf, sendTusHeaders, resourceInfos, sublog)
}

// HandleSpacesPropfind handles a spaces based propfind request.
// It validates the Depth header, resolves the space reference, stats the
// requested resource, optionally lists children, and renders a multistatus
// response with all paths prefixed by the space id.
func (p *Handler) HandleSpacesPropfind(w http.ResponseWriter, r *http.Request, spaceID string) {
	ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces_propfind")
	defer span.End()

	sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Logger()
	dh := r.Header.Get(net.HeaderDepth)

	depth, err := net.ParseDepth(dh)
	if err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, "Invalid Depth header value")
		span.SetAttributes(semconv.HTTPResponseStatusCodeKey.Int(http.StatusBadRequest))
		sublog.Debug().Str("depth", dh).Msg(err.Error())
		w.WriteHeader(http.StatusBadRequest)
		m := fmt.Sprintf("Invalid Depth header value: %v", dh)
		b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
		errors.HandleWebdavError(&sublog, w, b, err)
		return
	}

	if depth == net.DepthInfinity && !p.c.AllowPropfindDepthInfinitiy {
		span.RecordError(errors.ErrInvalidDepth)
		span.SetStatus(codes.Error, "DEPTH: infinity is not supported")
		span.SetAttributes(semconv.HTTPResponseStatusCodeKey.Int(http.StatusBadRequest))
		sublog.Debug().Str("depth", dh).Msg(errors.ErrInvalidDepth.Error())
		w.WriteHeader(http.StatusBadRequest)
		m := fmt.Sprintf("Invalid Depth header value: %v", dh)
		b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
		errors.HandleWebdavError(&sublog, w, b, err)
		return
	}

	pf, status, err := ReadPropfind(r.Body)
	if err != nil {
		sublog.Debug().Err(err).Msg("error reading propfind request")
		w.WriteHeader(status)
		return
	}

	ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path)
	if err != nil {
		sublog.Debug().Msg("invalid space id")
		w.WriteHeader(http.StatusBadRequest)
		m := fmt.Sprintf("Invalid space id: %v", spaceID)
		b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
		errors.HandleWebdavError(&sublog, w, b, err)
		return
	}

	client, err := p.selector.Next()
	if err != nil {
		sublog.Error().Err(err).Msg("error getting grpc client")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	metadataKeys, _ := metadataKeys(pf)

	// stat the reference and request the space in the field mask
	res, err := client.Stat(ctx, &provider.StatRequest{
		Ref:                   &ref,
		ArbitraryMetadataKeys: metadataKeys,
		FieldMask:             &fieldmaskpb.FieldMask{Paths: []string{"*"}}, // TODO use more sophisticated filter? we don't need all space properties, afaict only the spacetype
	})
	if err != nil {
		// NOTE(review): this is a Stat transport failure, but the log message says
		// "error getting grpc client" — looks copy-pasted; consider correcting.
		sublog.Error().Err(err).Msg("error getting grpc client")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if res.Status.Code != rpc.Code_CODE_OK {
		status := rstatus.HTTPStatusFromCode(res.Status.Code)
		if res.Status.Code == rpc.Code_CODE_ABORTED {
			// aborted is used for etag an lock mismatches, which translates to 412
			// in case a real Conflict response is needed, the calling code needs to send the header
			status = http.StatusPreconditionFailed
		}
		m := res.Status.Message
		if res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED {
			// check if user has access to resource
			sRes, err := client.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ResourceId: ref.GetResourceId()}})
			if err != nil {
				sublog.Error().Err(err).Msg("error performing stat grpc request")
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
			if sRes.Status.Code != rpc.Code_CODE_OK {
				// return not found error so we do not leak existence of a space
				status = http.StatusNotFound
			}
		}
		if status == http.StatusNotFound {
			m = "Resource not found" // mimic the oc10 error message
		}
		w.WriteHeader(status)
		b, err := errors.Marshal(status, m, "", "")
		errors.HandleWebdavError(&sublog, w, b, err)
		return
	}
	// NOTE(review): "space" stays nil when the stat already carried a Space; below
	// it is only assigned to infos whose Space is nil — confirm that is intended.
	var space *provider.StorageSpace
	if res.Info.Space == nil {
		sublog.Debug().Msg("stat did not include a space, executing an additional lookup request")
		// fake a space root
		space = &provider.StorageSpace{
			Id: &provider.StorageSpaceId{OpaqueId: spaceID},
			Opaque: &typesv1beta1.Opaque{
				Map: map[string]*typesv1beta1.OpaqueEntry{
					"path": {
						Decoder: "plain",
						Value:   []byte("/"),
					},
				},
			},
			Root:     ref.ResourceId,
			RootInfo: res.Info,
		}
	}

	res.Info.Path = r.URL.Path

	resourceInfos := []*provider.ResourceInfo{
		res.Info,
	}
	if res.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth != net.DepthZero {
		childInfos, ok := p.getSpaceResourceInfos(ctx, w, r, pf, &ref, r.URL.Path, depth, sublog)
		if !ok {
			// getSpaceResourceInfos handles responses in case of an error so we can just return here.
			return
		}
		resourceInfos = append(resourceInfos, childInfos...)
	}

	// prefix space id to paths
	for i := range resourceInfos {
		resourceInfos[i].Path = path.Join("/", spaceID, resourceInfos[i].Path)
		// add space to info so propfindResponse can access space type
		if resourceInfos[i].Space == nil {
			resourceInfos[i].Space = space
		}
	}

	sendTusHeaders := true
	// let clients know this collection supports tus.io POST requests to start uploads
	if res.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
		if res.Info.Opaque != nil {
			_, ok := res.Info.Opaque.Map["disable_tus"]
			sendTusHeaders = !ok
		}
	}

	p.propfindResponse(ctx, w, r, "", pf, sendTusHeaders, resourceInfos, sublog)
}

// propfindResponse marshals the collected resource infos into a WebDAV
// multistatus body and writes it, together with DAV/tus/Vary headers, to w.
// namespace is stripped semantics for the path handler; it is "" for the
// spaces handler and "/public" suppresses share-type lookups below.
func (p *Handler) propfindResponse(ctx context.Context, w http.ResponseWriter, r *http.Request, namespace string, pf XML, sendTusHeaders bool, resourceInfos []*provider.ResourceInfo, log zerolog.Logger) {
	ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(ctx, "propfind_response")
	defer span.End()

	var linkshares
map[string]struct{}
	// public link access does not show share-types
	// oc:share-type is not part of an allprops response
	if namespace != "/public" {
		// only fetch this if property was queried
		for _, prop := range pf.Prop {
			if prop.Space == net.NsOwncloud && (prop.Local == "share-types" || prop.Local == "permissions") {
				filters := make([]*link.ListPublicSharesRequest_Filter, 0, len(resourceInfos))
				for i := range resourceInfos {
					// FIXME this is expensive
					// the filters array grow by one for every file in a folder
					// TODO store public links as grants on the storage, reassembling them here is too costly
					// we can then add the filter if the file has share-types=3 in the opaque,
					// same as user / group shares for share indicators
					filters = append(filters, publicshare.ResourceIDFilter(resourceInfos[i].Id))
				}
				client, err := p.selector.Next()
				if err != nil {
					log.Error().Err(err).Msg("error getting grpc client")
					w.WriteHeader(http.StatusInternalServerError)
					return
				}
				listResp, err := client.ListPublicShares(ctx, &link.ListPublicSharesRequest{Filters: filters})
				if err == nil {
					linkshares = make(map[string]struct{}, len(listResp.Share))
					for i := range listResp.Share {
						linkshares[listResp.Share[i].ResourceId.OpaqueId] = struct{}{}
					}
				} else {
					// best effort: the response is still rendered without link-share markers
					log.Error().Err(err).Msg("propfindResponse: couldn't list public shares")
					span.SetStatus(codes.Error, err.Error())
				}
				break
			}
		}
	}

	prefer := net.ParsePrefer(r.Header.Get(net.HeaderPrefer))
	returnMinimal := prefer[net.HeaderPreferReturn] == "minimal"

	propRes, err := MultistatusResponse(ctx, &pf, resourceInfos, p.PublicURL, namespace, linkshares, returnMinimal, p.urlSigner)
	if err != nil {
		log.Error().Err(err).Msg("error formatting propfind")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol")
	w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8")
	if sendTusHeaders {
		w.Header().Add(net.HeaderAccessControlExposeHeaders, net.HeaderTusResumable)
		w.Header().Add(net.HeaderAccessControlExposeHeaders, net.HeaderTusVersion)
		w.Header().Add(net.HeaderAccessControlExposeHeaders, net.HeaderTusExtension)
		// collapse the multiple Add()ed values into a single comma-separated header value
		w.Header().Set(net.HeaderAccessControlExposeHeaders, strings.Join(w.Header().Values(net.HeaderAccessControlExposeHeaders), ", "))
		w.Header().Set(net.HeaderTusResumable, "1.0.0")
		w.Header().Set(net.HeaderTusVersion, "1.0.0")
		w.Header().Set(net.HeaderTusExtension, "creation, creation-with-upload, checksum, expiration")
	}
	w.Header().Add(net.HeaderVary, net.HeaderPrefer)
	w.Header().Set(net.HeaderVary, strings.Join(w.Header().Values(net.HeaderVary), ", "))
	if returnMinimal {
		w.Header().Set(net.HeaderPreferenceApplied, "return=minimal")
	}

	w.WriteHeader(http.StatusMultiStatus)
	if _, err := w.Write(propRes); err != nil {
		// headers are already sent; nothing more we can do than log
		log.Err(err).Msg("error writing response")
	}
}

// statSpace stats a single reference via the gateway and returns the resource
// info, the rpc status and any transport error.
// TODO this is just a stat -> rename
func (p *Handler) statSpace(ctx context.Context, ref *provider.Reference, metadataKeys, fieldMaskPaths []string) (*provider.ResourceInfo, *rpc.Status, error) {
	client, err := p.selector.Next()
	if err != nil {
		return nil, nil, err
	}
	req := &provider.StatRequest{
		Ref:                   ref,
		ArbitraryMetadataKeys: metadataKeys,
		FieldMask:             &fieldmaskpb.FieldMask{Paths: fieldMaskPaths},
	}
	res, err := client.Stat(ctx, req)
	if err != nil {
		return nil, nil, err
	}
	return res.GetInfo(), res.GetStatus(), nil
}

// getResourceInfos collects the resource infos for a path-based PROPFIND over
// all mounted storage spaces. It returns the infos, whether tus headers should
// be sent, and ok=false when an error response has already been written to w.
func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r *http.Request, pf XML, spaces []*provider.StorageSpace, requestPath string, depth net.Depth, log zerolog.Logger) ([]*provider.ResourceInfo, bool, bool) {
	ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "get_resource_infos")
	span.SetAttributes(attribute.KeyValue{Key: "requestPath", Value: attribute.StringValue(requestPath)})
	span.SetAttributes(attribute.KeyValue{Key: "depth", Value:
attribute.StringValue(depth.String())})
	defer span.End()

	metadataKeys, fieldMaskPaths := metadataKeys(pf)

	// we need to stat all spaces to aggregate the root etag, mtime and size
	// TODO cache per space (hah, no longer per user + per space!)
	var (
		err                 error
		rootInfo            *provider.ResourceInfo
		mostRecentChildInfo *provider.ResourceInfo
		aggregatedChildSize uint64
		spaceMap            = make(map[*provider.ResourceInfo]spaceData, len(spaces))
	)
	for _, space := range spaces {
		spacePath := ""
		if spacePath = utils.ReadPlainFromOpaque(space.Opaque, "path"); spacePath == "" {
			continue // not mounted
		}
		if space.RootInfo == nil {
			// lazily stat the space root if the lookup did not deliver it
			spaceRef, err := spacelookup.MakeStorageSpaceReference(space.Id.OpaqueId, ".")
			if err != nil {
				continue
			}
			info, status, err := p.statSpace(ctx, &spaceRef, metadataKeys, fieldMaskPaths)
			if err != nil || status.GetCode() != rpc.Code_CODE_OK {
				continue
			}
			space.RootInfo = info
		}

		// TODO separate stats to the path or to the children, after statting all children update the mtime/etag
		// TODO get mtime, and size from space as well, so we no longer have to stat here? would require sending the requested metadata keys as well
		// root should be a ResourceInfo so it can contain the full stat, not only the id ... do we even need spaces then?
		// metadata keys could all be prefixed with "root." to indicate we want more than the root id ...
		// TODO can we reuse the space.rootinfo?
		spaceRef := spacelookup.MakeRelativeReference(space, requestPath, false)
		var info *provider.ResourceInfo
		if spaceRef.Path == "." && utils.ResourceIDEqual(spaceRef.ResourceId, space.Root) {
			// request targets the space root itself; reuse the already fetched info
			info = space.RootInfo
		} else {
			var status *rpc.Status
			info, status, err = p.statSpace(ctx, spaceRef, metadataKeys, fieldMaskPaths)
			if err != nil || status.GetCode() != rpc.Code_CODE_OK {
				continue
			}
		}

		// adjust path
		info.Path = filepath.Join(spacePath, spaceRef.Path)
		info.Name = filepath.Base(info.Path)

		spaceMap[info] = spaceData{Ref: spaceRef, SpaceType: space.SpaceType}

		if rootInfo == nil && requestPath == info.Path {
			rootInfo = info
		} else if requestPath != spacePath && strings.HasPrefix(spacePath, requestPath) { // Check if the space is a child of the requested path
			// NOTE(review): plain prefix match — "/foo" would also match "/foobar";
			// presumably paths are normalized upstream, but confirm.
			// aggregate child metadata
			aggregatedChildSize += info.Size
			if mostRecentChildInfo == nil {
				mostRecentChildInfo = info
				continue
			}
			if mostRecentChildInfo.Mtime == nil || (info.Mtime != nil && utils.TSToUnixNano(info.Mtime) > utils.TSToUnixNano(mostRecentChildInfo.Mtime)) {
				mostRecentChildInfo = info
			}
		}
	}

	if len(spaceMap) == 0 || rootInfo == nil {
		// TODO if we have children invent node on the fly
		w.WriteHeader(http.StatusNotFound)
		m := "Resource not found"
		b, err := errors.Marshal(http.StatusNotFound, m, "", "")
		errors.HandleWebdavError(&log, w, b, err)
		return nil, false, false
	}
	if mostRecentChildInfo != nil {
		// propagate the most recent child's mtime/etag to the aggregated root
		if rootInfo.Mtime == nil || (mostRecentChildInfo.Mtime != nil && utils.TSToUnixNano(mostRecentChildInfo.Mtime) > utils.TSToUnixNano(rootInfo.Mtime)) {
			rootInfo.Mtime = mostRecentChildInfo.Mtime
			if mostRecentChildInfo.Etag != "" {
				rootInfo.Etag = mostRecentChildInfo.Etag
			}
		}
		if rootInfo.Etag == "" {
			rootInfo.Etag = mostRecentChildInfo.Etag
		}
	}

	// add size of children
	rootInfo.Size += aggregatedChildSize

	resourceInfos := []*provider.ResourceInfo{
		rootInfo, // PROPFIND always includes the root resource
	}

	if rootInfo.Type == provider.ResourceType_RESOURCE_TYPE_FILE || depth == net.DepthZero {
		// If the resource is a file then it can't have any children so we can
		// stop here.
		return resourceInfos, true, true
	}

	childInfos := map[string]*provider.ResourceInfo{}
	for spaceInfo, spaceData := range spaceMap {
		switch {
		case spaceInfo.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth != net.DepthInfinity:
			addChild(childInfos, spaceInfo, requestPath, rootInfo)

		case spaceInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth == net.DepthOne:
			switch {
			case strings.HasPrefix(requestPath, spaceInfo.Path) && spaceData.SpaceType != "virtual":
				client, err := p.selector.Next()
				if err != nil {
					log.Error().Err(err).Msg("error getting grpc client")
					w.WriteHeader(http.StatusInternalServerError)
					return nil, false, false
				}
				req := &provider.ListContainerRequest{
					Ref:                   spaceData.Ref,
					ArbitraryMetadataKeys: metadataKeys,
				}
				res, err := client.ListContainer(ctx, req)
				if err != nil {
					log.Error().Err(err).Msg("error sending list container grpc request")
					w.WriteHeader(http.StatusInternalServerError)
					return nil, false, false
				}

				if res.Status.Code != rpc.Code_CODE_OK {
					log.Debug().Interface("status", res.Status).Msg("List Container not ok, skipping")
					continue
				}
				for _, info := range res.Infos {
					info.Path = path.Join(requestPath, info.Path)
				}
				resourceInfos = append(resourceInfos, res.Infos...)
			case strings.HasPrefix(spaceInfo.Path, requestPath): // space is a deep child of the requested path
				addChild(childInfos, spaceInfo, requestPath, rootInfo)
			}

		case depth == net.DepthInfinity:
			// use a stack to explore sub-containers breadth-first
			if spaceInfo != rootInfo {
				resourceInfos = append(resourceInfos, spaceInfo)
			}
			stack := []*provider.ResourceInfo{spaceInfo}
			for len(stack) != 0 {
				info := stack[0]
				stack = stack[1:]

				if info.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER || spaceData.SpaceType == "virtual" {
					continue
				}
				client, err := p.selector.Next()
				if err != nil {
					log.Error().Err(err).Msg("error getting grpc client")
					w.WriteHeader(http.StatusInternalServerError)
					return nil, false, false
				}
				req := &provider.ListContainerRequest{
					Ref: &provider.Reference{
						ResourceId: spaceInfo.Id,
						// TODO here we cut of the path that we added after stating the space above
						Path: utils.MakeRelativePath(strings.TrimPrefix(info.Path, spaceInfo.Path)),
					},
					ArbitraryMetadataKeys: metadataKeys,
				}
				res, err := client.ListContainer(ctx, req) // FIXME public link depth infinity -> "gateway: could not find provider: gateway: error calling ListStorageProviders: rpc error: code = PermissionDenied desc = auth: core access token is invalid"
				if err != nil {
					log.Error().Err(err).Interface("info", info).Msg("error sending list container grpc request")
					w.WriteHeader(http.StatusInternalServerError)
					return nil, false, false
				}
				if res.Status.Code != rpc.Code_CODE_OK {
					log.Debug().Interface("status", res.Status).Msg("List Container not ok, skipping")
					continue
				}

				// check sub-containers in reverse order and add them to the stack
				// the reversed order here will produce a more logical sorting of results
				for i := len(res.Infos) - 1; i >= 0; i-- {
					// add path to resource
					res.Infos[i].Path = filepath.Join(info.Path, res.Infos[i].Path)
					if res.Infos[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
						stack = append(stack, res.Infos[i])
					}
				}

				resourceInfos = append(resourceInfos, res.Infos...)
				// TODO: stream response to avoid storing too many results in memory
				// we can do that after having stated the root.
			}
		}
	}

	if rootInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
		// now add all aggregated child infos
		for _, childInfo := range childInfos {
			resourceInfos = append(resourceInfos, childInfo)
		}
	}

	sendTusHeaders := true
	// let clients know this collection supports tus.io POST requests to start uploads
	if rootInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
		if rootInfo.Opaque != nil {
			_, ok := rootInfo.Opaque.Map["disable_tus"]
			sendTusHeaders = !ok
		}
	}

	return resourceInfos, sendTusHeaders, true
}

// getSpaceResourceInfos lists the children of ref for a spaces-based PROPFIND,
// recursing breadth-first for Depth: infinity. It returns ok=false when an
// error response has already been written to w.
func (p *Handler) getSpaceResourceInfos(ctx context.Context, w http.ResponseWriter, r *http.Request, pf XML, ref *provider.Reference, requestPath string, depth net.Depth, log zerolog.Logger) ([]*provider.ResourceInfo, bool) {
	ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "get_space_resource_infos")
	span.SetAttributes(attribute.KeyValue{Key: "requestPath", Value: attribute.StringValue(requestPath)})
	span.SetAttributes(attribute.KeyValue{Key: "depth", Value: attribute.StringValue(depth.String())})
	defer span.End()

	client, err := p.selector.Next()
	if err != nil {
		log.Error().Err(err).Msg("error getting grpc client")
		w.WriteHeader(http.StatusInternalServerError)
		return nil, false
	}

	metadataKeys, _ := metadataKeys(pf)

	resourceInfos := []*provider.ResourceInfo{}

	req := &provider.ListContainerRequest{
		Ref:                   ref,
		ArbitraryMetadataKeys: metadataKeys,
		FieldMask:             &fieldmaskpb.FieldMask{Paths: []string{"*"}}, // TODO use more sophisticated filter
	}
	res, err := client.ListContainer(ctx, req)
	if err != nil {
		log.Error().Err(err).Msg("error sending list container grpc request")
		w.WriteHeader(http.StatusInternalServerError)
		return
nil, false
	}

	if res.Status.Code != rpc.Code_CODE_OK {
		// NOTE(review): the message says "skipping" but this branch answers 500 and
		// aborts — inconsistent with the path-based variant, which continues. Confirm
		// which behavior is intended.
		log.Debug().Interface("status", res.Status).Msg("List Container not ok, skipping")
		w.WriteHeader(http.StatusInternalServerError)
		return nil, false
	}
	for _, info := range res.Infos {
		info.Path = path.Join(requestPath, info.Path)
	}
	resourceInfos = append(resourceInfos, res.Infos...)

	if depth == net.DepthInfinity {
		// use a stack to explore sub-containers breadth-first
		stack := resourceInfos
		for len(stack) != 0 {
			info := stack[0]
			stack = stack[1:]

			if info.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER /*|| space.SpaceType == "virtual"*/ {
				continue
			}
			req := &provider.ListContainerRequest{
				Ref: &provider.Reference{
					ResourceId: info.Id,
					Path:       ".",
				},
				ArbitraryMetadataKeys: metadataKeys,
			}
			res, err := client.ListContainer(ctx, req) // FIXME public link depth infinity -> "gateway: could not find provider: gateway: error calling ListStorageProviders: rpc error: code = PermissionDenied desc = auth: core access token is invalid"
			if err != nil {
				log.Error().Err(err).Interface("info", info).Msg("error sending list container grpc request")
				w.WriteHeader(http.StatusInternalServerError)
				return nil, false
			}
			if res.Status.Code != rpc.Code_CODE_OK {
				log.Debug().Interface("status", res.Status).Msg("List Container not ok, skipping")
				continue
			}

			// check sub-containers in reverse order and add them to the stack
			// the reversed order here will produce a more logical sorting of results
			for i := len(res.Infos) - 1; i >= 0; i-- {
				// add path to resource
				res.Infos[i].Path = filepath.Join(info.Path, res.Infos[i].Path)
				res.Infos[i].Path = utils.MakeRelativePath(res.Infos[i].Path)
				if res.Infos[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
					stack = append(stack, res.Infos[i])
				}
			}

			resourceInfos = append(resourceInfos, res.Infos...)
			// TODO: stream response to avoid storing too many results in memory
			// we can do that after having stated the root.
		}
	}

	return resourceInfos, true
}

// metadataKeysWithPrefix joins each key with the given prefix using a dot,
// e.g. ("libre.graph.audio", ["title"]) -> ["libre.graph.audio.title"].
func metadataKeysWithPrefix(prefix string, keys []string) []string {
	fullKeys := []string{}
	for _, key := range keys {
		fullKeys = append(fullKeys, fmt.Sprintf("%s.%s", prefix, key))
	}
	return fullKeys
}

// metadataKeys splits the propfind properties into arbitrary metadata and ResourceInfo field mask paths
func metadataKeys(pf XML) ([]string, []string) {

	var metadataKeys []string
	var fieldMaskKeys []string

	if pf.Allprop != nil {
		// TODO this changes the behavior and returns all properties if allprops has been set,
		// but allprops should only return some default properties
		// see https://tools.ietf.org/html/rfc4918#section-9.1
		// the description of arbitrary_metadata_keys in https://cs3org.github.io/cs3apis/#cs3.storage.provider.v1beta1.ListContainerRequest an others may need clarification
		// tracked in https://github.com/cs3org/cs3apis/issues/104
		metadataKeys = append(metadataKeys, "*")
		fieldMaskKeys = append(fieldMaskKeys, "*")
	} else {
		metadataKeys = make([]string, 0, len(pf.Prop))
		fieldMaskKeys = make([]string, 0, len(pf.Prop))
		for i := range pf.Prop {
			if requiresExplicitFetching(&pf.Prop[i]) {
				key := metadataKeyOf(&pf.Prop[i])
				switch key {
				case "share-types":
					fieldMaskKeys = append(fieldMaskKeys, key)
				case "http://owncloud.org/ns/audio":
					metadataKeys = append(metadataKeys, metadataKeysWithPrefix("libre.graph.audio", audioKeys)...)
				case "http://owncloud.org/ns/location":
					metadataKeys = append(metadataKeys, metadataKeysWithPrefix("libre.graph.location", locationKeys)...)
				case "http://owncloud.org/ns/image":
					metadataKeys = append(metadataKeys, metadataKeysWithPrefix("libre.graph.image", imageKeys)...)
				case "http://owncloud.org/ns/photo":
					metadataKeys = append(metadataKeys, metadataKeysWithPrefix("libre.graph.photo", photoKeys)...)
				default:
					metadataKeys = append(metadataKeys, key)
				}

			}
		}
	}
	return metadataKeys, fieldMaskKeys
}

// addChild merges spaceInfo into the childInfos map, keyed by the first path
// segment below requestPath. Deep descendants are folded into a synthetic
// container entry; sizes are aggregated and the most recent mtime/etag wins.
// NOTE(review): spaceInfo is mutated in place (Type, Checksum, Path) — callers
// must not reuse it afterwards.
func addChild(childInfos map[string]*provider.ResourceInfo,
	spaceInfo *provider.ResourceInfo,
	requestPath string,
	rootInfo *provider.ResourceInfo,
) {
	if spaceInfo == rootInfo {
		return // already accounted for
	}

	childPath := strings.TrimPrefix(spaceInfo.Path, requestPath)
	childName, tail := router.ShiftPath(childPath)
	if tail != "/" {
		// the space is nested deeper: represent it as a directory child
		spaceInfo.Type = provider.ResourceType_RESOURCE_TYPE_CONTAINER
		spaceInfo.Checksum = nil
		// TODO unset opaque checksum
	}
	spaceInfo.Path = path.Join(requestPath, childName)
	if existingChild, ok := childInfos[childName]; ok {
		// aggregate size
		childInfos[childName].Size += spaceInfo.Size
		// use most recent child
		if existingChild.Mtime == nil || (spaceInfo.Mtime != nil && utils.TSToUnixNano(spaceInfo.Mtime) > utils.TSToUnixNano(existingChild.Mtime)) {
			childInfos[childName].Mtime = spaceInfo.Mtime
			childInfos[childName].Etag = spaceInfo.Etag
		}
		// only update fileid if the resource is a direct child
		if tail == "/" {
			childInfos[childName].Id = spaceInfo.Id
		}
	} else {
		childInfos[childName] = spaceInfo
	}
}

// requiresExplicitFetching reports whether the given property must be
// requested explicitly from the storage provider (as arbitrary metadata or a
// field mask path) instead of being derivable from a plain stat.
func requiresExplicitFetching(n *xml.Name) bool {
	switch n.Space {
	case net.NsDav:
		switch n.Local {
		case "quota-available-bytes", "quota-used-bytes", "lockdiscovery":
			// A PROPFIND request SHOULD NOT return DAV:quota-available-bytes and DAV:quota-used-bytes
			// from https://www.rfc-editor.org/rfc/rfc4331.html#section-2
			return true
		default:
			return false
		}
	case net.NsOwncloud:
		switch n.Local {
		case "favorite", "share-types", "checksums", "size", "tags", "audio", "location", "image", "photo":
			return true
		default:
			return false
		}
	case net.NsOCS:
		return false
	}
	return true
}

// ReadPropfind extracts and parses the propfind XML information from a Reader
// from
// https://github.com/golang/net/blob/e514e69ffb8bc3c76a71ae40de0118d794855992/webdav/xml.go#L178-L205
func ReadPropfind(r io.Reader) (pf XML, status int, err error) {
	c := countingReader{r: r}
	if err = xml.NewDecoder(&c).Decode(&pf); err != nil {
		if err == io.EOF {
			if c.n == 0 {
				// An empty body means to propfind allprop.
				// http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
				return XML{Allprop: new(struct{})}, 0, nil
			}
			err = errors.ErrInvalidPropfind
		}
		return XML{}, http.StatusBadRequest, err
	}

	// allprop, prop and propname are mutually exclusive per RFC 4918
	if pf.Allprop == nil && pf.Include != nil {
		return XML{}, http.StatusBadRequest, errors.ErrInvalidPropfind
	}
	if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
		return XML{}, http.StatusBadRequest, errors.ErrInvalidPropfind
	}
	if pf.Prop != nil && pf.Propname != nil {
		return XML{}, http.StatusBadRequest, errors.ErrInvalidPropfind
	}
	if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
		// jfd: I think is perfectly valid ... treat it as allprop
		return XML{Allprop: new(struct{})}, 0, nil
	}
	return pf, 0, nil
}

// MultistatusResponse converts a list of resource infos into a multistatus response string.
// Responses are rendered concurrently by a bounded worker pool and reassembled
// in the original order of mds.
func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool, downloadURLSigner signedurl.Signer) ([]byte, error) {
	g, ctx := errgroup.WithContext(ctx)

	type work struct {
		position int
		info     *provider.ResourceInfo
	}
	type result struct {
		position int
		info     *ResponseXML
	}
	// both channels are buffered to len(mds) so producers never block permanently
	workChan := make(chan work, len(mds))
	resultChan := make(chan result, len(mds))

	// Distribute work
	g.Go(func() error {
		defer close(workChan)
		for i, md := range mds {
			select {
			case workChan <- work{position: i, info: md}:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Spawn workers that'll concurrently work the queue
	numWorkers := 50
	if len(mds) < numWorkers {
		numWorkers = len(mds)
	}
	for i := 0; i < numWorkers; i++ {
		g.Go(func() error {
			for work := range workChan {
				res, err := mdToPropResponse(ctx, pf, work.info, publicURL, ns, linkshares, returnMinimal, downloadURLSigner)
				if err != nil {
					return err
				}
				select {
				case resultChan <- result{position: work.position, info: res}:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
			return nil
		})
	}

	// Wait for things to settle down, then close results chan
	go func() {
		_ = g.Wait() // error is checked later
		close(resultChan)
	}()

	// NOTE(review): g.Wait is also called in the goroutine above; this relies on
	// errgroup tolerating concurrent Wait calls — confirm against the errgroup docs.
	if err := g.Wait(); err != nil {
		return nil, err
	}

	// reassemble in input order using the recorded positions
	responses := make([]*ResponseXML, len(mds))
	for res := range resultChan {
		responses[res.position] = res.info
	}

	msr := NewMultiStatusResponseXML()
	msr.Responses = responses
	msg, err := xml.Marshal(msr)
	if err != nil {
		return nil, err
	}
	return msg, nil
}

// mdToPropResponse converts the CS3 metadata into a webdav PropResponse
// ns is the CS3 namespace that needs to be removed from the CS3 path before
// prefixing it with the baseURI
func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool, urlSigner signedurl.Signer) (*ResponseXML, error) {
	ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "md_to_prop_response")
	span.SetAttributes(attribute.KeyValue{Key: "publicURL", Value: attribute.StringValue(publicURL)})
	span.SetAttributes(attribute.KeyValue{Key: "ns", Value: attribute.StringValue(ns)})
	defer span.End()

	sublog := appctx.GetLogger(ctx).With().Interface("md", md).Str("ns", ns).Logger()
	id := md.Id
	p := strings.TrimPrefix(md.Path, ns)

	baseURI := ctx.Value(net.CtxKeyBaseURI).(string)

	ref := path.Join(baseURI, p)
	if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
		// collection hrefs carry a trailing slash
		ref += "/"
	}

	response := ResponseXML{
		Href:     net.EncodePath(ref),
		Propstat: []PropstatXML{},
	}

	var ls
*link.PublicShare

	// -1 indicates uncalculated
	// -2 indicates unknown (default)
	// -3 indicates unlimited
	quota := net.PropQuotaUnknown
	size := strconv.FormatUint(md.Size, 10)
	var lock *provider.Lock
	shareTypes := ""
	// TODO refactor helper functions: GetOpaqueJSONEncoded(opaque, key string, *struct) err, GetOpaquePlainEncoded(opaque, key) value, err
	// or use ok like pattern and return bool?
	if md.Opaque != nil && md.Opaque.Map != nil {
		if md.Opaque.Map["link-share"] != nil && md.Opaque.Map["link-share"].Decoder == "json" {
			ls = &link.PublicShare{}
			err := json.Unmarshal(md.Opaque.Map["link-share"].Value, ls)
			if err != nil {
				// best effort: a broken link-share blob only loses the public-link markers
				sublog.Error().Err(err).Msg("could not unmarshal link json")
			}
		}
		if quota = utils.ReadPlainFromOpaque(md.Opaque, "quota"); quota == "" {
			quota = net.PropQuotaUnknown
		}
		if md.Opaque.Map["lock"] != nil && md.Opaque.Map["lock"].Decoder == "json" {
			lock = &provider.Lock{}
			err := json.Unmarshal(md.Opaque.Map["lock"].Value, lock)
			if err != nil {
				sublog.Error().Err(err).Msg("could not unmarshal locks json")
			}
		}
		shareTypes = utils.ReadPlainFromOpaque(md.Opaque, "share-types")
	}
	role := conversions.RoleFromResourcePermissions(md.PermissionSet, ls != nil)

	if md.Space != nil && md.Space.SpaceType != "grant" && utils.ResourceIDEqual(md.Space.Root, id) {
		// a space root is never shared
		shareTypes = ""
	}
	var wdp string
	isPublic := ls != nil
	isShared := shareTypes != "" && !net.IsCurrentUserOwnerOrManager(ctx, md.Owner, md)
	if md.PermissionSet != nil {
		wdp = role.WebDAVPermissions(
			md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER,
			isShared,
			false,
			isPublic,
		)
	}

	// replace fileid of /public/{token} mountpoint with grant fileid
	if ls != nil && id != nil && id.SpaceId == utils.PublicStorageSpaceID && id.OpaqueId == ls.Token {
		id = ls.ResourceId
	}

	propstatOK := PropstatXML{
		Status: "HTTP/1.1 200 OK",
		Prop:   []prop.PropertyXML{},
	}

	propstatNotFound := PropstatXML{
		Status: "HTTP/1.1 404 Not Found",
		Prop:   []prop.PropertyXML{},
	}

	appendToOK := func(p ...prop.PropertyXML) {
		propstatOK.Prop = append(propstatOK.Prop, p...)
	}
	appendToNotFound := func(p ...prop.PropertyXML) {
		propstatNotFound.Prop = append(propstatNotFound.Prop, p...)
	}
	if returnMinimal {
		// return=minimal suppresses the 404 propstat block entirely
		appendToNotFound = func(p ...prop.PropertyXML) {}
	}

	// appendMetadataProp renders the kebab-cased sub-elements for a grouped
	// metadata property (audio/location/image/photo) from arbitrary metadata.
	appendMetadataProp := func(metadata map[string]string, tagNamespace string, name string, metadataPrefix string, keys []string) {
		content := strings.Builder{}
		for _, key := range keys {
			kebabCaseKey := strcase.ToKebab(key)
			if v, ok := metadata[fmt.Sprintf("%s.%s", metadataPrefix, key)]; ok {
				content.WriteString("<")
				content.WriteString(tagNamespace)
				content.WriteString(":")
				content.WriteString(kebabCaseKey)
				content.WriteString(">")
				content.Write(prop.Escaped("", v).InnerXML)
				// NOTE(review): empty literal below looks like a truncated XML closing
				// tag (possibly an extraction artifact) — verify against upstream.
				content.WriteString("")
			}
		}

		propName := fmt.Sprintf("%s:%s", tagNamespace, name)
		if content.Len() > 0 {
			appendToOK(prop.Raw(propName, content.String()))
		} else {
			appendToNotFound(prop.NotFound(propName))
		}
	}

	// when allprops has been requested
	if pf.Allprop != nil {
		// return all known properties

		if id != nil {
			sid := storagespace.FormatResourceID(id)
			appendToOK(
				prop.Escaped("oc:id", sid),
				prop.Escaped("oc:fileid", sid),
				prop.Escaped("oc:spaceid", storagespace.FormatStorageID(id.StorageId, id.SpaceId)),
			)
		}

		if md.ParentId != nil {
			appendToOK(prop.Escaped("oc:file-parent", storagespace.FormatResourceID(md.ParentId)))
		} else {
			appendToNotFound(prop.NotFound("oc:file-parent"))
		}

		// we need to add the shareid if possible - the only way to extract it here is to parse it from the path
		if ref, err := storagespace.ParseReference(strings.TrimPrefix(p, "/")); err == nil && ref.GetResourceId().GetSpaceId() == utils.ShareStorageSpaceID {
			appendToOK(prop.Raw("oc:shareid", ref.GetResourceId().GetOpaqueId()))
		}

		if md.Name != "" {
			appendToOK(prop.Escaped("oc:name", md.Name))
			appendToOK(prop.Escaped("d:displayname", md.Name))
		}

		if md.Etag != "" {
			// etags must be enclosed in double quotes and cannot contain them.
			// See https://tools.ietf.org/html/rfc7232#section-2.3 for details
			// TODO(jfd) handle weak tags that start with 'W/'
			appendToOK(prop.Escaped("d:getetag", quoteEtag(md.Etag)))
		}

		if md.PermissionSet != nil {
			appendToOK(prop.Escaped("oc:permissions", wdp))
		}

		// always return size, well nearly always ... public link shares are a little weird
		if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
			// NOTE(review): empty resourcetype value for a collection looks like a
			// truncated "<d:collection/>" literal (extraction artifact?) — verify.
			appendToOK(prop.Raw("d:resourcetype", ""))
			if ls == nil {
				appendToOK(prop.Escaped("oc:size", size))
			}
			// A PROPFIND request SHOULD NOT return DAV:quota-available-bytes and DAV:quota-used-bytes
			// from https://www.rfc-editor.org/rfc/rfc4331.html#section-2
			// appendToOK(prop.NewProp("d:quota-used-bytes", size))
			// appendToOK(prop.NewProp("d:quota-available-bytes", quota))
		} else {
			appendToOK(
				prop.Escaped("d:resourcetype", ""),
				prop.Escaped("d:getcontentlength", size),
			)
			if md.MimeType != "" {
				appendToOK(prop.Escaped("d:getcontenttype", md.MimeType))
			}
		}
		// Finder needs the getLastModified property to work.
		if md.Mtime != nil {
			t := utils.TSToTime(md.Mtime).UTC()
			lastModifiedString := t.Format(net.RFC1123)
			appendToOK(prop.Escaped("d:getlastmodified", lastModifiedString))
		}

		// stay bug compatible with oc10, see https://github.com/owncloud/core/pull/38304#issuecomment-762185241
		var checksums strings.Builder
		if md.Checksum != nil {
			// NOTE(review): empty literal looks like a truncated "<oc:checksum>" open
			// tag (extraction artifact?) — verify against upstream.
			checksums.WriteString("")
			checksums.WriteString(strings.ToUpper(string(xs.GRPC2PKGXS(md.Checksum.Type))))
			checksums.WriteString(":")
			checksums.WriteString(md.Checksum.Sum)
		}
		if md.Opaque != nil {
			if e, ok := md.Opaque.Map["md5"]; ok {
				if checksums.Len() == 0 {
					checksums.WriteString("MD5:")
				} else {
					checksums.WriteString(" MD5:")
				}
				checksums.Write(e.Value)
			}
			if e, ok := md.Opaque.Map["adler32"]; ok {
				if checksums.Len() == 0 {
					checksums.WriteString("ADLER32:")
				} else {
					checksums.WriteString(" ADLER32:")
				}
				checksums.Write(e.Value)
			}
		}
		if checksums.Len() > 0 {
			// NOTE(review): empty literal looks like a truncated closing tag — verify.
			checksums.WriteString("")
			appendToOK(prop.Raw("oc:checksums", checksums.String()))
		}

		if k := md.GetArbitraryMetadata().GetMetadata(); k != nil {
			propstatOK.Prop = append(propstatOK.Prop, prop.Raw("oc:tags", k["tags"]))
			appendMetadataProp(k, "oc", "audio", "libre.graph.audio", audioKeys)
			appendMetadataProp(k, "oc", "location", "libre.graph.location", locationKeys)
			appendMetadataProp(k, "oc", "image", "libre.graph.image", imageKeys)
			appendMetadataProp(k, "oc", "photo", "libre.graph.photo", photoKeys)
		}

		if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
			// directories have no preview
			appendToNotFound(prop.NotFound("oc:has-preview"))
		} else if md.MimeType != "" {
			hasPreview(md, appendToOK)
		}

		// ls do not report any properties as missing by default
		if ls == nil {
			// favorites from arbitrary metadata
			if k := md.GetArbitraryMetadata(); k == nil {
				appendToOK(prop.Raw("oc:favorite", "0"))
			} else if amd := k.GetMetadata(); amd == nil {
				appendToOK(prop.Raw("oc:favorite",
"0")) + } else if v, ok := amd[net.PropOcFavorite]; ok && v != "" { + appendToOK(prop.Escaped("oc:favorite", v)) + } else { + appendToOK(prop.Raw("oc:favorite", "0")) + } + } + + if lock != nil { + appendToOK(prop.Raw("d:lockdiscovery", activeLocks(&sublog, lock))) + } + // TODO return other properties ... but how do we put them in a namespace? + } else { + // otherwise return only the requested properties + for i := range pf.Prop { + switch pf.Prop[i].Space { + case net.NsOwncloud: + switch pf.Prop[i].Local { + // TODO(jfd): maybe phoenix and the other clients can just use this id as an opaque string? + // I tested the desktop client and phoenix to annotate which properties are requestted, see below cases + case "fileid": // phoenix only + if id != nil { + appendToOK(prop.Escaped("oc:fileid", storagespace.FormatResourceID(id))) + } else { + appendToNotFound(prop.NotFound("oc:fileid")) + } + case "id": // desktop client only + if id != nil { + appendToOK(prop.Escaped("oc:id", storagespace.FormatResourceID(id))) + } else { + appendToNotFound(prop.NotFound("oc:id")) + } + case "file-parent": + if md.ParentId != nil { + appendToOK(prop.Escaped("oc:file-parent", storagespace.FormatResourceID(md.ParentId))) + } else { + appendToNotFound(prop.NotFound("oc:file-parent")) + } + case "spaceid": + if id != nil { + appendToOK(prop.Escaped("oc:spaceid", storagespace.FormatStorageID(id.StorageId, id.SpaceId))) + } else { + appendToNotFound(prop.Escaped("oc:spaceid", "")) + } + case "permissions": // both + // oc:permissions take several char flags to indicate the permissions the user has on this node: + // D = delete + // NV = update (renameable moveable) + // W = update (files only) + // CK = create (folders only) + // S = Shared + // R = Shareable (Reshare) + // M = Mounted + // in contrast, the ocs:share-permissions further down below indicate clients the maximum permissions that can be granted + appendToOK(prop.Escaped("oc:permissions", wdp)) + case 
"public-link-permission": // only on a share root node + if ls != nil && md.PermissionSet != nil { + appendToOK(prop.Escaped("oc:public-link-permission", role.OCSPermissions().String())) + } else { + appendToNotFound(prop.NotFound("oc:public-link-permission")) + } + case "public-link-item-type": // only on a share root node + if ls != nil { + if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + appendToOK(prop.Raw("oc:public-link-item-type", "folder")) + } else { + appendToOK(prop.Raw("oc:public-link-item-type", "file")) + // redirectref is another option + } + } else { + appendToNotFound(prop.NotFound("oc:public-link-item-type")) + } + case "public-link-share-datetime": + if ls != nil && ls.Mtime != nil { + t := utils.TSToTime(ls.Mtime).UTC() // TODO or ctime? + shareTimeString := t.Format(net.RFC1123) + appendToOK(prop.Escaped("oc:public-link-share-datetime", shareTimeString)) + } else { + appendToNotFound(prop.NotFound("oc:public-link-share-datetime")) + } + case "public-link-share-owner": + if ls != nil && ls.Owner != nil { + if net.IsCurrentUserOwnerOrManager(ctx, ls.Owner, nil) { + u := ctxpkg.ContextMustGetUser(ctx) + appendToOK(prop.Escaped("oc:public-link-share-owner", u.Username)) + } else { + u, _ := ctxpkg.ContextGetUser(ctx) + sublog.Error().Interface("share", ls).Interface("user", u).Msg("the current user in the context should be the owner of a public link share") + appendToNotFound(prop.NotFound("oc:public-link-share-owner")) + } + } else { + appendToNotFound(prop.NotFound("oc:public-link-share-owner")) + } + case "public-link-expiration": + if ls != nil && ls.Expiration != nil { + t := utils.TSToTime(ls.Expiration).UTC() + expireTimeString := t.Format(net.RFC1123) + appendToOK(prop.Escaped("oc:public-link-expiration", expireTimeString)) + } else { + appendToNotFound(prop.NotFound("oc:public-link-expiration")) + } + case "size": // phoenix only + // TODO we cannot find out if md.Size is set or not because ints in go default to 0 + // TODO 
what is the difference to d:quota-used-bytes (which only exists for collections)? + // oc:size is available on files and folders and behaves like d:getcontentlength or d:quota-used-bytes respectively + // The hasPrefix is a workaround to make children of the link root show a size if they have 0 bytes + if ls == nil || strings.HasPrefix(p, "/"+ls.Token+"/") { + appendToOK(prop.Escaped("oc:size", size)) + } else { + // link share root collection has no size + appendToNotFound(prop.NotFound("oc:size")) + } + case "owner-id": // phoenix only + if md.Owner != nil { + if net.IsCurrentUserOwnerOrManager(ctx, md.Owner, md) { + u := ctxpkg.ContextMustGetUser(ctx) + appendToOK(prop.Escaped("oc:owner-id", u.Username)) + } else { + sublog.Debug().Msg("TODO fetch user username") + appendToNotFound(prop.NotFound("oc:owner-id")) + } + } else { + appendToNotFound(prop.NotFound("oc:owner-id")) + } + case "favorite": // phoenix only + // TODO: can be 0 or 1?, in oc10 it is present or not + // TODO: read favorite via separate call? that would be expensive? I hope it is in the md + // TODO: this boolean favorite property is so horribly wrong ... either it is presont, or it is not ... unless ... it is possible to have a non binary value ... we need to double check + if ls == nil { + if k := md.GetArbitraryMetadata(); k == nil { + appendToOK(prop.Raw("oc:favorite", "0")) + } else if amd := k.GetMetadata(); amd == nil { + appendToOK(prop.Raw("oc:favorite", "0")) + } else if v, ok := amd[net.PropOcFavorite]; ok && v != "" { + appendToOK(prop.Raw("oc:favorite", "1")) + } else { + appendToOK(prop.Raw("oc:favorite", "0")) + } + } else { + // link share root collection has no favorite + appendToNotFound(prop.NotFound("oc:favorite")) + } + case "checksums": // desktop ... not really ... 
the desktop sends the OC-Checksum header + + // stay bug compatible with oc10, see https://github.com/owncloud/core/pull/38304#issuecomment-762185241 + var checksums strings.Builder + if md.Checksum != nil { + checksums.WriteString("") + checksums.WriteString(strings.ToUpper(string(xs.GRPC2PKGXS(md.Checksum.Type)))) + checksums.WriteString(":") + checksums.WriteString(md.Checksum.Sum) + } + if md.Opaque != nil { + if e, ok := md.Opaque.Map["md5"]; ok { + if checksums.Len() == 0 { + checksums.WriteString("MD5:") + } else { + checksums.WriteString(" MD5:") + } + checksums.Write(e.Value) + } + if e, ok := md.Opaque.Map["adler32"]; ok { + if checksums.Len() == 0 { + checksums.WriteString("ADLER32:") + } else { + checksums.WriteString(" ADLER32:") + } + checksums.Write(e.Value) + } + } + if checksums.Len() > 13 { + checksums.WriteString("") + appendToOK(prop.Raw("oc:checksums", checksums.String())) + } else { + appendToNotFound(prop.NotFound("oc:checksums")) + } + case "share-types": // used to render share indicators to share owners + var types strings.Builder + + sts := strings.Split(shareTypes, ",") + for _, shareType := range sts { + switch shareType { + case "1": // provider.GranteeType_GRANTEE_TYPE_USER + types.WriteString("" + strconv.Itoa(int(conversions.ShareTypeUser)) + "") + case "2": // provider.GranteeType_GRANTEE_TYPE_GROUP + types.WriteString("" + strconv.Itoa(int(conversions.ShareTypeGroup)) + "") + default: + sublog.Debug().Interface("shareType", shareType).Msg("unknown share type, ignoring") + } + } + + if id != nil { + if _, ok := linkshares[id.OpaqueId]; ok { + types.WriteString("3") + } + } + + if types.Len() != 0 { + appendToOK(prop.Raw("oc:share-types", types.String())) + } else { + appendToNotFound(prop.NotFound("oc:" + pf.Prop[i].Local)) + } + case "owner-display-name": // phoenix only + if md.Owner != nil { + if net.IsCurrentUserOwnerOrManager(ctx, md.Owner, md) { + u := ctxpkg.ContextMustGetUser(ctx) + 
appendToOK(prop.Escaped("oc:owner-display-name", u.DisplayName)) + } else { + sublog.Debug().Msg("TODO fetch user displayname") + appendToNotFound(prop.NotFound("oc:owner-display-name")) + } + } else { + appendToNotFound(prop.NotFound("oc:owner-display-name")) + } + case "downloadURL": // desktop + if md.Type == provider.ResourceType_RESOURCE_TYPE_FILE { + url := downloadURL(ctx, sublog, isPublic, p, ls, publicURL, baseURI, urlSigner) + if url != "" { + appendToOK(prop.Escaped("oc:downloadURL", url)) + } else { + appendToNotFound(prop.NotFound("oc:" + pf.Prop[i].Local)) + } + + } else { + appendToNotFound(prop.NotFound("oc:" + pf.Prop[i].Local)) + } + case "privatelink": + privateURL, err := url.Parse(publicURL) + if err == nil && id != nil { + privateURL.Path = path.Join(privateURL.Path, "f", storagespace.FormatResourceID(id)) + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:privatelink", privateURL.String())) + } else { + propstatNotFound.Prop = append(propstatNotFound.Prop, prop.NotFound("oc:privatelink")) + } + case "signature-auth": + if isPublic { + // We only want to add the attribute to the root of the propfind. 
+ if strings.HasSuffix(p, ls.Token) && ls.Signature != nil { + expiration := time.Unix(int64(ls.Signature.SignatureExpiration.Seconds), int64(ls.Signature.SignatureExpiration.Nanos)) + var sb strings.Builder + sb.WriteString("") + sb.WriteString(ls.Signature.Signature) + sb.WriteString("") + sb.WriteString("") + sb.WriteString(expiration.Format(time.RFC3339)) + sb.WriteString("") + + appendToOK(prop.Raw("oc:signature-auth", sb.String())) + } else { + appendToNotFound(prop.NotFound("oc:signature-auth")) + } + } + case "tags": + if k := md.GetArbitraryMetadata().GetMetadata(); k != nil { + propstatOK.Prop = append(propstatOK.Prop, prop.Raw("oc:tags", k["tags"])) + } + case "audio": + if k := md.GetArbitraryMetadata().GetMetadata(); k != nil { + appendMetadataProp(k, "oc", "audio", "libre.graph.audio", audioKeys) + } + case "location": + if k := md.GetArbitraryMetadata().GetMetadata(); k != nil { + appendMetadataProp(k, "oc", "location", "libre.graph.location", locationKeys) + } + case "image": + if k := md.GetArbitraryMetadata().GetMetadata(); k != nil { + appendMetadataProp(k, "oc", "image", "libre.graph.image", imageKeys) + } + case "photo": + if k := md.GetArbitraryMetadata().GetMetadata(); k != nil { + appendMetadataProp(k, "oc", "photo", "libre.graph.photo", photoKeys) + } + case "name": + appendToOK(prop.Escaped("oc:name", md.Name)) + case "shareid": + if ref, err := storagespace.ParseReference(strings.TrimPrefix(p, "/")); err == nil && ref.GetResourceId().GetSpaceId() == utils.ShareStorageSpaceID { + appendToOK(prop.Raw("oc:shareid", ref.GetResourceId().GetOpaqueId())) + } + case "dDC": // desktop + fallthrough + case "data-fingerprint": // desktop + // used by admins to indicate a backup has been restored, + // can only occur on the root node + // server implementation in https://github.com/owncloud/core/pull/24054 + // see https://doc.owncloud.com/server/admin_manual/configuration/server/occ_command.html#maintenance-commands + // TODO(jfd): double check the 
client behavior with reva on backup restore + fallthrough + case "has-preview": + if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + // directories have no preview + appendToNotFound(prop.NotFound("oc:has-preview")) + } else if md.MimeType != "" { + hasPreview(md, appendToOK) + } + default: + appendToNotFound(prop.NotFound("oc:" + pf.Prop[i].Local)) + } + case net.NsDav: + switch pf.Prop[i].Local { + case "getetag": // both + if md.Etag != "" { + appendToOK(prop.Escaped("d:getetag", quoteEtag(md.Etag))) + } else { + appendToNotFound(prop.NotFound("d:getetag")) + } + case "getcontentlength": // both + // see everts stance on this https://stackoverflow.com/a/31621912, he points to http://tools.ietf.org/html/rfc4918#section-15.3 + // > Purpose: Contains the Content-Length header returned by a GET without accept headers. + // which only would make sense when eg. rendering a plain HTML filelisting when GETing a collection, + // which is not the case ... so we don't return it on collections. 
OpenCloud has oc:size for that + // TODO we cannot find out if md.Size is set or not because ints in go default to 0 + if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + appendToNotFound(prop.NotFound("d:getcontentlength")) + } else { + appendToOK(prop.Escaped("d:getcontentlength", size)) + } + case "resourcetype": // both + if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + appendToOK(prop.Raw("d:resourcetype", "")) + } else { + appendToOK(prop.Raw("d:resourcetype", "")) + // redirectref is another option + } + case "getcontenttype": // phoenix + if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + // directories have no contenttype + appendToNotFound(prop.NotFound("d:getcontenttype")) + } else if md.MimeType != "" { + appendToOK(prop.Escaped("d:getcontenttype", md.MimeType)) + } + case "getlastmodified": // both + // TODO we cannot find out if md.Mtime is set or not because ints in go default to 0 + if md.Mtime != nil { + t := utils.TSToTime(md.Mtime).UTC() + lastModifiedString := t.Format(net.RFC1123) + appendToOK(prop.Escaped("d:getlastmodified", lastModifiedString)) + } else { + appendToNotFound(prop.NotFound("d:getlastmodified")) + } + case "quota-used-bytes": // RFC 4331 + if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + // always returns the current usage, + // in oc10 there seems to be a bug that makes the size in webdav differ from the one in the user properties, not taking shares into account + // in OpenCloud we plan to always mak the quota a property of the storage space + appendToOK(prop.Escaped("d:quota-used-bytes", size)) + } else { + appendToNotFound(prop.NotFound("d:quota-used-bytes")) + } + case "quota-available-bytes": // RFC 4331 + if md.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + // oc10 returns -3 for unlimited, -2 for unknown, -1 for uncalculated + appendToOK(prop.Escaped("d:quota-available-bytes", quota)) + } else { + appendToNotFound(prop.NotFound("d:quota-available-bytes")) 
+ } + case "lockdiscovery": // http://www.webdav.org/specs/rfc2518.html#PROPERTY_lockdiscovery + if lock == nil { + appendToNotFound(prop.NotFound("d:lockdiscovery")) + } else { + appendToOK(prop.Raw("d:lockdiscovery", activeLocks(&sublog, lock))) + } + default: + appendToNotFound(prop.NotFound("d:" + pf.Prop[i].Local)) + } + case net.NsOCS: + switch pf.Prop[i].Local { + // ocs:share-permissions indicate clients the maximum permissions that can be granted: + // 1 = read + // 2 = write (update) + // 4 = create + // 8 = delete + // 16 = share + // shared files can never have the create or delete permission bit set + case "share-permissions": + if md.PermissionSet != nil { + perms := role.OCSPermissions() + // shared files cant have the create or delete permission set + if md.Type == provider.ResourceType_RESOURCE_TYPE_FILE { + perms &^= conversions.PermissionCreate + perms &^= conversions.PermissionDelete + } + appendToOK(prop.EscapedNS(pf.Prop[i].Space, pf.Prop[i].Local, perms.String())) + } + default: + appendToNotFound(prop.NotFound("d:" + pf.Prop[i].Local)) + } + default: + // handle custom properties + if k := md.GetArbitraryMetadata(); k == nil { + appendToNotFound(prop.NotFoundNS(pf.Prop[i].Space, pf.Prop[i].Local)) + } else if amd := k.GetMetadata(); amd == nil { + appendToNotFound(prop.NotFoundNS(pf.Prop[i].Space, pf.Prop[i].Local)) + } else if v, ok := amd[metadataKeyOf(&pf.Prop[i])]; ok && v != "" { + appendToOK(prop.EscapedNS(pf.Prop[i].Space, pf.Prop[i].Local, v)) + } else { + appendToNotFound(prop.NotFoundNS(pf.Prop[i].Space, pf.Prop[i].Local)) + } + } + } + } + + if status := utils.ReadPlainFromOpaque(md.Opaque, "status"); status == "processing" { + response.Propstat = append(response.Propstat, PropstatXML{ + Status: "HTTP/1.1 425 TOO EARLY", + Prop: propstatOK.Prop, + }) + return &response, nil + } + + if len(propstatOK.Prop) > 0 { + response.Propstat = append(response.Propstat, propstatOK) + } + if len(propstatNotFound.Prop) > 0 { + response.Propstat 
= append(response.Propstat, propstatNotFound) + } + + return &response, nil +} + +func hasPreview(md *provider.ResourceInfo, appendToOK func(p ...prop.PropertyXML)) { + _, match := thumbnail.SupportedMimeTypes[md.MimeType] + if match { + appendToOK(prop.Escaped("oc:has-preview", "1")) + } else { + appendToOK(prop.Escaped("oc:has-preview", "0")) + } +} + +func downloadURL(ctx context.Context, log zerolog.Logger, isPublic bool, path string, ls *link.PublicShare, publicURL string, baseURI string, urlSigner signedurl.Signer) string { + parts := strings.Split(path, "/") + encodedPath, err := url.JoinPath("/", parts...) + if err != nil { + log.Error().Err(err).Msg("failed to encode the path for the download URL") + return "" + } + + switch { + case isPublic: + var queryString string + if !ls.PasswordProtected { + queryString = encodedPath + } else { + expiration := time.Unix(int64(ls.Signature.SignatureExpiration.Seconds), int64(ls.Signature.SignatureExpiration.Nanos)) + var sb strings.Builder + + sb.WriteString(encodedPath) + sb.WriteString("?signature=") + sb.WriteString(ls.Signature.Signature) + sb.WriteString("&expiration=") + sb.WriteString(url.QueryEscape(expiration.Format(time.RFC3339))) + + queryString = sb.String() + } + return publicURL + baseURI + queryString + case urlSigner != nil: + u, ok := ctxpkg.ContextGetUser(ctx) + if !ok { + log.Error().Msg("could not get user from context for download URL signing") + return "" + } + signedURL, err := urlSigner.Sign(publicURL+baseURI+encodedPath, u.Id.OpaqueId, 30*time.Minute) + if err != nil { + log.Error().Err(err).Msg("failed to sign download URL") + return "" + } else { + return signedURL + } + } + return "" +} + +func activeLocks(log *zerolog.Logger, lock *provider.Lock) string { + if lock == nil || lock.Type == provider.LockType_LOCK_TYPE_INVALID { + return "" + } + expiration := "Infinity" + if lock.Expiration != nil { + now := uint64(time.Now().Unix()) + // Should we hide expired locks here? No. 
+ // + // If the timeout expires, then the lock SHOULD be removed. In this + // case the server SHOULD act as if an UNLOCK method was executed by the + // server on the resource using the lock token of the timed-out lock, + // performed with its override authority. + // + // see https://datatracker.ietf.org/doc/html/rfc4918#section-6.6 + if lock.Expiration.Seconds >= now { + expiration = "Second-" + strconv.FormatUint(lock.Expiration.Seconds-now, 10) + } else { + expiration = "Second-0" + } + } + + // xml.Encode cannot render emptytags like , see https://github.com/golang/go/issues/21399 + var activelocks strings.Builder + activelocks.WriteString("") + // webdav locktype write | transaction + switch lock.Type { + case provider.LockType_LOCK_TYPE_EXCL: + fallthrough + case provider.LockType_LOCK_TYPE_WRITE: + activelocks.WriteString("") + } + // webdav lockscope exclusive, shared, or local + switch lock.Type { + case provider.LockType_LOCK_TYPE_EXCL: + fallthrough + case provider.LockType_LOCK_TYPE_WRITE: + activelocks.WriteString("") + case provider.LockType_LOCK_TYPE_SHARED: + activelocks.WriteString("") + } + // we currently only support depth infinity + activelocks.WriteString("Infinity") + + if lock.User != nil || lock.AppName != "" { + activelocks.WriteString("") + + if lock.User != nil { + // TODO oc10 uses displayname and email, needs a user lookup + activelocks.WriteString(prop.Escape(lock.User.OpaqueId + "@" + lock.User.Idp)) + } + if lock.AppName != "" { + if lock.User != nil { + activelocks.WriteString(" via ") + } + activelocks.WriteString(prop.Escape(lock.AppName)) + } + activelocks.WriteString("") + } + + if un := utils.ReadPlainFromOpaque(lock.Opaque, "lockownername"); un != "" { + activelocks.WriteString("") + activelocks.WriteString(un) + activelocks.WriteString("") + } + if lt := utils.ReadPlainFromOpaque(lock.Opaque, "locktime"); lt != "" { + activelocks.WriteString("") + activelocks.WriteString(lt) + activelocks.WriteString("") + } + 
activelocks.WriteString("") + activelocks.WriteString(expiration) + activelocks.WriteString("") + if lock.LockId != "" { + activelocks.WriteString("") + activelocks.WriteString(prop.Escape(lock.LockId)) + activelocks.WriteString("") + } + // lockroot is only used when setting the lock + activelocks.WriteString("") + return activelocks.String() +} + +// be defensive about wrong encoded etags +func quoteEtag(etag string) string { + if strings.HasPrefix(etag, "W/") { + return `W/"` + strings.Trim(etag[2:], `"`) + `"` + } + return `"` + strings.Trim(etag, `"`) + `"` +} + +func (c *countingReader) Read(p []byte) (int, error) { + n, err := c.r.Read(p) + c.n += n + return n, err +} + +func metadataKeyOf(n *xml.Name) string { + switch n.Local { + case "quota-available-bytes": + return "quota" + case "share-types", "tags", "lockdiscovery": + return n.Local + default: + return fmt.Sprintf("%s/%s", n.Space, n.Local) + } +} + +// UnmarshalXML appends the property names enclosed within start to pn. +// +// It returns an error if start does not contain any properties or if +// properties contain values. Character data between properties is ignored. +func (pn *Props) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + for { + t, err := prop.Next(d) + if err != nil { + return err + } + switch e := t.(type) { + case xml.EndElement: + // jfd: I think is perfectly valid ... 
treat it as allprop + /* + if len(*pn) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + */ + return nil + case xml.StartElement: + t, err = prop.Next(d) + if err != nil { + return err + } + if _, ok := t.(xml.EndElement); !ok { + return fmt.Errorf("unexpected token %T", t) + } + *pn = append(*pn, e.Name) + } + } +} diff --git a/services/webdav/pkg/ocdav/propfind/propfind_suite_test.go b/services/webdav/pkg/ocdav/propfind/propfind_suite_test.go new file mode 100644 index 0000000000..d7d601b592 --- /dev/null +++ b/services/webdav/pkg/ocdav/propfind/propfind_suite_test.go @@ -0,0 +1,31 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package propfind_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestPropfind(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Propfind Suite") +} diff --git a/services/webdav/pkg/ocdav/propfind/propfind_test.go b/services/webdav/pkg/ocdav/propfind/propfind_test.go new file mode 100644 index 0000000000..d003e13430 --- /dev/null +++ b/services/webdav/pkg/ocdav/propfind/propfind_test.go @@ -0,0 +1,1696 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package propfind_test + +import ( + "context" + "encoding/xml" + "io" + "net/http" + "net/http/httptest" + "strings" + + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" + sprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks" + "github.com/stretchr/testify/mock" + "google.golang.org/grpc" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type selector struct { + client gateway.GatewayAPIClient +} + +func (s selector) Next(opts ...pool.Option) (gateway.GatewayAPIClient, error) { + return s.client, nil +} + +var _ = Describe("PropfindWithDepthInfinity", func() { + var ( + handler *propfind.Handler + client *mocks.GatewayAPIClient + ctx context.Context + + readResponse = func(r io.Reader) (*propfind.MultiStatusResponseUnmarshalXML, string, error) { + buf, err := io.ReadAll(r) + if err != nil { + return nil, "", err + } + res := &propfind.MultiStatusResponseUnmarshalXML{} + err = xml.Unmarshal(buf, res) + if err != nil { + return nil, "", err + } + + return res, string(buf), nil + } + + mockStat = func(ref *sprovider.Reference, info *sprovider.ResourceInfo) { + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *sprovider.StatRequest) bool { + return utils.ResourceIDEqual(req.Ref.ResourceId, ref.ResourceId) && + (ref.Path == "" || req.Ref.Path == ref.Path) + })).Return(&sprovider.StatResponse{ + Status: status.NewOK(ctx), + 
Info: info, + }, nil) + } + mockListContainer = func(ref *sprovider.Reference, infos []*sprovider.ResourceInfo) { + client.On("ListContainer", mock.Anything, mock.MatchedBy(func(req *sprovider.ListContainerRequest) bool { + match := utils.ResourceIDEqual(req.Ref.ResourceId, ref.ResourceId) && + (ref.Path == "" || req.Ref.Path == ref.Path) + return match + })).Return(&sprovider.ListContainerResponse{ + Status: status.NewOK(ctx), + Infos: infos, + }, nil) + } + + foospace *sprovider.StorageSpace + fooquxspace *sprovider.StorageSpace + fooFileShareSpace *sprovider.StorageSpace + fooFileShare2Space *sprovider.StorageSpace + fooDirShareSpace *sprovider.StorageSpace + ) + + JustBeforeEach(func() { + ctx = context.WithValue(context.Background(), net.CtxKeyBaseURI, "http://127.0.0.1:3000") + client = &mocks.GatewayAPIClient{} + sel := selector{ + client: client, + } + + cfg := &config.Config{ + FilesNamespace: "/users/{{.Username}}", + WebdavNamespace: "/users/{{.Username}}", + AllowPropfindDepthInfinitiy: true, + NameValidation: config.NameValidation{ + MaxLength: 255, + InvalidChars: []string{"\f", "\r", "\n", "\\"}, + }, + } + + handler = propfind.NewHandler("127.0.0.1:3000", sel, nil, cfg) + + foospace = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, + Name: "foospace", + } + fooquxspace = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo/qux"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: 
"fooquxspace", OpaqueId: "root"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"}, + Name: "fooquxspace", + } + fooFileShareSpace = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo/Shares/sharedFile"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace", OpaqueId: "sharedfile"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace", OpaqueId: "sharedfile"}, + Name: "fooFileShareSpace", + } + fooFileShare2Space = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo/Shares/sharedFile2"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace2", OpaqueId: "sharedfile2"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace2", OpaqueId: "sharedfile2"}, + Name: "fooFileShareSpace2", + } + fooDirShareSpace = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo/Shares/sharedDir"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"}, + Name: "fooDirShareSpace", + } + + // For the space mounted a /foo we assign a storageid "foospace" and a root opaqueid "root" + // it contains four resources + // - ./bar, file, 100 bytes, opaqueid "bar" + // - ./baz, file, 1 byte, opaqueid "baz" + // - ./dir, folder, 
30 bytes, opaqueid "dir" + // - ./dir/entry, file, 30 bytes, opaqueid "direntry" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: ".", + Size: uint64(131), + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "."}, + []*sprovider.ResourceInfo{ + { + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, + Path: "bar", + Size: 100, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + { + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "baz"}, + Path: "baz", + Size: 1, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + { + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, + Path: "dir", + Size: 30, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./bar"}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./bar", + Size: uint64(100), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: 
&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./bar", + Size: uint64(100), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./baz"}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "baz"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./baz", + Size: uint64(1), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "baz"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "baz"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./baz", + Size: uint64(1), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./dir"}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: "./dir", + Size: uint64(30), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./dir&dir"}, + &sprovider.ResourceInfo{ + Id: 
&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: "./dir&dir", + Name: "dir&dir", + Size: uint64(30), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: "./dir", + Size: uint64(30), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./dir"}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "direntry"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "entry", + Size: 30, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./dir&dir"}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "direntry"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "entry", + Size: 30, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, Path: "."}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", 
OpaqueId: "direntry"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "entry", + Size: 30, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + }) + + // For the space mounted a /foo/qux we assign a storageid "foospace" and a root opaqueid "root" + // it contains one resource + // - ./quux, file, 1000 bytes, opaqueid "quux" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: ".", + Size: uint64(1000), + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"}, Path: "."}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "quux"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./quux", + Size: 1000, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "fooquxspace"}, + }, + }) + + // For the space mounted a /foo/Shares/sharedFile we assign a spaceid "fooFileShareSpace" and a root opaqueid "sharedfile" + // it is a file resource, 2000 bytes, opaqueid "sharedfile" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace", OpaqueId: "sharedfile"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace", OpaqueId: "sharedfile"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: ".", + Size: uint64(2000), + Mtime: &typesv1beta1.Timestamp{Seconds: 1}, + Etag: "1", + }) + + // For the space mounted a /foo/Shares/sharedFile2 we assign a spaceid "fooFileShareSpace2" and a root opaqueid 
"sharedfile2" + // it is a file resource, 2500 bytes, opaqueid "sharedfile2" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace2", OpaqueId: "sharedfile2"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace2", OpaqueId: "sharedfile2"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: ".", + Size: uint64(2500), + Mtime: &typesv1beta1.Timestamp{Seconds: 2}, + Etag: "2", + }) + + // For the space mounted a /foo/Shares/sharedFile2 we assign a spaceid "fooDirShareSpace" and a root opaqueid "shareddir" + // it is a folder containing one resource + // ./something, file, 1500 bytes, opaqueid "shareddirsomething" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: ".", + Size: uint64(1500), + Mtime: &typesv1beta1.Timestamp{Seconds: 3}, + Etag: "3", + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"}, Path: "."}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddirsomething"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "something", + Size: 1500, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "fooDirShareSpace"}, + }, + }) + + client.On("ListPublicShares", mock.Anything, mock.Anything).Return( + func(_ context.Context, req *link.ListPublicSharesRequest, _ ...grpc.CallOption) *link.ListPublicSharesResponse { + + var shares []*link.PublicShare + if len(req.Filters) == 0 { + shares = 
[]*link.PublicShare{} + } else { + term := req.Filters[0].Term.(*link.ListPublicSharesRequest_Filter_ResourceId) + switch { + case term != nil && term.ResourceId != nil && term.ResourceId.OpaqueId == "bar": + shares = []*link.PublicShare{ + { + Id: &link.PublicShareId{OpaqueId: "share1"}, + ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, + }, + } + default: + shares = []*link.PublicShare{} + } + } + return &link.ListPublicSharesResponse{ + Status: status.NewOK(ctx), + Share: shares, + } + }, nil) + }) + + Describe("NewHandler", func() { + It("returns a handler", func() { + Expect(handler).ToNot(BeNil()) + }) + }) + + Describe("HandlePathPropfind", func() { + Context("with just one space", func() { + JustBeforeEach(func() { + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *sprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, "/foo") + })).Return(&sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*sprovider.StorageSpace{foospace}, + }, nil) + client.On("ListStorageSpaces", mock.Anything, mock.Anything).Return(&sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*sprovider.StorageSpace{}, + }, nil) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: ".", + Size: uint64(131), + }) + }) + + It("verifies the depth header", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + req.Header.Set(net.HeaderDepth, "invalid") + req = req.WithContext(ctx) + Expect(err).ToNot(HaveOccurred()) + + handler.HandlePathPropfind(rr, req, "/") 
+ Expect(rr.Code).To(Equal(http.StatusBadRequest)) + }) + + It("stats a path", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + req = req.WithContext(ctx) + Expect(err).ToNot(HaveOccurred()) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(4)) + + root := res.Responses[0] + Expect(root.Href).To(Equal("http:/127.0.0.1:3000/foo/")) + Expect(string(root.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("131")) + + bar := res.Responses[1] + Expect(bar.Href).To(Equal("http:/127.0.0.1:3000/foo/bar")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + + baz := res.Responses[2] + Expect(baz.Href).To(Equal("http:/127.0.0.1:3000/foo/baz")) + Expect(string(baz.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1")) + + dir := res.Responses[3] + Expect(dir.Href).To(Equal("http:/127.0.0.1:3000/foo/dir/")) + Expect(string(dir.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + }) + + It("stats a file", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo/bar", strings.NewReader("")) + req = req.WithContext(ctx) + Expect(err).ToNot(HaveOccurred()) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(1)) + + bar := res.Responses[0] + Expect(bar.Href).To(Equal("http:/127.0.0.1:3000/foo/bar")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + }) + }) + + Context("with one nested file space", func() { + JustBeforeEach(func() { + client.On("ListStorageSpaces", mock.Anything, mock.Anything).Return( + func(_ context.Context, req *sprovider.ListStorageSpacesRequest, _ ...grpc.CallOption) 
*sprovider.ListStorageSpacesResponse { + var spaces []*sprovider.StorageSpace + switch string(req.Opaque.Map["path"].Value) { + case "/", "/foo": + spaces = []*sprovider.StorageSpace{foospace, fooFileShareSpace} + case "/foo/Shares", "/foo/Shares/sharedFile": + spaces = []*sprovider.StorageSpace{fooFileShareSpace} + } + return &sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: spaces, + } + }, + nil) + }) + + It("stats the parent", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(5)) + + parent := res.Responses[0] + Expect(parent.Href).To(Equal("http:/127.0.0.1:3000/foo/")) + Expect(string(parent.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("2131")) + + sf := res.Responses[4] + Expect(sf.Href).To(Equal("http:/127.0.0.1:3000/foo/Shares/")) + Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("2000")) + }) + + It("stats the embedded space", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo/Shares/sharedFile", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(1)) + + sf := res.Responses[0] + Expect(sf.Href).To(Equal("http:/127.0.0.1:3000/foo/Shares/sharedFile")) + Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("2000")) + Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("Thu, 01 Jan 1970 00:00:01 GMT")) + 
Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring(`"1"`)) + }) + }) + + Context("with two nested file spaces and a nested directory space", func() { + JustBeforeEach(func() { + client.On("ListStorageSpaces", mock.Anything, mock.Anything).Return( + func(_ context.Context, req *sprovider.ListStorageSpacesRequest, _ ...grpc.CallOption) *sprovider.ListStorageSpacesResponse { + var spaces []*sprovider.StorageSpace + switch string(req.Opaque.Map["path"].Value) { + case "/", "/foo": + spaces = []*sprovider.StorageSpace{foospace, fooFileShareSpace, fooFileShare2Space, fooDirShareSpace} + case "/foo/Shares": + spaces = []*sprovider.StorageSpace{fooFileShareSpace, fooFileShare2Space, fooDirShareSpace} + case "/foo/Shares/sharedFile": + spaces = []*sprovider.StorageSpace{fooFileShareSpace} + case "/foo/Shares/sharedFile2": + spaces = []*sprovider.StorageSpace{fooFileShare2Space} + } + return &sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: spaces, + } + }, + nil) + }) + + It("stats the parent", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(5)) + + parent := res.Responses[0] + Expect(parent.Href).To(Equal("http:/127.0.0.1:3000/foo/")) + Expect(string(parent.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("6131")) + + shares := res.Responses[4] + Expect(shares.Href).To(Equal("http:/127.0.0.1:3000/foo/Shares/")) + Expect(string(shares.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("6000")) + Expect(string(shares.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("Thu, 01 Jan 1970 00:00:03 GMT")) + Expect(string(shares.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring(`"3"`)) + 
}) + + It("stats the embedded space", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo/Shares/sharedFile", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(1)) + + sf := res.Responses[0] + Expect(sf.Href).To(Equal("http:/127.0.0.1:3000/foo/Shares/sharedFile")) + Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("2000")) + }) + + It("includes all the things™ when depth is infinity", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + req.Header.Add(net.HeaderDepth, "infinity") + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(9)) + + paths := []string{} + for _, r := range res.Responses { + paths = append(paths, r.Href) + } + Expect(paths).To(ConsistOf( + "http:/127.0.0.1:3000/foo/", + "http:/127.0.0.1:3000/foo/bar", + "http:/127.0.0.1:3000/foo/baz", + "http:/127.0.0.1:3000/foo/dir/", + "http:/127.0.0.1:3000/foo/dir/entry", + "http:/127.0.0.1:3000/foo/Shares/sharedFile", + "http:/127.0.0.1:3000/foo/Shares/sharedFile2", + "http:/127.0.0.1:3000/foo/Shares/sharedDir/", + "http:/127.0.0.1:3000/foo/Shares/sharedDir/something", + )) + }) + }) + + Context("with a nested directory space", func() { + JustBeforeEach(func() { + client.On("ListStorageSpaces", mock.Anything, mock.Anything).Return( + func(_ context.Context, req *sprovider.ListStorageSpacesRequest, _ ...grpc.CallOption) *sprovider.ListStorageSpacesResponse { + var spaces []*sprovider.StorageSpace + switch 
string(req.Opaque.Map["path"].Value) { + case "/", "/foo": + spaces = []*sprovider.StorageSpace{foospace, fooquxspace} + case "/foo/qux": + spaces = []*sprovider.StorageSpace{fooquxspace} + } + return &sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: spaces, + } + }, + nil) + }) + + // Pending, the code for handling missing parents is still missing + PIt("handles children with no parent", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusOK)) + }) + + It("mounts embedded spaces", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(5)) + + root := res.Responses[0] + Expect(root.Href).To(Equal("http:/127.0.0.1:3000/foo/")) + Expect(string(root.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1131")) + + bar := res.Responses[1] + Expect(bar.Href).To(Equal("http:/127.0.0.1:3000/foo/bar")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + + baz := res.Responses[2] + Expect(baz.Href).To(Equal("http:/127.0.0.1:3000/foo/baz")) + Expect(string(baz.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1")) + + dir := res.Responses[3] + Expect(dir.Href).To(Equal("http:/127.0.0.1:3000/foo/dir/")) + Expect(string(dir.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + + qux := res.Responses[4] + Expect(qux.Href).To(Equal("http:/127.0.0.1:3000/foo/qux/")) + Expect(string(qux.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1000")) + }) + + It("stats the embedded 
space", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo/qux/", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(2)) + + qux := res.Responses[0] + Expect(qux.Href).To(Equal("http:/127.0.0.1:3000/foo/qux/")) + Expect(string(qux.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1000")) + + quux := res.Responses[1] + Expect(quux.Href).To(Equal("http:/127.0.0.1:3000/foo/qux/quux")) + Expect(string(quux.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1000")) + }) + }) + }) + + Describe("HandleSpacesPropfind", func() { + /* + JustBeforeEach(func() { + client.On("Stat", mock.Anything, mock.Anything).Return(func(_ context.Context, req *sprovider.StatRequest, _ ...grpc.CallOption) *sprovider.StatResponse { + switch { + case req.Ref.ResourceId.OpaqueId == "foospace": + return &sprovider.StatResponse{ + Status: status.NewOK(ctx), + Info: &sprovider.ResourceInfo{ + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Id: &sprovider.ResourceId{OpaqueId: "foospaceroot", StorageId: "foospaceroot"}, + Size: 131, + Path: ".", + }, + } + default: + return &sprovider.StatResponse{ + Status: status.NewNotFound(ctx, "not found"), + } + } + }, nil) + }) + */ + + It("handles invalid space ids", func() { + client.On("Stat", mock.Anything, mock.Anything).Return(&sprovider.StatResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + + handler.HandleSpacesPropfind(rr, req, "does-not-exist") + Expect(rr.Code).To(Equal(http.StatusNotFound)) + }) + + It("stats the space root", func() { + client.On("Stat", mock.Anything, 
mock.Anything).Return(&sprovider.StatResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + spaceIDUrl := net.EncodePath(spaceID) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(4)) + + root := res.Responses[0] + Expect(root.Href).To(Equal("http:/127.0.0.1:3000/" + spaceIDUrl + "/")) + Expect(string(root.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("131")) + + bar := res.Responses[1] + Expect(bar.Href).To(Equal("http:/127.0.0.1:3000/" + spaceIDUrl + "/bar")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("provider-1$foospace!foospace")) + + baz := res.Responses[2] + Expect(baz.Href).To(Equal("http:/127.0.0.1:3000/" + spaceIDUrl + "/baz")) + Expect(string(baz.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1")) + Expect(string(baz.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("provider-1$foospace!foospace")) + + dir := res.Responses[3] + Expect(dir.Href).To(Equal("http:/127.0.0.1:3000/" + spaceIDUrl + "/dir/")) + Expect(string(dir.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + Expect(string(dir.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("provider-1$foospace!foospace")) + }) + + It("stats a file", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/bar", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: 
"provider-1", SpaceId: "foospace", OpaqueId: "root"}) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(1)) + Expect(string(res.Responses[0].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + }) + + It("stats a directory", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/dir", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(2)) + Expect(string(res.Responses[0].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + Expect(string(res.Responses[1].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + }) + + It("stats a directory with xml special characters", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/dir&dir", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(2)) + Expect(string(res.Responses[0].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("dir&dir")) + Expect(string(res.Responses[1].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + }) + + It("includes all the things™ when depth is infinity", func() { + rr := 
httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", strings.NewReader("")) + req.Header.Add(net.HeaderDepth, "infinity") + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + _, _, err = readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + + }) + }) +}) + +var _ = Describe("PropfindWithoutDepthInfinity", func() { + var ( + handler *propfind.Handler + client *mocks.GatewayAPIClient + ctx context.Context + + readResponse = func(r io.Reader) (*propfind.MultiStatusResponseUnmarshalXML, string, error) { + buf, err := io.ReadAll(r) + if err != nil { + return nil, "", err + } + res := &propfind.MultiStatusResponseUnmarshalXML{} + err = xml.Unmarshal(buf, res) + if err != nil { + return nil, "", err + } + + return res, string(buf), nil + } + + mockStat = func(ref *sprovider.Reference, info *sprovider.ResourceInfo) { + client.On("Stat", mock.Anything, mock.MatchedBy(func(req *sprovider.StatRequest) bool { + return utils.ResourceIDEqual(req.Ref.ResourceId, ref.ResourceId) && + (ref.Path == "" || req.Ref.Path == ref.Path) + })).Return(&sprovider.StatResponse{ + Status: status.NewOK(ctx), + Info: info, + }, nil) + } + mockListContainer = func(ref *sprovider.Reference, infos []*sprovider.ResourceInfo) { + client.On("ListContainer", mock.Anything, mock.MatchedBy(func(req *sprovider.ListContainerRequest) bool { + match := utils.ResourceIDEqual(req.Ref.ResourceId, ref.ResourceId) && + (ref.Path == "" || req.Ref.Path == ref.Path) + return match + })).Return(&sprovider.ListContainerResponse{ + Status: status.NewOK(ctx), + Infos: infos, + }, nil) + } + + foospace *sprovider.StorageSpace + fooquxspace *sprovider.StorageSpace + fooFileShareSpace *sprovider.StorageSpace + fooFileShare2Space *sprovider.StorageSpace 
+ fooDirShareSpace *sprovider.StorageSpace + ) + + JustBeforeEach(func() { + ctx = context.WithValue(context.Background(), net.CtxKeyBaseURI, "http://127.0.0.1:3000") + client = &mocks.GatewayAPIClient{} + sel := selector{ + client: client, + } + + cfg := &config.Config{ + FilesNamespace: "/users/{{.Username}}", + WebdavNamespace: "/users/{{.Username}}", + AllowPropfindDepthInfinitiy: false, + NameValidation: config.NameValidation{ + MaxLength: 255, + InvalidChars: []string{"\f", "\r", "\n", "\\"}, + }, + } + + handler = propfind.NewHandler("127.0.0.1:3000", sel, nil, cfg) + + foospace = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, + Name: "foospace", + } + fooquxspace = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo/qux"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"}, + Name: "fooquxspace", + } + fooFileShareSpace = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo/Shares/sharedFile"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace", OpaqueId: "sharedfile"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: 
"fooFileShareSpace", OpaqueId: "sharedfile"}, + Name: "fooFileShareSpace", + } + fooFileShare2Space = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo/Shares/sharedFile2"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace2", OpaqueId: "sharedfile2"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace2", OpaqueId: "sharedfile2"}, + Name: "fooFileShareSpace2", + } + fooDirShareSpace = &sprovider.StorageSpace{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/foo/Shares/sharedDir"), + }, + }, + }, + Id: &sprovider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"})}, + Root: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"}, + Name: "fooDirShareSpace", + } + + // For the space mounted a /foo we assign a storageid "foospace" and a root opaqueid "root" + // it contains four resources + // - ./bar, file, 100 bytes, opaqueid "bar" + // - ./baz, file, 1 byte, opaqueid "baz" + // - ./dir, folder, 30 bytes, opaqueid "dir" + // - ./dir/entry, file, 30 bytes, opaqueid "direntry" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: ".", + Size: uint64(131), + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "."}, + 
[]*sprovider.ResourceInfo{ + { + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, + Path: "bar", + Size: 100, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + { + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "baz"}, + Path: "baz", + Size: 1, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + { + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, + Path: "dir", + Size: 30, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./bar"}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./bar", + Size: uint64(100), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./bar", + Size: uint64(100), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./baz"}, + &sprovider.ResourceInfo{ + Id: 
&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "baz"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./baz", + Size: uint64(1), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "baz"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "baz"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./baz", + Size: uint64(1), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./dir"}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: "./dir", + Size: uint64(30), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./dir&dir"}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: "./dir&dir", + Name: "dir&dir", + Size: uint64(30), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, + Type: 
sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: "./dir", + Size: uint64(30), + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./dir"}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "direntry"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "entry", + Size: 30, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}, Path: "./dir&dir"}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "direntry"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "entry", + Size: 30, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "dir"}, Path: "."}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "direntry"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "entry", + Size: 30, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + }, + }) + + // For the space mounted a /foo/qux we assign a storageid "foospace" and a root opaqueid "root" + // it contains one resource + // - ./quux, file, 1000 bytes, opaqueid "quux" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: 
&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: ".", + Size: uint64(1000), + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "root"}, Path: "."}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "quux"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "./quux", + Size: 1000, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooquxspace", OpaqueId: "fooquxspace"}, + }, + }) + + // For the space mounted a /foo/Shares/sharedFile we assign a spaceid "fooFileShareSpace" and a root opaqueid "sharedfile" + // it is a file resource, 2000 bytes, opaqueid "sharedfile" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace", OpaqueId: "sharedfile"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace", OpaqueId: "sharedfile"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: ".", + Size: uint64(2000), + Mtime: &typesv1beta1.Timestamp{Seconds: 1}, + Etag: "1", + }) + + // For the space mounted a /foo/Shares/sharedFile2 we assign a spaceid "fooFileShareSpace2" and a root opaqueid "sharedfile2" + // it is a file resource, 2500 bytes, opaqueid "sharedfile2" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace2", OpaqueId: "sharedfile2"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooFileShareSpace2", OpaqueId: "sharedfile2"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: ".", + Size: uint64(2500), + Mtime: &typesv1beta1.Timestamp{Seconds: 2}, + Etag: "2", + }) + + // For the space mounted a /foo/Shares/sharedFile2 
we assign a spaceid "fooDirShareSpace" and a root opaqueid "shareddir" + // it is a folder containing one resource + // ./something, file, 1500 bytes, opaqueid "shareddirsomething" + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: ".", + Size: uint64(1500), + Mtime: &typesv1beta1.Timestamp{Seconds: 3}, + Etag: "3", + }) + mockListContainer(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddir"}, Path: "."}, + []*sprovider.ResourceInfo{ + { + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "shareddirsomething"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_FILE, + Path: "something", + Size: 1500, + ParentId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "fooDirShareSpace", OpaqueId: "fooDirShareSpace"}, + }, + }) + + client.On("ListPublicShares", mock.Anything, mock.Anything).Return( + func(_ context.Context, req *link.ListPublicSharesRequest, _ ...grpc.CallOption) *link.ListPublicSharesResponse { + + var shares []*link.PublicShare + if len(req.Filters) == 0 { + shares = []*link.PublicShare{} + } else { + term := req.Filters[0].Term.(*link.ListPublicSharesRequest_Filter_ResourceId) + switch { + case term != nil && term.ResourceId != nil && term.ResourceId.OpaqueId == "bar": + shares = []*link.PublicShare{ + { + Id: &link.PublicShareId{OpaqueId: "share1"}, + ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "bar"}, + }, + } + default: + shares = []*link.PublicShare{} + } + } + return &link.ListPublicSharesResponse{ + Status: status.NewOK(ctx), + Share: shares, + } + }, nil) + }) + + Describe("NewHandler", 
func() { + It("returns a handler", func() { + Expect(handler).ToNot(BeNil()) + }) + }) + + Describe("HandlePathPropfind", func() { + Context("with just one space", func() { + JustBeforeEach(func() { + client.On("ListStorageSpaces", mock.Anything, mock.MatchedBy(func(req *sprovider.ListStorageSpacesRequest) bool { + p := string(req.Opaque.Map["path"].Value) + return p == "/" || strings.HasPrefix(p, "/foo") + })).Return(&sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*sprovider.StorageSpace{foospace}, + }, nil) + client.On("ListStorageSpaces", mock.Anything, mock.Anything).Return(&sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: []*sprovider.StorageSpace{}, + }, nil) + mockStat(&sprovider.Reference{ResourceId: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, Path: "."}, + &sprovider.ResourceInfo{ + Id: &sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "foospace"}, + Type: sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Path: ".", + Size: uint64(131), + }) + }) + + It("verifies the depth header", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + req.Header.Set(net.HeaderDepth, "invalid") + req = req.WithContext(ctx) + Expect(err).ToNot(HaveOccurred()) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusBadRequest)) + }) + + It("stats a path", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + req = req.WithContext(ctx) + Expect(err).ToNot(HaveOccurred()) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(4)) + + root := res.Responses[0] + Expect(root.Href).To(Equal("http:/127.0.0.1:3000/foo/")) + 
Expect(string(root.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("131")) + + bar := res.Responses[1] + Expect(bar.Href).To(Equal("http:/127.0.0.1:3000/foo/bar")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + + baz := res.Responses[2] + Expect(baz.Href).To(Equal("http:/127.0.0.1:3000/foo/baz")) + Expect(string(baz.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1")) + + dir := res.Responses[3] + Expect(dir.Href).To(Equal("http:/127.0.0.1:3000/foo/dir/")) + Expect(string(dir.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + }) + + It("stats a file", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo/bar", strings.NewReader("")) + req = req.WithContext(ctx) + Expect(err).ToNot(HaveOccurred()) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(1)) + + bar := res.Responses[0] + Expect(bar.Href).To(Equal("http:/127.0.0.1:3000/foo/bar")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + }) + }) + + Context("with one nested file space", func() { + JustBeforeEach(func() { + client.On("ListStorageSpaces", mock.Anything, mock.Anything).Return( + func(_ context.Context, req *sprovider.ListStorageSpacesRequest, _ ...grpc.CallOption) *sprovider.ListStorageSpacesResponse { + var spaces []*sprovider.StorageSpace + switch string(req.Opaque.Map["path"].Value) { + case "/", "/foo": + spaces = []*sprovider.StorageSpace{foospace, fooFileShareSpace} + case "/foo/Shares", "/foo/Shares/sharedFile": + spaces = []*sprovider.StorageSpace{fooFileShareSpace} + } + return &sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: spaces, + } + }, + nil) + }) + + It("stats the parent", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", 
strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(5)) + + parent := res.Responses[0] + Expect(parent.Href).To(Equal("http:/127.0.0.1:3000/foo/")) + Expect(string(parent.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("2131")) + + sf := res.Responses[4] + Expect(sf.Href).To(Equal("http:/127.0.0.1:3000/foo/Shares/")) + Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("2000")) + }) + + It("stats the embedded space", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo/Shares/sharedFile", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(1)) + + sf := res.Responses[0] + Expect(sf.Href).To(Equal("http:/127.0.0.1:3000/foo/Shares/sharedFile")) + Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("2000")) + Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("Thu, 01 Jan 1970 00:00:01 GMT")) + Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring(`"1"`)) + }) + }) + + Context("with two nested file spaces and a nested directory space", func() { + JustBeforeEach(func() { + client.On("ListStorageSpaces", mock.Anything, mock.Anything).Return( + func(_ context.Context, req *sprovider.ListStorageSpacesRequest, _ ...grpc.CallOption) *sprovider.ListStorageSpacesResponse { + var spaces []*sprovider.StorageSpace + switch string(req.Opaque.Map["path"].Value) { + case "/", "/foo": + spaces = []*sprovider.StorageSpace{foospace, fooFileShareSpace, fooFileShare2Space, 
fooDirShareSpace} + case "/foo/Shares": + spaces = []*sprovider.StorageSpace{fooFileShareSpace, fooFileShare2Space, fooDirShareSpace} + case "/foo/Shares/sharedFile": + spaces = []*sprovider.StorageSpace{fooFileShareSpace} + case "/foo/Shares/sharedFile2": + spaces = []*sprovider.StorageSpace{fooFileShare2Space} + } + return &sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: spaces, + } + }, + nil) + }) + + It("stats the parent", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(5)) + + parent := res.Responses[0] + Expect(parent.Href).To(Equal("http:/127.0.0.1:3000/foo/")) + Expect(string(parent.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("6131")) + + shares := res.Responses[4] + Expect(shares.Href).To(Equal("http:/127.0.0.1:3000/foo/Shares/")) + Expect(string(shares.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("6000")) + Expect(string(shares.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("Thu, 01 Jan 1970 00:00:03 GMT")) + Expect(string(shares.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring(`"3"`)) + }) + + It("stats the embedded space", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo/Shares/sharedFile", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(1)) + + sf := res.Responses[0] + Expect(sf.Href).To(Equal("http:/127.0.0.1:3000/foo/Shares/sharedFile")) + 
Expect(string(sf.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("2000")) + }) + + It("includes all the things™ when depth is infinity", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + req.Header.Add(net.HeaderDepth, "infinity") + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusBadRequest)) + }) + }) + + Context("with a nested directory space", func() { + JustBeforeEach(func() { + client.On("ListStorageSpaces", mock.Anything, mock.Anything).Return( + func(_ context.Context, req *sprovider.ListStorageSpacesRequest, _ ...grpc.CallOption) *sprovider.ListStorageSpacesResponse { + var spaces []*sprovider.StorageSpace + switch string(req.Opaque.Map["path"].Value) { + case "/", "/foo": + spaces = []*sprovider.StorageSpace{foospace, fooquxspace} + case "/foo/qux": + spaces = []*sprovider.StorageSpace{fooquxspace} + } + return &sprovider.ListStorageSpacesResponse{ + Status: status.NewOK(ctx), + StorageSpaces: spaces, + } + }, + nil) + }) + + // Pending, the code for handling missing parents is still missing + PIt("handles children with no parent", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusOK)) + }) + + It("mounts embedded spaces", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(5)) + + root := res.Responses[0] + Expect(root.Href).To(Equal("http:/127.0.0.1:3000/foo/")) 
+ Expect(string(root.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1131")) + + bar := res.Responses[1] + Expect(bar.Href).To(Equal("http:/127.0.0.1:3000/foo/bar")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + + baz := res.Responses[2] + Expect(baz.Href).To(Equal("http:/127.0.0.1:3000/foo/baz")) + Expect(string(baz.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1")) + + dir := res.Responses[3] + Expect(dir.Href).To(Equal("http:/127.0.0.1:3000/foo/dir/")) + Expect(string(dir.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + + qux := res.Responses[4] + Expect(qux.Href).To(Equal("http:/127.0.0.1:3000/foo/qux/")) + Expect(string(qux.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1000")) + }) + + It("stats the embedded space", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo/qux/", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + handler.HandlePathPropfind(rr, req, "/") + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(2)) + + qux := res.Responses[0] + Expect(qux.Href).To(Equal("http:/127.0.0.1:3000/foo/qux/")) + Expect(string(qux.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1000")) + + quux := res.Responses[1] + Expect(quux.Href).To(Equal("http:/127.0.0.1:3000/foo/qux/quux")) + Expect(string(quux.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1000")) + }) + }) + }) + + Describe("HandleSpacesPropfind", func() { + /* + JustBeforeEach(func() { + client.On("Stat", mock.Anything, mock.Anything).Return(func(_ context.Context, req *sprovider.StatRequest, _ ...grpc.CallOption) *sprovider.StatResponse { + switch { + case req.Ref.ResourceId.OpaqueId == "foospace": + return &sprovider.StatResponse{ + Status: status.NewOK(ctx), + Info: &sprovider.ResourceInfo{ + Type: 
sprovider.ResourceType_RESOURCE_TYPE_CONTAINER, + Id: &sprovider.ResourceId{OpaqueId: "foospaceroot", StorageId: "foospaceroot"}, + Size: 131, + Path: ".", + }, + } + default: + return &sprovider.StatResponse{ + Status: status.NewNotFound(ctx, "not found"), + } + } + }, nil) + }) + */ + + It("handles invalid space ids", func() { + client.On("Stat", mock.Anything, mock.Anything).Return(&sprovider.StatResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + + handler.HandleSpacesPropfind(rr, req, "does-not-exist") + Expect(rr.Code).To(Equal(http.StatusNotFound)) + }) + + It("stats the space root", func() { + client.On("Stat", mock.Anything, mock.Anything).Return(&sprovider.StatResponse{ + Status: status.NewNotFound(ctx, "not found"), + }, nil) + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + spaceIDUrl := net.EncodePath(spaceID) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(4)) + + root := res.Responses[0] + Expect(root.Href).To(Equal("http:/127.0.0.1:3000/" + spaceIDUrl + "/")) + Expect(string(root.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("131")) + + bar := res.Responses[1] + Expect(bar.Href).To(Equal("http:/127.0.0.1:3000/" + spaceIDUrl + "/bar")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + Expect(string(bar.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("provider-1$foospace!foospace")) + + baz := res.Responses[2] + 
Expect(baz.Href).To(Equal("http:/127.0.0.1:3000/" + spaceIDUrl + "/baz")) + Expect(string(baz.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("1")) + Expect(string(baz.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("provider-1$foospace!foospace")) + + dir := res.Responses[3] + Expect(dir.Href).To(Equal("http:/127.0.0.1:3000/" + spaceIDUrl + "/dir/")) + Expect(string(dir.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + Expect(string(dir.Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("provider-1$foospace!foospace")) + }) + + It("stats a file", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/bar", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(1)) + Expect(string(res.Responses[0].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("100")) + }) + + It("stats a directory", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/dir", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(2)) + Expect(string(res.Responses[0].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + Expect(string(res.Responses[1].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + }) + + It("stats a directory with xml special 
characters", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/dir&dir", strings.NewReader("")) + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusMultiStatus)) + + res, _, err := readResponse(rr.Result().Body) + Expect(err).ToNot(HaveOccurred()) + Expect(len(res.Responses)).To(Equal(2)) + Expect(string(res.Responses[0].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("dir&dir")) + Expect(string(res.Responses[1].Propstat[0].Prop[0].InnerXML)).To(ContainSubstring("30")) + }) + + It("includes all the things™ when depth is infinity", func() { + rr := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", strings.NewReader("")) + req.Header.Add(net.HeaderDepth, "infinity") + Expect(err).ToNot(HaveOccurred()) + req = req.WithContext(ctx) + + spaceID := storagespace.FormatResourceID(&sprovider.ResourceId{StorageId: "provider-1", SpaceId: "foospace", OpaqueId: "root"}) + handler.HandleSpacesPropfind(rr, req, spaceID) + Expect(rr.Code).To(Equal(http.StatusBadRequest)) + + _, _, err = readResponse(rr.Result().Body) + Expect(err).To(HaveOccurred()) + + }) + }) +}) diff --git a/services/webdav/pkg/ocdav/proppatch.go b/services/webdav/pkg/ocdav/proppatch.go new file mode 100644 index 0000000000..301c01a77f --- /dev/null +++ b/services/webdav/pkg/ocdav/proppatch.go @@ -0,0 +1,495 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "net/http" + "path" + "strings" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/prop" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + "github.com/opencloud-eu/reva/v2/pkg/permission" + rstatus "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" +) + +func (s *svc) handlePathProppatch(w http.ResponseWriter, r *http.Request, ns string) (status int, err error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "proppatch") + defer span.End() + + fn := path.Join(ns, r.URL.Path) + + sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() + + pp, status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + + 
space, rpcStatus, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, fn) + switch { + case err != nil: + return http.StatusInternalServerError, err + case rpcStatus.Code == rpc.Code_CODE_ABORTED: + return http.StatusPreconditionFailed, errtypes.NewErrtypeFromStatus(rpcStatus) + case rpcStatus.Code != rpc.Code_CODE_OK: + return rstatus.HTTPStatusFromCode(rpcStatus.Code), errtypes.NewErrtypeFromStatus(rpcStatus) + } + + client, err := s.gatewaySelector.Next() + if err != nil { + return http.StatusInternalServerError, errtypes.InternalError(err.Error()) + } + // check if resource exists + statReq := &provider.StatRequest{Ref: spacelookup.MakeRelativeReference(space, fn, false)} + statRes, err := client.Stat(ctx, statReq) + switch { + case err != nil: + return http.StatusInternalServerError, err + case statRes.Status.Code == rpc.Code_CODE_ABORTED: + return http.StatusPreconditionFailed, errtypes.NewErrtypeFromStatus(statRes.Status) + case statRes.Status.Code != rpc.Code_CODE_OK: // map the stat status, not the (already OK) space-lookup status + return rstatus.HTTPStatusFromCode(statRes.Status.Code), errtypes.NewErrtypeFromStatus(statRes.Status) + } + + acceptedProps, removedProps, ok := s.handleProppatch(ctx, w, r, spacelookup.MakeRelativeReference(space, fn, false), pp, sublog) + if !ok { + // handleProppatch handles responses in error cases so return 0 + return 0, nil + } + + nRef := strings.TrimPrefix(fn, ns) + nRef = path.Join(ctx.Value(net.CtxKeyBaseURI).(string), nRef) + if statRes.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + nRef += "/" + } + + s.handleProppatchResponse(ctx, w, r, acceptedProps, removedProps, nRef, sublog) + return 0, nil +} + +func (s *svc) handleSpacesProppatch(w http.ResponseWriter, r *http.Request, spaceID string) (status int, err error) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces_proppatch") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Logger() + + pp, 
status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + if err != nil { + return http.StatusBadRequest, err + } + + acceptedProps, removedProps, ok := s.handleProppatch(ctx, w, r, &ref, pp, sublog) + if !ok { + // handleProppatch handles responses in error cases so return 0 + return 0, nil + } + + nRef := path.Join(spaceID, r.URL.Path) + nRef = path.Join(ctx.Value(net.CtxKeyBaseURI).(string), nRef) + + s.handleProppatchResponse(ctx, w, r, acceptedProps, removedProps, nRef, sublog) + return 0, nil +} + +func (s *svc) handleProppatch(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, patches []Proppatch, log zerolog.Logger) ([]xml.Name, []xml.Name, bool) { + + rreq := &provider.UnsetArbitraryMetadataRequest{ + Ref: ref, + ArbitraryMetadataKeys: []string{""}, + LockId: requestLock(r), + } + sreq := &provider.SetArbitraryMetadataRequest{ + Ref: ref, + ArbitraryMetadata: &provider.ArbitraryMetadata{ + Metadata: map[string]string{}, + }, + LockId: requestLock(r), + } + + acceptedProps := []xml.Name{} + removedProps := []xml.Name{} + + client, err := s.gatewaySelector.Next() + if err != nil { + log.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + for i := range patches { + if len(patches[i].Props) < 1 { + continue + } + for j := range patches[i].Props { + propNameXML := patches[i].Props[j].XMLName + // don't use path.Join. It removes the double slash! 
concatenate with a / + key := fmt.Sprintf("%s/%s", patches[i].Props[j].XMLName.Space, patches[i].Props[j].XMLName.Local) + value := string(patches[i].Props[j].InnerXML) + remove := patches[i].Remove + // boolean flags may be "set" to false as well + if s.isBooleanProperty(key) { + // Make boolean properties either "0" or "1" + value = s.as0or1(value) + if value == "0" { + remove = true + } + } + // Webdav spec requires the operations to be executed in the order + // specified in the PROPPATCH request + // http://www.webdav.org/specs/rfc2518.html#rfc.section.8.2 + // FIXME: batch this somehow + if remove { + rreq.ArbitraryMetadataKeys[0] = key + res, err := client.UnsetArbitraryMetadata(ctx, rreq) + if err != nil { + log.Error().Err(err).Msg("error sending a grpc UnsetArbitraryMetadata request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + + if res.Status.Code != rpc.Code_CODE_OK { + status := rstatus.HTTPStatusFromCode(res.Status.Code) + if res.Status.Code == rpc.Code_CODE_ABORTED { + // aborted is used for etag an lock mismatches, which translates to 412 + // in case a real Conflict response is needed, the calling code needs to send the header + status = http.StatusPreconditionFailed + } + m := res.Status.Message + if res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + // check if user has access to resource + sRes, err := client.Stat(ctx, &provider.StatRequest{Ref: ref}) + if err != nil { + log.Error().Err(err).Msg("error performing stat grpc request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + if sRes.Status.Code != rpc.Code_CODE_OK { + // return not found error so we do not leak existence of a file + // TODO hide permission failed for users without access in every kind of request + // TODO should this be done in the driver? 
+ status = http.StatusNotFound + } + } + if status == http.StatusNotFound { + m = "Resource not found" // mimic the oc10 error message + } + w.WriteHeader(status) + b, err := errors.Marshal(status, m, "", "") + errors.HandleWebdavError(&log, w, b, err) + return nil, nil, false + } + if key == "http://owncloud.org/ns/favorite" { + statRes, err := client.Stat(ctx, &provider.StatRequest{Ref: ref}) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + currentUser := ctxpkg.ContextMustGetUser(ctx) + ok, err := utils.CheckPermission(ctx, permission.WriteFavorites, client) + if err != nil { + log.Error().Err(err).Msg("error checking permission") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + if !ok { + log.Info().Interface("user", currentUser).Msg("user not allowed to unset favorite") + w.WriteHeader(http.StatusForbidden) + return nil, nil, false + } + err = s.favoritesManager.UnsetFavorite(ctx, currentUser.Id, statRes.Info) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + } + removedProps = append(removedProps, propNameXML) + } else { + sreq.ArbitraryMetadata.Metadata[key] = value + res, err := client.SetArbitraryMetadata(ctx, sreq) + if err != nil { + log.Error().Err(err).Str("key", key).Str("value", value).Msg("error sending a grpc SetArbitraryMetadata request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + + if res.Status.Code != rpc.Code_CODE_OK { + status := rstatus.HTTPStatusFromCode(res.Status.Code) + if res.Status.Code == rpc.Code_CODE_ABORTED { + // aborted is used for etag an lock mismatches, which translates to 412 + // in case a real Conflict response is needed, the calling code needs to send the header + status = http.StatusPreconditionFailed + } + m := res.Status.Message + if res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + // check if user has access to resource + sRes, err := client.Stat(ctx, 
&provider.StatRequest{Ref: ref}) + if err != nil { + log.Error().Err(err).Msg("error performing stat grpc request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + if sRes.Status.Code != rpc.Code_CODE_OK { + // return not found error so we don't leak existence of a file + // TODO hide permission failed for users without access in every kind of request + // TODO should this be done in the driver? + status = http.StatusNotFound + } + } + if status == http.StatusNotFound { + m = "Resource not found" // mimic the oc10 error message + } + w.WriteHeader(status) + b, err := errors.Marshal(status, m, "", "") + errors.HandleWebdavError(&log, w, b, err) + return nil, nil, false + } + + acceptedProps = append(acceptedProps, propNameXML) + delete(sreq.ArbitraryMetadata.Metadata, key) + + if key == "http://owncloud.org/ns/favorite" { + statRes, err := client.Stat(ctx, &provider.StatRequest{Ref: ref}) + if err != nil || statRes.Info == nil { + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + currentUser := ctxpkg.ContextMustGetUser(ctx) + ok, err := utils.CheckPermission(ctx, permission.WriteFavorites, client) + if err != nil { + log.Error().Err(err).Msg("error checking permission") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + if !ok { + log.Info().Interface("user", currentUser).Msg("user not allowed to set favorite") + w.WriteHeader(http.StatusForbidden) + return nil, nil, false + } + err = s.favoritesManager.SetFavorite(ctx, currentUser.Id, statRes.Info) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + } + } + } + // FIXME: in case of error, need to set all properties back to the original state, + // and return the error in the matching propstat block, if applicable + // http://www.webdav.org/specs/rfc2518.html#rfc.section.8.2 + } + + return acceptedProps, removedProps, true +} + +func (s *svc) handleProppatchResponse(ctx context.Context, 
w http.ResponseWriter, r *http.Request, acceptedProps, removedProps []xml.Name, path string, log zerolog.Logger) { + propRes, err := s.formatProppatchResponse(ctx, acceptedProps, removedProps, path) + if err != nil { + log.Error().Err(err).Msg("error formatting proppatch response") + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8") + w.WriteHeader(http.StatusMultiStatus) + if _, err := w.Write(propRes); err != nil { + log.Err(err).Msg("error writing response") + } +} + +func (s *svc) formatProppatchResponse(ctx context.Context, acceptedProps []xml.Name, removedProps []xml.Name, ref string) ([]byte, error) { + responses := make([]propfind.ResponseXML, 0, 1) + response := propfind.ResponseXML{ + Href: net.EncodePath(ref), + Propstat: []propfind.PropstatXML{}, + } + + if len(acceptedProps) > 0 { + propstatBody := []prop.PropertyXML{} + for i := range acceptedProps { + propstatBody = append(propstatBody, prop.EscapedNS(acceptedProps[i].Space, acceptedProps[i].Local, "")) + } + response.Propstat = append(response.Propstat, propfind.PropstatXML{ + Status: "HTTP/1.1 200 OK", + Prop: propstatBody, + }) + } + + if len(removedProps) > 0 { + propstatBody := []prop.PropertyXML{} + for i := range removedProps { + propstatBody = append(propstatBody, prop.EscapedNS(removedProps[i].Space, removedProps[i].Local, "")) + } + response.Propstat = append(response.Propstat, propfind.PropstatXML{ + Status: "HTTP/1.1 204 No Content", + Prop: propstatBody, + }) + } + + responses = append(responses, response) + responsesXML, err := xml.Marshal(&responses) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + buf.WriteString(``) + buf.Write(responsesXML) + buf.WriteString(``) + return buf.Bytes(), nil +} + +func (s *svc) isBooleanProperty(prop string) bool { + // TODO add other properties we know to be boolean? 
+ return prop == net.PropOcFavorite +} + +func (s *svc) as0or1(val string) string { + switch strings.TrimSpace(val) { + case "false": + return "0" + case "": + return "0" + case "0": + return "0" + case "no": + return "0" + case "off": + return "0" + } + return "1" +} + +// Proppatch describes a property update instruction as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH +type Proppatch struct { + // Remove specifies whether this patch removes properties. If it does not + // remove them, it sets them. + Remove bool + // Props contains the properties to be set or removed. + Props []prop.PropertyXML +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []prop.PropertyXML + +// UnmarshalXML appends the property names and values enclosed within start +// to ps. +// +// An xml:lang attribute that is defined either on the DAV:prop or property +// name XML element is propagated to the property's Lang field. +// +// UnmarshalXML returns an error if start does not contain any properties or if +// property values contain syntactically incorrect XML. 
+func (ps *proppatchProps) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + lang := xmlLang(start, "") + for { + t, err := prop.Next(d) + if err != nil { + return err + } + switch elem := t.(type) { + case xml.EndElement: + if len(*ps) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case xml.StartElement: + p := prop.PropertyXML{} + err = d.DecodeElement(&p, &elem) + if err != nil { + return err + } + // special handling for the lang property + p.Lang = xmlLang(t.(xml.StartElement), lang) + *ps = append(*ps, p) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove +type setRemove struct { + XMLName xml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + Prop proppatchProps `xml:"DAV: prop"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate +type propertyupdate struct { + XMLName xml.Name `xml:"DAV: propertyupdate"` + Lang string `xml:"xml:lang,attr,omitempty"` + SetRemove []setRemove `xml:",any"` +} + +func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { + var pu propertyupdate + if err = xml.NewDecoder(r).Decode(&pu); err != nil { + return nil, http.StatusBadRequest, err + } + for _, op := range pu.SetRemove { + remove := false + switch op.XMLName { + case xml.Name{Space: net.NsDav, Local: "set"}: + // No-op. 
+ case xml.Name{Space: net.NsDav, Local: "remove"}: + for _, p := range op.Prop { + if len(p.InnerXML) > 0 { + return nil, http.StatusBadRequest, errors.ErrInvalidProppatch + } + } + remove = true + default: + return nil, http.StatusBadRequest, errors.ErrInvalidProppatch + } + patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) + } + return patches, 0, nil +} + +var xmlLangName = xml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} + +func xmlLang(s xml.StartElement, d string) string { + for _, attr := range s.Attr { + if attr.Name == xmlLangName { + return attr.Value + } + } + return d +} diff --git a/services/webdav/pkg/ocdav/publicfile.go b/services/webdav/pkg/ocdav/publicfile.go new file mode 100644 index 0000000000..014d1d79d2 --- /dev/null +++ b/services/webdav/pkg/ocdav/publicfile.go @@ -0,0 +1,201 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "fmt" + "net/http" + "path" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + ocdaverrors "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" +) + +// PublicFileHandler handles requests on a shared file. it needs to be wrapped in a collection +type PublicFileHandler struct { + namespace string +} + +func (h *PublicFileHandler) Init(ns string) error { + h.namespace = path.Join("/", ns) + return nil +} + +// Handler handles requests +func (h *PublicFileHandler) Handler(s *svc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + log := appctx.GetLogger(ctx) + token, relativePath := router.ShiftPath(r.URL.Path) + + base := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), token) + ctx = context.WithValue(ctx, net.CtxKeyBaseURI, base) + r = r.WithContext(ctx) + + log.Debug().Str("relativePath", relativePath).Msg("PublicFileHandler func") + + if relativePath != "" && relativePath != "/" { + // accessing the file + + switch r.Method { + case MethodPropfind: + s.handlePropfindOnToken(w, r, h.namespace, false) + case http.MethodGet: + s.handlePathGet(w, r, h.namespace) + case http.MethodOptions: + s.handleOptions(w, r) + case http.MethodHead: + s.handlePathHead(w, r, h.namespace) + case http.MethodPut: + s.handlePathPut(w, r, h.namespace) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } + } else { + // accessing the virtual parent folder + switch r.Method { + case MethodPropfind: + s.handlePropfindOnToken(w, r, h.namespace, true) + case 
http.MethodOptions: + s.handleOptions(w, r) + case http.MethodHead: + s.handlePathHead(w, r, h.namespace) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } + } + }) +} + +// ns is the namespace that is prefixed to the path in the cs3 namespace +func (s *svc) handlePropfindOnToken(w http.ResponseWriter, r *http.Request, ns string, onContainer bool) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "token_propfind") + defer span.End() + + tokenStatInfo, ok := TokenStatInfoFromContext(ctx) + if !ok { + span.RecordError(ocdaverrors.ErrTokenStatInfoMissing) + span.SetStatus(codes.Error, ocdaverrors.ErrTokenStatInfoMissing.Error()) + span.SetAttributes(semconv.HTTPResponseStatusCodeKey.Int(http.StatusInternalServerError)) + w.WriteHeader(http.StatusInternalServerError) + b, err := ocdaverrors.Marshal(http.StatusInternalServerError, ocdaverrors.ErrTokenStatInfoMissing.Error(), "", "") + ocdaverrors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + sublog := appctx.GetLogger(ctx).With().Interface("tokenStatInfo", tokenStatInfo).Logger() + sublog.Debug().Msg("handlePropfindOnToken") + + dh := r.Header.Get(net.HeaderDepth) + depth, err := net.ParseDepth(dh) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "Invalid Depth header value") + span.SetAttributes(semconv.HTTPResponseStatusCodeKey.Int(http.StatusBadRequest)) + sublog.Debug().Str("depth", dh).Msg(err.Error()) + w.WriteHeader(http.StatusBadRequest) + m := fmt.Sprintf("Invalid Depth header value: %v", dh) + b, err := ocdaverrors.Marshal(http.StatusBadRequest, m, "", "") + ocdaverrors.HandleWebdavError(&sublog, w, b, err) + return + } + + if depth == net.DepthInfinity && !s.c.AllowPropfindDepthInfinitiy { + span.RecordError(ocdaverrors.ErrInvalidDepth) + span.SetStatus(codes.Error, "DEPTH: infinity is not supported") + span.SetAttributes(semconv.HTTPResponseStatusCodeKey.Int(http.StatusBadRequest)) + sublog.Debug().Str("depth", 
dh).Msg(ocdaverrors.ErrInvalidDepth.Error()) + w.WriteHeader(http.StatusBadRequest) + m := fmt.Sprintf("Invalid Depth header value: %v", dh) + b, err := ocdaverrors.Marshal(http.StatusBadRequest, m, "", "") + ocdaverrors.HandleWebdavError(&sublog, w, b, err) + return + } + + pf, status, err := propfind.ReadPropfind(r.Body) + if err != nil { + sublog.Debug().Err(err).Msg("error reading propfind request") + w.WriteHeader(status) + return + } + + infos := s.getPublicFileInfos(onContainer, depth == net.DepthZero, tokenStatInfo) + + prefer := net.ParsePrefer(r.Header.Get("prefer")) + returnMinimal := prefer[net.HeaderPreferReturn] == "minimal" + + propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, ns, nil, returnMinimal, nil) + if err != nil { + sublog.Error().Err(err).Msg("error formatting propfind") + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8") + w.Header().Set(net.HeaderVary, net.HeaderPrefer) + if returnMinimal { + w.Header().Set(net.HeaderPreferenceApplied, "return=minimal") + } + w.WriteHeader(http.StatusMultiStatus) + if _, err := w.Write(propRes); err != nil { + sublog.Err(err).Msg("error writing response") + } +} + +// there are only two possible entries +// 1. the non existing collection +// 2. 
the shared file +func (s *svc) getPublicFileInfos(onContainer, onlyRoot bool, i *provider.ResourceInfo) []*provider.ResourceInfo { + infos := []*provider.ResourceInfo{} + if onContainer { + // copy link-share data if present + // we don't copy everything because the checksum should not be present + var o *typesv1beta1.Opaque + if i.Opaque != nil && i.Opaque.Map != nil && i.Opaque.Map["link-share"] != nil { + o = &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "link-share": i.Opaque.Map["link-share"], + }, + } + } + // always add collection + infos = append(infos, &provider.ResourceInfo{ + // Opaque carries the link-share data we need when rendering the collection root href + Opaque: o, + Path: path.Dir(i.Path), + Type: provider.ResourceType_RESOURCE_TYPE_CONTAINER, + }) + if onlyRoot { + return infos + } + } + + // add the file info + infos = append(infos, i) + + return infos +} diff --git a/services/webdav/pkg/ocdav/put.go b/services/webdav/pkg/ocdav/put.go new file mode 100644 index 0000000000..286daac9f8 --- /dev/null +++ b/services/webdav/pkg/ocdav/put.go @@ -0,0 +1,456 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "io" + "net/http" + "path" + "path/filepath" + "strconv" + "strings" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/errtypes" + "github.com/opencloud-eu/reva/v2/pkg/rhttp" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" + "go.opentelemetry.io/otel/propagation" +) + +func sufferMacOSFinder(r *http.Request) bool { + return r.Header.Get(net.HeaderExpectedEntityLength) != "" +} + +func handleMacOSFinder(w http.ResponseWriter, r *http.Request) error { + /* + Many webservers will not cooperate well with Finder PUT requests, + because it uses 'Chunked' transfer encoding for the request body. + The symptom of this problem is that Finder sends files to the + server, but they arrive as 0-length files. + If we don't do anything, the user might think they are uploading + files successfully, but they end up empty on the server. Instead, + we throw back an error if we detect this. + The reason Finder uses Chunked, is because it thinks the files + might change as it's being uploaded, and therefore the + Content-Length can vary. + Instead it sends the X-Expected-Entity-Length header with the size + of the file at the very start of the request. If this header is set, + but we don't get a request body we will fail the request to + protect the end-user. 
+ */ + + log := appctx.GetLogger(r.Context()) + content := r.Header.Get(net.HeaderContentLength) + expected := r.Header.Get(net.HeaderExpectedEntityLength) + log.Warn().Str("content-length", content).Str("x-expected-entity-length", expected).Msg("Mac OS Finder corner-case detected") + + // The best mitigation to this problem is to tell users to not use crappy Finder. + // Another possible mitigation is to change the use the value of X-Expected-Entity-Length header in the Content-Length header. + expectedInt, err := strconv.ParseInt(expected, 10, 64) + if err != nil { + log.Error().Err(err).Msg("error parsing expected length") + w.WriteHeader(http.StatusBadRequest) + return err + } + r.Header.Set(net.HeaderContentLength, expected) + r.ContentLength = expectedInt + return nil +} + +func isContentRange(r *http.Request) bool { + /* + Content-Range is dangerous for PUT requests: PUT per definition + stores a full resource. draft-ietf-httpbis-p2-semantics-15 says + in section 7.6: + An origin server SHOULD reject any PUT request that contains a + Content-Range header field, since it might be misinterpreted as + partial content (or might be partial content that is being mistakenly + PUT as a full representation). Partial content updates are possible + by targeting a separately identified resource with state that + overlaps a portion of the larger resource, or by using a different + method that has been specifically defined for partial updates (for + example, the PATCH method defined in [RFC5789]). + This clarifies RFC2616 section 9.6: + The recipient of the entity MUST NOT ignore any Content-* + (e.g. Content-Range) headers that it does not understand or implement + and MUST return a 501 (Not Implemented) response in such cases. + OTOH is a PUT request with a Content-Range currently the only way to + continue an aborted upload request and is supported by curl, mod_dav, + Tomcat and others. 
Since some clients do use this feature which results + in unexpected behaviour (cf PEAR::HTTP_WebDAV_Client 1.0.1), we reject + all PUT requests with a Content-Range for now. + */ + return r.Header.Get(net.HeaderContentRange) != "" +} + +func (s *svc) handlePathPut(w http.ResponseWriter, r *http.Request, ns string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "put") + defer span.End() + + fn := path.Join(ns, r.URL.Path) + sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() + + if err := ValidateName(filename(r.URL.Path), s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, err.Error(), "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + + space, status, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, fn) + if err != nil { + sublog.Error().Err(err).Str("path", fn).Msg("failed to look up storage space") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, status) + return + } + + s.handlePut(ctx, w, r, spacelookup.MakeRelativeReference(space, fn, false), sublog) +} + +func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, log zerolog.Logger) { + if !checkPreconditions(w, r, log) { + // checkPreconditions handles error returns + return + } + + length, err := getContentLength(r) + if err != nil { + log.Error().Err(err).Msg("error getting the content length") + w.WriteHeader(http.StatusBadRequest) + return + } + + client, err := s.gatewaySelector.Next() + if err != nil { + log.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return + } + + // Test if the target is a secret filedrop + tokenStatInfo, ok := TokenStatInfoFromContext(ctx) + // We assume that when the uploader can create containers, but is not 
allowed to list them, it is a secret file drop + if ok && tokenStatInfo.GetPermissionSet().CreateContainer && !tokenStatInfo.GetPermissionSet().ListContainer { + // TODO we can skip this stat if the tokenStatInfo is the direct parent + sReq := &provider.StatRequest{ + Ref: ref, + } + sRes, err := client.Stat(ctx, sReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + // We also need to continue if we are not allowed to stat a resource. We may not have stat permission. That still means it exists and we need to find a new filename. + switch sRes.Status.Code { + case rpc.Code_CODE_OK, rpc.Code_CODE_PERMISSION_DENIED: + // find next filename + newName, status, err := FindName(ctx, client, filepath.Base(ref.Path), sRes.GetInfo().GetParentId()) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + log.Error().Interface("status", status).Msg("error listing file") + errors.HandleErrorStatus(&log, w, status) + return + } + ref.Path = utils.MakeRelativePath(filepath.Join(filepath.Dir(ref.GetPath()), newName)) + case rpc.Code_CODE_NOT_FOUND: + // just continue with normal upload + default: + log.Error().Interface("status", sRes.Status).Msg("error stating file") + errors.HandleErrorStatus(&log, w, sRes.Status) + return + } + } + + opaque := &typespb.Opaque{} + if mtime := r.Header.Get(net.HeaderOCMtime); mtime != "" { + utils.AppendPlainToOpaque(opaque, net.HeaderOCMtime, mtime) + + // TODO: find a way to check if the storage really accepted the value + w.Header().Set(net.HeaderOCMtime, "accepted") + } + if length == 0 { + tfRes, err := client.TouchFile(ctx, &provider.TouchFileRequest{ + Opaque: opaque, + Ref: ref, + }) + if err != nil { + log.Error().Err(err).Msg("error sending grpc touch file request") + w.WriteHeader(http.StatusInternalServerError) + 
return + } + if tfRes.Status.Code == rpc.Code_CODE_OK { + sRes, err := client.Stat(ctx, &provider.StatRequest{ + Ref: ref, + }) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if sRes.Status.Code != rpc.Code_CODE_OK { + log.Error().Interface("status", sRes.Status).Msg("error stating file") + errors.HandleErrorStatus(&log, w, sRes.Status) + return + } + + w.Header().Set(net.HeaderETag, sRes.Info.Etag) + w.Header().Set(net.HeaderOCETag, sRes.Info.Etag) + w.Header().Set(net.HeaderOCFileID, storagespace.FormatResourceID(sRes.Info.Id)) + w.Header().Set(net.HeaderLastModified, net.RFC1123Z(sRes.Info.Mtime)) + + w.WriteHeader(http.StatusCreated) + return + } + + if tfRes.Status.Code != rpc.Code_CODE_ALREADY_EXISTS { + log.Error().Interface("status", tfRes.Status).Msg("error touching file") + errors.HandleErrorStatus(&log, w, tfRes.Status) + return + } + } + + utils.AppendPlainToOpaque(opaque, net.HeaderUploadLength, strconv.FormatInt(length, 10)) + + // curl -X PUT https://demo.example.org/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef' + + var cparts []string + // TUS Upload-Checksum header takes precedence + if checksum := r.Header.Get(net.HeaderUploadChecksum); checksum != "" { + cparts = strings.SplitN(checksum, " ", 2) + if len(cparts) != 2 { + log.Debug().Str("upload-checksum", checksum).Msg("invalid Upload-Checksum format, expected '[algorithm] [checksum]'") + w.WriteHeader(http.StatusBadRequest) + return + } + // Then try OpenCloud header + } else if checksum := r.Header.Get(net.HeaderOCChecksum); checksum != "" { + cparts = strings.SplitN(checksum, ":", 2) + if len(cparts) != 2 { + log.Debug().Str("oc-checksum", checksum).Msg("invalid OC-Checksum format, expected '[algorithm]:[checksum]'") + w.WriteHeader(http.StatusBadRequest) + return + } + } + // we do not check the algorithm here, because 
it might depend on the storage + if len(cparts) == 2 { + // Translate into TUS style Upload-Checksum header + // algorithm is always lowercase, checksum is separated by space + utils.AppendPlainToOpaque(opaque, net.HeaderUploadChecksum, strings.ToLower(cparts[0])+" "+cparts[1]) + } + + uReq := &provider.InitiateFileUploadRequest{ + Ref: ref, + Opaque: opaque, + LockId: requestLock(r), + } + if ifMatch := r.Header.Get(net.HeaderIfMatch); ifMatch != "" { + uReq.Options = &provider.InitiateFileUploadRequest_IfMatch{IfMatch: ifMatch} + } + + // where to upload the file? + uRes, err := client.InitiateFileUpload(ctx, uReq) + if err != nil { + log.Error().Err(err).Msg("error initiating file upload") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if uRes.Status.Code != rpc.Code_CODE_OK { + if r.ProtoMajor == 1 { + // drain body to avoid `connection closed` errors + _, _ = io.Copy(io.Discard, r.Body) + } + switch uRes.Status.Code { + case rpc.Code_CODE_PERMISSION_DENIED: + status := http.StatusForbidden + m := uRes.Status.Message + // check if user has access to parent + sRes, err := client.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ + ResourceId: ref.ResourceId, + Path: utils.MakeRelativePath(path.Dir(ref.Path)), + }}) + if err != nil { + log.Error().Err(err).Msg("error performing stat grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if sRes.Status.Code != rpc.Code_CODE_OK { + // return not found error so we do not leak existence of a file + // TODO hide permission failed for users without access in every kind of request + // TODO should this be done in the driver? 
+ status = http.StatusNotFound + } + if status == http.StatusNotFound { + m = "Resource not found" // mimic the oc10 error message + } + w.WriteHeader(status) + b, err := errors.Marshal(status, m, "", "") + errors.HandleWebdavError(&log, w, b, err) + case rpc.Code_CODE_ABORTED: + w.WriteHeader(http.StatusPreconditionFailed) + case rpc.Code_CODE_FAILED_PRECONDITION: + w.WriteHeader(http.StatusConflict) + default: + errors.HandleErrorStatus(&log, w, uRes.Status) + } + return + } + + // only send actual PUT request if file has bytes. Otherwise the initiate file upload request creates the file + if length != 0 { + var ep, token string + for _, p := range uRes.Protocols { + if p.Protocol == "simple" { + ep, token = p.UploadEndpoint, p.Token + } + } + + httpReq, err := rhttp.NewRequest(ctx, http.MethodPut, ep, r.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header)) + httpReq.Header.Set(TokenTransportHeader, token) + httpReq.ContentLength = length + + httpRes, err := s.client.Do(httpReq) + if err != nil { + log.Error().Err(err).Msg("error doing PUT request to data service") + w.WriteHeader(http.StatusInternalServerError) + return + } + defer httpRes.Body.Close() + if httpRes.StatusCode != http.StatusOK { + if httpRes.StatusCode == http.StatusPartialContent { + w.WriteHeader(http.StatusPartialContent) + return + } + if httpRes.StatusCode == errtypes.StatusChecksumMismatch { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "The computed checksum does not match the one received from the client.", "", "") + errors.HandleWebdavError(&log, w, b, err) + return + } + log.Error().Int("status", httpRes.StatusCode).Msg("PUT request to data server failed") + w.WriteHeader(httpRes.StatusCode) + return + } + + // copy headers if they are present + if httpRes.Header.Get(net.HeaderETag) != "" { + w.Header().Set(net.HeaderETag, httpRes.Header.Get(net.HeaderETag)) + } + if 
httpRes.Header.Get(net.HeaderOCETag) != "" { + w.Header().Set(net.HeaderOCETag, httpRes.Header.Get(net.HeaderOCETag)) + } + if httpRes.Header.Get(net.HeaderOCFileID) != "" { + w.Header().Set(net.HeaderOCFileID, httpRes.Header.Get(net.HeaderOCFileID)) + } + if httpRes.Header.Get(net.HeaderLastModified) != "" { + w.Header().Set(net.HeaderLastModified, httpRes.Header.Get(net.HeaderLastModified)) + } + } + + // file was new + // FIXME make created flag a property on the InitiateFileUploadResponse + if created := utils.ReadPlainFromOpaque(uRes.Opaque, "created"); created == "true" { + w.WriteHeader(http.StatusCreated) + return + } + + // overwrite + w.WriteHeader(http.StatusNoContent) +} + +func (s *svc) handleSpacesPut(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces_put") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Logger() + + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + if ref.GetResourceId().GetOpaqueId() != "" && ref.GetResourceId().GetSpaceId() != ref.GetResourceId().GetOpaqueId() && r.URL.Path == "/" { + s.handlePut(ctx, w, r, &ref, sublog) + return + } + + if err := ValidateName(filename(ref.Path), s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, err.Error(), "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + + s.handlePut(ctx, w, r, &ref, sublog) +} + +func checkPreconditions(w http.ResponseWriter, r *http.Request, log zerolog.Logger) bool { + if isContentRange(r) { + log.Debug().Msg("Content-Range not supported for PUT") + w.WriteHeader(http.StatusNotImplemented) + return false + } + + if sufferMacOSFinder(r) { + err := handleMacOSFinder(w, r) + if err != nil { + log.Debug().Err(err).Msg("error 
handling Mac OS corner-case") + w.WriteHeader(http.StatusInternalServerError) + return false + } + } + return true +} + +func getContentLength(r *http.Request) (int64, error) { + length, err := strconv.ParseInt(r.Header.Get(net.HeaderContentLength), 10, 64) + if err != nil { + // Fallback to Upload-Length + length, err = strconv.ParseInt(r.Header.Get(net.HeaderUploadLength), 10, 64) + if err != nil { + return 0, err + } + } + return length, nil +} diff --git a/services/webdav/pkg/ocdav/redirect.go b/services/webdav/pkg/ocdav/redirect.go new file mode 100644 index 0000000000..0e3d0bc9c3 --- /dev/null +++ b/services/webdav/pkg/ocdav/redirect.go @@ -0,0 +1,32 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "net/http" + "net/url" + "path" +) + +func (s *svc) handleLegacyPath(w http.ResponseWriter, r *http.Request) { + query := r.URL.Query() + dir := query.Get("dir") + url := s.c.PublicURL + path.Join("#", "/files/list/all", url.PathEscape(dir)) + http.Redirect(w, r, url, http.StatusMovedPermanently) +} diff --git a/services/webdav/pkg/ocdav/report.go b/services/webdav/pkg/ocdav/report.go new file mode 100644 index 0000000000..dbcfd476a5 --- /dev/null +++ b/services/webdav/pkg/ocdav/report.go @@ -0,0 +1,199 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
package ocdav

import (
	"encoding/xml"
	"io"
	"net/http"

	rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
	providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
	"github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net"
	"github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind"
	"github.com/opencloud-eu/reva/v2/pkg/appctx"
	ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
	"github.com/opencloud-eu/reva/v2/pkg/permission"
	"github.com/opencloud-eu/reva/v2/pkg/utils"
)

// Local names of the XML elements recognized in a REPORT request body.
const (
	elementNameSearchFiles = "search-files"
	elementNameFilterFiles = "filter-files"
)

// handleReport parses a WebDAV REPORT request body and dispatches to the
// matching report implementation. Unknown or absent report elements answer
// 501 Not Implemented. The status returned by readReport is used as the HTTP
// status on parse errors.
func (s *svc) handleReport(w http.ResponseWriter, r *http.Request, ns string) {
	ctx := r.Context()
	log := appctx.GetLogger(ctx)
	// fn := path.Join(ns, r.URL.Path)

	rep, status, err := readReport(r.Body)
	if err != nil {
		log.Error().Err(err).Msg("error reading report")
		w.WriteHeader(status)
		return
	}
	if rep.SearchFiles != nil {
		s.doSearchFiles(w, r, rep.SearchFiles)
		return
	}

	if rep.FilterFiles != nil {
		s.doFilterFiles(w, r, rep.FilterFiles, ns)
		return
	}

	// TODO(jfd): implement report

	w.WriteHeader(http.StatusNotImplemented)
}

// doSearchFiles would serve a search-files report; it is not implemented and
// always answers 501.
func (s *svc) doSearchFiles(w http.ResponseWriter, r *http.Request, sf *reportSearchFiles) {
	w.WriteHeader(http.StatusNotImplemented)
}

// doFilterFiles serves a filter-files report. Only the "favorite" filter rule
// is implemented: it lists the current user's favorite resources (permission
// checked via permission.ListFavorites), stats each favorite, and renders the
// surviving entries as a PROPFIND-style 207 multistatus response. Favorites
// that fail to stat are skipped (logged), not fatal.
func (s *svc) doFilterFiles(w http.ResponseWriter, r *http.Request, ff *reportFilterFiles, namespace string) {
	ctx := r.Context()
	log := appctx.GetLogger(ctx)

	if ff.Rules.Favorite {
		// List the users favorite resources.
		client, err := s.gatewaySelector.Next()
		if err != nil {
			log.Error().Err(err).Msg("error selecting next gateway client")
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		currentUser := ctxpkg.ContextMustGetUser(ctx)
		ok, err := utils.CheckPermission(ctx, permission.ListFavorites, client)
		if err != nil {
			log.Error().Err(err).Msg("error checking permission")
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		if !ok {
			log.Info().Interface("user", currentUser).Msg("user not allowed to list favorites")
			w.WriteHeader(http.StatusForbidden)
			return
		}
		favorites, err := s.favoritesManager.ListFavorites(ctx, currentUser.Id)
		if err != nil {
			log.Error().Err(err).Msg("error getting favorites")
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		// Stat every favorite; only successfully stat-ed resources are listed.
		infos := make([]*provider.ResourceInfo, 0, len(favorites))
		for i := range favorites {
			statRes, err := client.Stat(ctx, &providerv1beta1.StatRequest{Ref: &providerv1beta1.Reference{ResourceId: favorites[i]}})
			if err != nil {
				log.Error().Err(err).Msg("error getting resource info")
				continue
			}
			if statRes.Status.Code != rpcv1beta1.Code_CODE_OK {
				log.Error().Interface("stat_response", statRes).Msg("error getting resource info")
				continue
			}
			infos = append(infos, statRes.Info)
		}

		// Honor "Prefer: return=minimal" (header name is canonicalized by Get).
		prefer := net.ParsePrefer(r.Header.Get("prefer"))
		returnMinimal := prefer[net.HeaderPreferReturn] == "minimal"

		responsesXML, err := propfind.MultistatusResponse(ctx, &propfind.XML{Prop: ff.Prop}, infos, s.c.PublicURL, namespace, nil, returnMinimal, nil)
		if err != nil {
			log.Error().Err(err).Msg("error formatting propfind")
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol")
		w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8")
		w.Header().Set(net.HeaderVary, net.HeaderPrefer)
		if returnMinimal {
			w.Header().Set(net.HeaderPreferenceApplied, "return=minimal")
		}
		w.WriteHeader(http.StatusMultiStatus)
		if _, err := w.Write(responsesXML); err != nil {
			log.Err(err).Msg("error writing response")
		}
	}
}

// report is the parsed REPORT request body; exactly one of the pointers is
// set by readReport depending on which element was found.
type report struct {
	SearchFiles *reportSearchFiles
	// FilterFiles TODO add this for tag based search
	FilterFiles *reportFilterFiles `xml:"filter-files"`
}

// reportSearchFiles models the search-files report element.
type reportSearchFiles struct {
	XMLName xml.Name                `xml:"search-files"`
	Lang    string                  `xml:"xml:lang,attr,omitempty"`
	Prop    propfind.Props          `xml:"DAV: prop"`
	Search  reportSearchFilesSearch `xml:"search"`
}

// reportSearchFilesSearch carries the search pattern and paging options.
type reportSearchFilesSearch struct {
	Pattern string `xml:"search"`
	Limit   int    `xml:"limit"`
	Offset  int    `xml:"offset"`
}

// reportFilterFiles models the filter-files report element.
type reportFilterFiles struct {
	XMLName xml.Name               `xml:"filter-files"`
	Lang    string                 `xml:"xml:lang,attr,omitempty"`
	Prop    propfind.Props         `xml:"DAV: prop"`
	Rules   reportFilterFilesRules `xml:"filter-rules"`
}

// reportFilterFilesRules holds the supported filter rules.
type reportFilterFilesRules struct {
	Favorite  bool `xml:"favorite"`
	SystemTag int  `xml:"systemtag"`
}

// readReport scans the XML token stream for a search-files or filter-files
// start element and decodes the matching struct into the returned report.
// io.EOF terminates the scan successfully (status 0); any other tokenizer or
// decode error is reported with http.StatusBadRequest.
func readReport(r io.Reader) (rep *report, status int, err error) {
	decoder := xml.NewDecoder(r)
	rep = &report{}
	for {
		t, err := decoder.Token()
		if err == io.EOF {
			// io.EOF is a successful end
			return rep, 0, nil
		}
		if err != nil {
			return nil, http.StatusBadRequest, err
		}

		if v, ok := t.(xml.StartElement); ok {
			if v.Name.Local == elementNameSearchFiles {
				var repSF reportSearchFiles
				err = decoder.DecodeElement(&repSF, &v)
				if err != nil {
					return nil, http.StatusBadRequest, err
				}
				rep.SearchFiles = &repSF
			} else if v.Name.Local == elementNameFilterFiles {
				var repFF reportFilterFiles
				err = decoder.DecodeElement(&repFF, &v)
				if err != nil {
					return nil, http.StatusBadRequest, err
				}
				rep.FilterFiles = &repFF
			}
		}
	}
}
2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +import ( + "strings" + "testing" +) + +func TestUnmarshallReportFilterFiles(t *testing.T) { + ffXML := ` + + + + + + + + + + + + + + + + + + 1 + +` + + reader := strings.NewReader(ffXML) + + report, status, err := readReport(reader) + if status != 0 || err != nil { + t.Error("Failed to unmarshal filter-files xml") + } + + if report.FilterFiles == nil { + t.Error("Failed to unmarshal filter-files xml. FilterFiles is nil") + } + + if report.FilterFiles.Rules.Favorite == false { + t.Error("Failed to correctly unmarshal filter-rules. Favorite is expected to be true.") + } +} diff --git a/services/webdav/pkg/ocdav/spacelookup/spacelookup.go b/services/webdav/pkg/ocdav/spacelookup/spacelookup.go new file mode 100644 index 0000000000..a19f5eb91c --- /dev/null +++ b/services/webdav/pkg/ocdav/spacelookup/spacelookup.go @@ -0,0 +1,191 @@ +// Copyright 2018-2022 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package spacelookup + +import ( + "context" + "fmt" + "strconv" + "strings" + + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + storageProvider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +// LookupReferenceForPath returns: +// a reference with root and relative path +// the status and error for the lookup +func LookupReferenceForPath(ctx context.Context, selector pool.Selectable[gateway.GatewayAPIClient], path string) (*storageProvider.Reference, *rpc.Status, error) { + space, cs3Status, err := LookUpStorageSpaceForPath(ctx, selector, path) + if err != nil || cs3Status.Code != rpc.Code_CODE_OK { + return nil, cs3Status, err + } + spacePath := string(space.Opaque.Map["path"].Value) // FIXME error checks + return &storageProvider.Reference{ + ResourceId: space.Root, + Path: utils.MakeRelativePath(strings.TrimPrefix(path, spacePath)), + }, cs3Status, nil +} + +// LookUpStorageSpaceForPath returns: +// the storage spaces responsible for a 
path +// the status and error for the lookup +func LookUpStorageSpaceForPath(ctx context.Context, selector pool.Selectable[gateway.GatewayAPIClient], path string) (*storageProvider.StorageSpace, *rpc.Status, error) { + // TODO add filter to only fetch spaces changed in the last 30 sec? + // TODO cache space information, invalidate after ... 5min? so we do not need to fetch all spaces? + // TODO use ListContainerStream to listen for changes + // retrieve a specific storage space + lSSReq := &storageProvider.ListStorageSpacesRequest{ + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte(path), + }, + "unique": { + Decoder: "plain", + Value: []byte(strconv.FormatBool(true)), + }, + }, + }, + } + + client, err := selector.Next() + if err != nil { + return nil, status.NewInternal(ctx, "could not select next client"), err + } + + lSSRes, err := client.ListStorageSpaces(ctx, lSSReq) + if err != nil || lSSRes.Status.Code != rpc.Code_CODE_OK { + status := status.NewStatusFromErrType(ctx, "failed to lookup storage spaces", err) + if lSSRes != nil { + status = lSSRes.Status + } + return nil, status, err + } + switch len(lSSRes.StorageSpaces) { + case 0: + return nil, status.NewNotFound(ctx, "no space found"), nil + case 1: + return lSSRes.StorageSpaces[0], lSSRes.Status, nil + } + + return nil, status.NewInternal(ctx, "too many spaces returned"), nil +} + +// LookUpStorageSpacesForPathWithChildren returns: +// the list of storage spaces responsible for a path +// the status and error for the lookup +func LookUpStorageSpacesForPathWithChildren(ctx context.Context, client gateway.GatewayAPIClient, path string) ([]*storageProvider.StorageSpace, *rpc.Status, error) { + // TODO add filter to only fetch spaces changed in the last 30 sec? + // TODO cache space information, invalidate after ... 5min? so we do not need to fetch all spaces? 
+ // TODO use ListContainerStream to listen for changes + // retrieve a specific storage space + lSSReq := &storageProvider.ListStorageSpacesRequest{ + // get all fields, including root_info + FieldMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, + } + // list all providers at or below the given path + lSSReq.Opaque = utils.AppendPlainToOpaque(lSSReq.Opaque, "path", path) + // we want to get all metadata? really? when looking up the space roots we actually only want etag, mtime and type so we can construct a child ... + lSSReq.Opaque = utils.AppendPlainToOpaque(lSSReq.Opaque, "metadata", "*") + + lSSRes, err := client.ListStorageSpaces(ctx, lSSReq) + if err != nil { + return nil, nil, err + } + if lSSRes.Status.GetCode() != rpc.Code_CODE_OK { + return nil, lSSRes.Status, err + } + + return lSSRes.StorageSpaces, lSSRes.Status, nil +} + +// LookUpStorageSpaceByID find a space by ID +func LookUpStorageSpaceByID(ctx context.Context, client gateway.GatewayAPIClient, spaceID string) (*storageProvider.StorageSpace, *rpc.Status, error) { + // retrieve a specific storage space + lSSReq := &storageProvider.ListStorageSpacesRequest{ + Opaque: &typesv1beta1.Opaque{}, + Filters: []*storageProvider.ListStorageSpacesRequest_Filter{ + { + Type: storageProvider.ListStorageSpacesRequest_Filter_TYPE_ID, + Term: &storageProvider.ListStorageSpacesRequest_Filter_Id{ + Id: &storageProvider.StorageSpaceId{ + OpaqueId: spaceID, + }, + }, + }, + }, + } + + lSSRes, err := client.ListStorageSpaces(ctx, lSSReq) + if err != nil || lSSRes.Status.Code != rpc.Code_CODE_OK { + return nil, lSSRes.Status, err + } + + switch len(lSSRes.StorageSpaces) { + case 0: + return nil, &rpc.Status{Code: rpc.Code_CODE_NOT_FOUND}, nil // since the caller only expects a single space return not found status + case 1: + return lSSRes.StorageSpaces[0], lSSRes.Status, nil + default: + return nil, nil, fmt.Errorf("unexpected number of spaces %d", len(lSSRes.StorageSpaces)) + } +} + +// MakeStorageSpaceReference find 
a space by id and returns a relative reference
func MakeStorageSpaceReference(spaceID string, relativePath string) (storageProvider.Reference, error) {
	rid, err := storagespace.ParseID(spaceID)
	if err != nil {
		return storageProvider.Reference{}, err
	}
	// Tolerate IDs that omit the sharesstorageprovider storage id.
	if rid.StorageId == "" && rid.SpaceId == utils.ShareStorageSpaceID {
		rid.StorageId = utils.ShareStorageProviderID
	}
	ref := storageProvider.Reference{
		ResourceId: &rid,
		Path:       utils.MakeRelativePath(relativePath),
	}
	return ref, nil
}

// MakeRelativeReference returns a relative reference for the given space and path
func MakeRelativeReference(space *storageProvider.StorageSpace, relativePath string, spacesDavRequest bool) *storageProvider.Reference {
	opaque := space.Opaque
	if opaque == nil || opaque.Map == nil || opaque.Map["path"] == nil || opaque.Map["path"].Decoder != "plain" {
		// The space carries no plain-decoded mount path: not mounted.
		return nil
	}

	mountPath := string(opaque.Map["path"].Value)
	rel := "."
	switch {
	case strings.HasPrefix(relativePath, mountPath):
		// Strip the mount prefix and make the remainder relative.
		rel = utils.MakeRelativePath(strings.TrimPrefix(relativePath, mountPath))
	case spacesDavRequest:
		// Spaces-DAV requests are already relative to the space root.
		rel = utils.MakeRelativePath(relativePath)
	}

	return &storageProvider.Reference{
		ResourceId: space.Root,
		Path:       rel,
	}
}
package ocdav

import (
	"net/http"
	"path"
	"strings"

	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
	"github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config"
	"github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors"
	"github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net"
	"github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind"
	"github.com/opencloud-eu/reva/v2/pkg/appctx"
	"github.com/opencloud-eu/reva/v2/pkg/rhttp/router"
	"github.com/opencloud-eu/reva/v2/pkg/storagespace"
	"github.com/opencloud-eu/reva/v2/pkg/utils"
	"google.golang.org/protobuf/proto"
)

// SpacesHandler routes spaces-style WebDAV requests (including the per-space
// trashbin) to the matching method handlers.
type SpacesHandler struct {
	gatewaySvc        string
	namespace         string
	useLoggedInUserNS bool
}

// Init reads the gateway service address and the WebDAV namespace from the
// service configuration.
func (h *SpacesHandler) Init(c *config.Config) error {
	h.gatewaySvc = c.GatewaySvc
	h.namespace = path.Join("/", c.WebdavNamespace)
	h.useLoggedInUserNS = true
	return nil
}

// Handler handles requests.
//
// The first path segment is the space ID (or the trashbin marker); the HTTP
// method then selects the handler. Handlers either write the response
// themselves (status stays 0) or return a status plus error that is marshaled
// into a WebDAV error body here.
func (h *SpacesHandler) Handler(s *svc, trashbinHandler *TrashbinHandler) http.Handler {
	// NOTE: this local shadows the imported "config" package inside the
	// closure; only Init above needs the package.
	config := s.Config()
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ctx := r.Context()
		// log := appctx.GetLogger(ctx)

		if r.Method == http.MethodOptions {
			s.handleOptions(w, r)
			return
		}

		var segment string
		segment, r.URL.Path = router.ShiftPath(r.URL.Path)
		if segment == "" {
			// listing is disabled, no auth will change that
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}

		if segment == _trashbinPath {
			h.handleSpacesTrashbin(w, r, s, trashbinHandler)
			return
		}

		spaceID := segment

		// TODO initialize status with http.StatusBadRequest
		// TODO initialize err with errors.ErrUnsupportedMethod
		var status int // status 0 means the handler already sent the response
		var err error
		switch r.Method {
		case MethodPropfind:
			p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, s.urlSigner, config)
			p.HandleSpacesPropfind(w, r, spaceID)
		case MethodProppatch:
			status, err = s.handleSpacesProppatch(w, r, spaceID)
		case MethodLock:
			status, err = s.handleSpacesLock(w, r, spaceID)
		case MethodUnlock:
			status, err = s.handleSpaceUnlock(w, r, spaceID)
		case MethodMkcol:
			status, err = s.handleSpacesMkCol(w, r, spaceID)
		case MethodMove:
			s.handleSpacesMove(w, r, spaceID)
		case MethodCopy:
			s.handleSpacesCopy(w, r, spaceID)
		case MethodReport:
			s.handleReport(w, r, spaceID)
		case http.MethodGet:
			s.handleSpacesGet(w, r, spaceID)
		case http.MethodPut:
			s.handleSpacesPut(w, r, spaceID)
		case http.MethodPost:
			s.handleSpacesTusPost(w, r, spaceID)
		case http.MethodOptions:
			s.handleOptions(w, r)
		case http.MethodHead:
			s.handleSpacesHead(w, r, spaceID)
		case http.MethodDelete:
			status, err = s.handleSpacesDelete(w, r, spaceID)
		default:
			http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented)
		}

		if status != 0 { // 0 means the handler already sent the response
			w.WriteHeader(status)
			if status != http.StatusNoContent {
				// NOTE(review): err.Error() panics if a handler ever returns a
				// non-zero status with a nil error — confirm all handlers
				// uphold that invariant.
				var b []byte
				if b, err = errors.Marshal(status, err.Error(), "", ""); err == nil {
					_, err = w.Write(b)
				}
			}
		}
		if err != nil {
			appctx.GetLogger(r.Context()).Error().Err(err).Msg(err.Error())
		}
	})
}

// handleSpacesTrashbin serves the per-space trashbin: PROPFIND lists the
// deleted items, MOVE restores a single item (the destination is resolved
// relative to the trash base URI), DELETE purges an item. Other methods are
// not implemented.
func (h *SpacesHandler) handleSpacesTrashbin(w http.ResponseWriter, r *http.Request, s *svc, trashbinHandler *TrashbinHandler) {
	ctx := r.Context()
	log := appctx.GetLogger(ctx)

	spaceID, key := splitSpaceAndKey(r.URL.Path)
	if spaceID == "" {
		// listing is disabled, no auth will change that
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	ref, err := storagespace.ParseReference(spaceID)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	switch r.Method {
	case MethodPropfind:
		trashbinHandler.listTrashbin(w, r, s, &ref, path.Join(_trashbinPath, spaceID), key)
	case MethodMove:
		// Restoring requires a concrete trash key.
		if key == "" {
			http.Error(w, "501 Not implemented", http.StatusNotImplemented)
			break
		}
		// find path in url relative to trash base
		baseURI := ctx.Value(net.CtxKeyBaseURI).(string)
		baseURI = path.Join(baseURI, spaceID)

		dh := r.Header.Get(net.HeaderDestination)
		dst, err := net.ParseDestination(baseURI, dh)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		log.Debug().Str("key", key).Str("dst", dst).Msg("spaces restore")

		// Clone the space reference so the restore destination does not
		// mutate the source reference.
		dstRef := proto.Clone(&ref).(*provider.Reference)
		dstRef.Path = utils.MakeRelativePath(dst)

		trashbinHandler.restore(w, r, s, &ref, dstRef, key)
	case http.MethodDelete:
		trashbinHandler.delete(w, r, s, &ref, key)
	default:
		http.Error(w, "501 Not implemented", http.StatusNotImplemented)
	}
}

// splitSpaceAndKey splits a trashbin path "<space>/<key>" into its space ID
// and (optional) trash key; key is empty when no slash follows the space.
func splitSpaceAndKey(p string) (space, key string) {
	p = strings.TrimPrefix(p, "/")
	parts := strings.SplitN(p, "/", 2)
	space = parts[0]
	if len(parts) > 1 {
		key = parts[1]
	}
	return
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocdav + +import ( + "encoding/json" + "net/http" + + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/owncloud/ocs" +) + +func (s *svc) doStatus(w http.ResponseWriter, r *http.Request) { + log := appctx.GetLogger(r.Context()) + status := &ocs.Status{ + Installed: true, + Maintenance: false, + NeedsDBUpgrade: false, + Version: s.c.Version, + VersionString: s.c.VersionString, + Edition: s.c.Edition, + ProductName: s.c.ProductName, + ProductVersion: s.c.ProductVersion, + Product: s.c.Product, + } + + statusJSON, err := json.MarshalIndent(status, "", " ") + if err != nil { + http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + return + } + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if _, err := w.Write(statusJSON); err != nil { + log.Err(err).Msg("error writing response") + } +} diff --git a/services/webdav/pkg/ocdav/tpc.go b/services/webdav/pkg/ocdav/tpc.go new file mode 100644 index 0000000000..6a3202aed3 --- /dev/null +++ b/services/webdav/pkg/ocdav/tpc.go @@ -0,0 +1,421 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
const (
	// PerfMarkerResponseTime corresponds to the interval (in seconds) at which
	// a performance marker is sent back to the TPC client.
	PerfMarkerResponseTime float64 = 5
)

// PerfResponse provides a single chunk of performance marker response.
type PerfResponse struct {
	Timestamp time.Time // wall-clock time of the marker
	Bytes     uint64    // bytes transferred so far on this stripe
	Index     int       // zero-based stripe index
	Count     int       // total number of stripes
}

// getPerfResponseString renders the marker in the line-oriented
// "Perf Marker ... End" format consumed by HTTP-TPC clients.
func (p *PerfResponse) getPerfResponseString() string {
	var sb strings.Builder
	sb.WriteString("Perf Marker\n")
	sb.WriteString("Timestamp: " + strconv.FormatInt(p.Timestamp.Unix(), 10) + "\n")
	sb.WriteString("Stripe Bytes Transferred: " + strconv.FormatUint(p.Bytes, 10) + "\n")
	// BUGFIX: the HTTP-TPC performance-marker format names this field
	// "Stripe Index"; it previously read "Strip Index".
	sb.WriteString("Stripe Index: " + strconv.Itoa(p.Index) + "\n")
	sb.WriteString("Total Stripe Count: " + strconv.Itoa(p.Count) + "\n")
	sb.WriteString("End\n")
	return sb.String()
}

// WriteCounter counts the number of bytes transferred and reports
// back to the TPC client about the progress of the transfer
// through the performance marker response stream.
type WriteCounter struct {
	Total    uint64    // total bytes counted so far
	PrevTime time.Time // time of the last emitted marker
	w        http.ResponseWriter
}

// SendPerfMarker flushes a single chunk (performance marker) as
// part of the chunked transfer encoding scheme. It panics if the
// underlying ResponseWriter does not support flushing.
func (wc *WriteCounter) SendPerfMarker(size uint64) {
	flusher, ok := wc.w.(http.Flusher)
	if !ok {
		panic("expected http.ResponseWriter to be an http.Flusher")
	}
	perfResp := PerfResponse{time.Now(), size, 0, 1}
	pString := perfResp.getPerfResponseString()
	fmt.Fprintln(wc.w, pString)
	flusher.Flush()
}

// Write implements io.Writer: it only counts the bytes (the data itself is
// consumed elsewhere) and emits a performance marker whenever at least
// PerfMarkerResponseTime seconds have passed since the previous one.
func (wc *WriteCounter) Write(p []byte) (int, error) {
	n := len(p)
	wc.Total += uint64(n)
	nowTime := time.Now()

	diff := nowTime.Sub(wc.PrevTime).Seconds()
	if diff >= PerfMarkerResponseTime {
		wc.SendPerfMarker(wc.Total)
		wc.PrevTime = nowTime
	}
	return n, nil
}

//
// An example of an HTTP TPC Pull
//
// +-----------------+        GET        +----------------+
// |   Src server    | <---------------- |  Dest server   |
// |    (Remote)     | ----------------> |     (Reva)     |
// +-----------------+        Data       +----------------+
//                                            ^
//                                            |
//                                            | COPY
//                                            |
//                                       +----------+
//                                       |  Client  |
//                                       +----------+

// handleTPCPull performs a GET request on the remote site and uploads it
// to the requested reva endpoint.
+func (s *svc) handleTPCPull(ctx context.Context, w http.ResponseWriter, r *http.Request, ns string) { + src := r.Header.Get("Source") + dst := path.Join(ns, r.URL.Path) + sublog := appctx.GetLogger(ctx).With().Str("src", src).Str("dst", dst).Logger() + + oh := r.Header.Get(net.HeaderOverwrite) + overwrite, err := net.ParseOverwrite(oh) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + m := fmt.Sprintf("Overwrite header is set to incorrect value %v", overwrite) + sublog.Warn().Msgf("HTTP TPC Pull: %s", m) + b, err := errors.Marshal(http.StatusBadRequest, m, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + sublog.Debug().Bool("overwrite", overwrite).Msg("TPC Pull") + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return + } + // check if destination exists + ref := &provider.Reference{Path: dst} + dstStatReq := &provider.StatRequest{Ref: ref} + dstStatRes, err := client.Stat(ctx, dstStatReq) + if err != nil { + sublog.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if dstStatRes.Status.Code != rpc.Code_CODE_OK && dstStatRes.Status.Code != rpc.Code_CODE_NOT_FOUND { + errors.HandleErrorStatus(&sublog, w, dstStatRes.Status) + return + } + if dstStatRes.Status.Code == rpc.Code_CODE_OK && oh == "F" { + sublog.Warn().Bool("overwrite", overwrite).Msg("Destination already exists") + w.WriteHeader(http.StatusPreconditionFailed) // 412, see https://tools.ietf.org/html/rfc4918#section-9.8.5 + return + } + + err = s.performHTTPPull(ctx, s.gatewaySelector, r, w, ns) + if err != nil { + sublog.Error().Err(err).Msg("error performing TPC Pull") + return + } + fmt.Fprintf(w, "success: Created") +} + +func (s *svc) performHTTPPull(ctx context.Context, selector pool.Selectable[gateway.GatewayAPIClient], r *http.Request, w http.ResponseWriter, ns string) 
error { + src := r.Header.Get("Source") + dst := path.Join(ns, r.URL.Path) + sublog := appctx.GetLogger(ctx) + sublog.Debug().Str("src", src).Str("dst", dst).Msg("Performing HTTP Pull") + + // get http client for remote + httpClient := &http.Client{} + + req, err := http.NewRequest("GET", src, nil) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + + // add authentication header + bearerHeader := r.Header.Get(net.HeaderTransferAuth) + req.Header.Add("Authorization", bearerHeader) + + // do download + httpDownloadRes, err := httpClient.Do(req) // lgtm[go/request-forgery] + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + defer httpDownloadRes.Body.Close() + + if httpDownloadRes.StatusCode == http.StatusNotImplemented { + w.WriteHeader(http.StatusBadRequest) + return errtypes.NotSupported("Third-Party copy not supported, source might be a folder") + } + if httpDownloadRes.StatusCode != http.StatusOK { + w.WriteHeader(httpDownloadRes.StatusCode) + return errtypes.InternalError(fmt.Sprintf("Remote GET returned status code %d", httpDownloadRes.StatusCode)) + } + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return errtypes.InternalError(err.Error()) + } + // get upload url + uReq := &provider.InitiateFileUploadRequest{ + Ref: &provider.Reference{Path: dst}, + Opaque: &typespb.Opaque{ + Map: map[string]*typespb.OpaqueEntry{ + "sizedeferred": { + Value: []byte("true"), + }, + }, + }, + } + uRes, err := client.InitiateFileUpload(ctx, uReq) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + + if uRes.Status.Code != rpc.Code_CODE_OK { + w.WriteHeader(http.StatusInternalServerError) + return fmt.Errorf("status code %d", uRes.Status.Code) + } + + var uploadEP, uploadToken string + for _, p := range uRes.Protocols { + if p.Protocol == "simple" { + 
uploadEP, uploadToken = p.UploadEndpoint, p.Token + } + } + + // send performance markers periodically every PerfMarkerResponseTime (5 seconds unless configured) + w.WriteHeader(http.StatusAccepted) + wc := WriteCounter{0, time.Now(), w} + tempReader := io.TeeReader(httpDownloadRes.Body, &wc) + + // do Upload + httpUploadReq, err := rhttp.NewRequest(ctx, "PUT", uploadEP, tempReader) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + httpUploadReq.Header.Set(TokenTransportHeader, uploadToken) + httpUploadRes, err := s.client.Do(httpUploadReq) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + + defer httpUploadRes.Body.Close() + if httpUploadRes.StatusCode != http.StatusOK { + w.WriteHeader(httpUploadRes.StatusCode) + return err + } + return nil +} + +// +// An example of an HTTP TPC Push +// +// +-----------------+ PUT +----------------+ +// | Dest server | <---------------- | Src server | +// | (Remote) | ----------------> | (Reva) | +// +-----------------+ Done +----------------+ +// ^ +// | +// | COPY +// | +// +----------+ +// | Client | +// +----------+ + +// handleTPCPush performs a PUT request on the remote site and while downloading +// data from the requested reva endpoint. 
+func (s *svc) handleTPCPush(ctx context.Context, w http.ResponseWriter, r *http.Request, ns string) { + src := path.Join(ns, r.URL.Path) + dst := r.Header.Get("Destination") + sublog := appctx.GetLogger(ctx).With().Str("src", src).Str("dst", dst).Logger() + + oh := r.Header.Get(net.HeaderOverwrite) + overwrite, err := net.ParseOverwrite(oh) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + m := fmt.Sprintf("Overwrite header is set to incorrect value %v", overwrite) + sublog.Warn().Msgf("HTTP TPC Push: %s", m) + b, err := errors.Marshal(http.StatusBadRequest, m, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + + sublog.Debug().Bool("overwrite", overwrite).Msg("TPC Push") + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return + } + ref := &provider.Reference{Path: src} + srcStatReq := &provider.StatRequest{Ref: ref} + srcStatRes, err := client.Stat(ctx, srcStatReq) + if err != nil { + sublog.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if srcStatRes.Status.Code != rpc.Code_CODE_OK && srcStatRes.Status.Code != rpc.Code_CODE_NOT_FOUND { + errors.HandleErrorStatus(&sublog, w, srcStatRes.Status) + return + } + if srcStatRes.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + sublog.Error().Msg("Third-Party copy of a folder is not supported") + w.WriteHeader(http.StatusBadRequest) + return + } + + err = s.performHTTPPush(ctx, r, w, srcStatRes.Info, ns) + if err != nil { + sublog.Error().Err(err).Msg("error performing TPC Push") + return + } + fmt.Fprintf(w, "success: Created") +} + +func (s *svc) performHTTPPush(ctx context.Context, r *http.Request, w http.ResponseWriter, srcInfo *provider.ResourceInfo, ns string) error { + src := path.Join(ns, r.URL.Path) + dst := r.Header.Get("Destination") + + sublog := appctx.GetLogger(ctx) 
+ sublog.Debug().Str("src", src).Str("dst", dst).Msg("Performing HTTP Push") + + // get download url + dReq := &provider.InitiateFileDownloadRequest{ + Ref: &provider.Reference{Path: src}, + } + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return err + } + dRes, err := client.InitiateFileDownload(ctx, dReq) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + + if dRes.Status.Code != rpc.Code_CODE_OK { + w.WriteHeader(http.StatusInternalServerError) + return fmt.Errorf("status code %d", dRes.Status.Code) + } + + var downloadEP, downloadToken string + for _, p := range dRes.Protocols { + if p.Protocol == "simple" { + downloadEP, downloadToken = p.DownloadEndpoint, p.Token + } + } + + // do download + httpDownloadReq, err := rhttp.NewRequest(ctx, "GET", downloadEP, nil) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + httpDownloadReq.Header.Set(TokenTransportHeader, downloadToken) + + httpDownloadRes, err := s.client.Do(httpDownloadReq) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + defer httpDownloadRes.Body.Close() + if httpDownloadRes.StatusCode != http.StatusOK { + w.WriteHeader(httpDownloadRes.StatusCode) + return fmt.Errorf("Remote PUT returned status code %d", httpDownloadRes.StatusCode) + } + + // send performance markers periodically every PerfMarkerResponseTime (5 seconds unless configured) + w.WriteHeader(http.StatusAccepted) + wc := WriteCounter{0, time.Now(), w} + tempReader := io.TeeReader(httpDownloadRes.Body, &wc) + + // get http client for a remote call + httpClient := &http.Client{} + req, err := http.NewRequest("PUT", dst, tempReader) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + + // add authentication header and content length + bearerHeader := 
r.Header.Get(net.HeaderTransferAuth) + req.Header.Add("Authorization", bearerHeader) + req.ContentLength = int64(srcInfo.GetSize()) + + // do Upload + httpUploadRes, err := httpClient.Do(req) // lgtm[go/request-forgery] + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + defer httpUploadRes.Body.Close() + + if httpUploadRes.StatusCode != http.StatusOK { + w.WriteHeader(httpUploadRes.StatusCode) + return err + } + + return nil +} diff --git a/services/webdav/pkg/ocdav/trashbin.go b/services/webdav/pkg/ocdav/trashbin.go new file mode 100644 index 0000000000..c59e64a937 --- /dev/null +++ b/services/webdav/pkg/ocdav/trashbin.go @@ -0,0 +1,665 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "path" + "strconv" + "strings" + "time" + + "go.opentelemetry.io/otel/codes" + + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/prop" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + + "github.com/opencloud-eu/reva/v2/pkg/appctx" + ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" + rstatus "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" + "github.com/opencloud-eu/reva/v2/pkg/utils" +) + +// TrashbinHandler handles trashbin requests +type TrashbinHandler struct { + gatewaySvc string + namespace string + allowPropfindDepthInfinitiy bool +} + +func (h *TrashbinHandler) Init(c *config.Config) error { + h.gatewaySvc = c.GatewaySvc + h.namespace = path.Join("/", c.FilesNamespace) + h.allowPropfindDepthInfinitiy = c.AllowPropfindDepthInfinitiy + return nil +} + +// Handler handles requests +func (h *TrashbinHandler) Handler(s *svc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + log := appctx.GetLogger(ctx) + + if r.Method == http.MethodOptions { + s.handleOptions(w, r) + return + } + + var username string + username, r.URL.Path = splitSpaceAndKey(r.URL.Path) + if username == "" { + // listing is disabled, no auth will change that + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + user, ok := ctxpkg.ContextGetUser(ctx) + if !ok { + 
w.WriteHeader(http.StatusBadRequest) + return + } + if user.Username != username { + log.Debug().Str("username", username).Interface("user", user).Msg("trying to read another users trash") + // listing other users trash is forbidden, no auth will change that + // do not leak existence of space and return 404 + w.WriteHeader(http.StatusNotFound) + b, err := errors.Marshal(http.StatusNotFound, "not found", "", "") + if err != nil { + log.Error().Msgf("error marshaling xml response: %s", b) + w.WriteHeader(http.StatusInternalServerError) + return + } + _, err = w.Write(b) + if err != nil { + log.Error().Msgf("error writing xml response: %s", b) + w.WriteHeader(http.StatusInternalServerError) + return + } + return + } + + useLoggedInUser := true + ns, newPath, err := s.ApplyLayout(ctx, h.namespace, useLoggedInUser, r.URL.Path) + if err != nil { + w.WriteHeader(http.StatusNotFound) + b, err := errors.Marshal(http.StatusNotFound, fmt.Sprintf("could not get storage for %s", r.URL.Path), "", "") + errors.HandleWebdavError(appctx.GetLogger(r.Context()), w, b, err) + } + r.URL.Path = newPath + + basePath := path.Join(ns, newPath) + space, rpcstatus, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, basePath) + switch { + case err != nil: + log.Error().Err(err).Str("path", basePath).Msg("failed to look up storage space") + w.WriteHeader(http.StatusInternalServerError) + return + case rpcstatus.Code != rpc.Code_CODE_OK: + httpStatus := rstatus.HTTPStatusFromCode(rpcstatus.Code) + w.WriteHeader(httpStatus) + b, err := errors.Marshal(httpStatus, rpcstatus.Message, "", "") + errors.HandleWebdavError(log, w, b, err) + return + } + ref := spacelookup.MakeRelativeReference(space, ".", false) + + // key will be a base64 encoded cs3 path, it uniquely identifies a trash item with an opaque id and an optional path + key := r.URL.Path + + switch r.Method { + case MethodPropfind: + h.listTrashbin(w, r, s, ref, user.Username, key) + case MethodMove: + if key == "" { + 
http.Error(w, "501 Not implemented", http.StatusNotImplemented) + break + } + // find path in url relative to trash base + trashBase := ctx.Value(net.CtxKeyBaseURI).(string) + baseURI := path.Join(path.Dir(trashBase), "files", username) + + dh := r.Header.Get(net.HeaderDestination) + dst, err := net.ParseDestination(baseURI, dh) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + p := path.Join(ns, dst) + // The destination can be in another space. E.g. the 'Shares Jail'. + space, rpcstatus, err := spacelookup.LookUpStorageSpaceForPath(ctx, s.gatewaySelector, p) + if err != nil { + log.Error().Err(err).Str("path", p).Msg("failed to look up destination storage space") + w.WriteHeader(http.StatusInternalServerError) + return + } + if rpcstatus.Code != rpc.Code_CODE_OK { + httpStatus := rstatus.HTTPStatusFromCode(rpcstatus.Code) + w.WriteHeader(httpStatus) + b, err := errors.Marshal(httpStatus, rpcstatus.Message, "", "") + errors.HandleWebdavError(log, w, b, err) + return + } + dstRef := spacelookup.MakeRelativeReference(space, p, false) + + log.Debug().Str("key", key).Str("dst", dst).Msg("restore") + h.restore(w, r, s, ref, dstRef, key) + case http.MethodDelete: + h.delete(w, r, s, ref, key) + default: + http.Error(w, "501 Not implemented", http.StatusNotImplemented) + } + }) +} + +func (h *TrashbinHandler) getDepth(r *http.Request) (net.Depth, error) { + dh := r.Header.Get(net.HeaderDepth) + depth, err := net.ParseDepth(dh) + if err != nil || depth == net.DepthInfinity && !h.allowPropfindDepthInfinitiy { + return "", errors.ErrInvalidDepth + } + return depth, nil +} + +func (h *TrashbinHandler) listTrashbin(w http.ResponseWriter, r *http.Request, s *svc, ref *provider.Reference, refBase, key string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "list_trashbin") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Logger() + + depth, err := h.getDepth(r) + if err != nil { + 
span.RecordError(err) + span.SetStatus(codes.Error, "Invalid Depth header value") + span.SetAttributes(semconv.HTTPResponseStatusCodeKey.Int(http.StatusBadRequest)) + sublog.Debug().Str("depth", r.Header.Get(net.HeaderDepth)).Msg(err.Error()) + w.WriteHeader(http.StatusBadRequest) + m := fmt.Sprintf("Invalid Depth header value: %v", r.Header.Get(net.HeaderDepth)) + b, err := errors.Marshal(http.StatusBadRequest, m, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + + pf, status, err := propfind.ReadPropfind(r.Body) + if err != nil { + sublog.Debug().Err(err).Msg("error reading propfind request") + w.WriteHeader(status) + return + } + + if key == "" && depth == net.DepthZero { + // we are listing the trash root, but without children + // so we just fake a root element without actually querying the gateway + rootHref := path.Join(refBase, key) + propRes, err := h.formatTrashPropfind(ctx, s, ref.ResourceId.SpaceId, refBase, rootHref, &pf, nil, true) + if err != nil { + sublog.Error().Err(err).Msg("error formatting propfind") + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8") + w.WriteHeader(http.StatusMultiStatus) + _, err = w.Write(propRes) + if err != nil { + sublog.Error().Err(err).Msg("error writing body") + return + } + return + } + + if depth == net.DepthOne && key != "" && !strings.HasSuffix(key, "/") { + // when a key is provided and the depth is 1 we need to append a / to the key to list the children + key += "/" + } + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return + } + // ask gateway for recycle items + getRecycleRes, err := client.ListRecycle(ctx, &provider.ListRecycleRequest{Ref: ref, Key: key}) + if err != nil { + sublog.Error().Err(err).Msg("error calling 
ListRecycle") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if getRecycleRes.Status.Code != rpc.Code_CODE_OK { + httpStatus := rstatus.HTTPStatusFromCode(getRecycleRes.Status.Code) + w.WriteHeader(httpStatus) + b, err := errors.Marshal(httpStatus, getRecycleRes.Status.Message, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + + items := getRecycleRes.RecycleItems + + if depth == net.DepthInfinity { + var stack []string + // check sub-containers in reverse order and add them to the stack + // the reversed order here will produce a more logical sorting of results + for i := len(items) - 1; i >= 0; i-- { + // for i := range res.Infos { + if items[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + stack = append(stack, items[i].Key+"/") // fetch children of the item + } + } + + for len(stack) > 0 { + key := stack[len(stack)-1] + getRecycleRes, err := client.ListRecycle(ctx, &provider.ListRecycleRequest{Ref: ref, Key: key}) + if err != nil { + sublog.Error().Err(err).Msg("error calling ListRecycle") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if getRecycleRes.Status.Code != rpc.Code_CODE_OK { + httpStatus := rstatus.HTTPStatusFromCode(getRecycleRes.Status.Code) + w.WriteHeader(httpStatus) + b, err := errors.Marshal(httpStatus, getRecycleRes.Status.Message, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + items = append(items, getRecycleRes.RecycleItems...) 
+ + stack = stack[:len(stack)-1] + // check sub-containers in reverse order and add them to the stack + // the reversed order here will produce a more logical sorting of results + for i := len(getRecycleRes.RecycleItems) - 1; i >= 0; i-- { + // for i := range res.Infos { + if getRecycleRes.RecycleItems[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + stack = append(stack, getRecycleRes.RecycleItems[i].Key) + } + } + } + } + + rootHref := path.Join(refBase, key) + propRes, err := h.formatTrashPropfind(ctx, s, ref.ResourceId.SpaceId, refBase, rootHref, &pf, items, depth != net.DepthZero) + if err != nil { + sublog.Error().Err(err).Msg("error formatting propfind") + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8") + w.WriteHeader(http.StatusMultiStatus) + _, err = w.Write(propRes) + if err != nil { + sublog.Error().Err(err).Msg("error writing body") + return + } +} + +func (h *TrashbinHandler) formatTrashPropfind(ctx context.Context, s *svc, spaceID, refBase, rootHref string, pf *propfind.XML, items []*provider.RecycleItem, fakeRoot bool) ([]byte, error) { + responses := make([]*propfind.ResponseXML, 0, len(items)+1) + if fakeRoot { + responses = append(responses, &propfind.ResponseXML{ + Href: net.EncodePath(path.Join(ctx.Value(net.CtxKeyBaseURI).(string), rootHref) + "/"), // url encode response.Href TODO + Propstat: []propfind.PropstatXML{ + { + Status: "HTTP/1.1 200 OK", + Prop: []prop.PropertyXML{ + prop.Raw("d:resourcetype", ""), + }, + }, + { + Status: "HTTP/1.1 404 Not Found", + Prop: []prop.PropertyXML{ + prop.NotFound("oc:trashbin-original-filename"), + prop.NotFound("oc:trashbin-original-location"), + prop.NotFound("oc:trashbin-delete-datetime"), + prop.NotFound("d:getcontentlength"), + }, + }, + }, + }) + } + + for i := range items { + res, err := h.itemToPropResponse(ctx, s, spaceID, refBase, pf, items[i]) 
+ if err != nil { + return nil, err + } + responses = append(responses, res) + } + responsesXML, err := xml.Marshal(&responses) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + buf.WriteString(``) + buf.Write(responsesXML) + buf.WriteString(``) + return buf.Bytes(), nil +} + +// itemToPropResponse needs to create a listing that contains a key and destination +// the key is the name of an entry in the trash listing +// for now we need to limit trash to the users home, so we can expect all trash keys to have the home storage as the opaque id +func (h *TrashbinHandler) itemToPropResponse(ctx context.Context, s *svc, spaceID, refBase string, pf *propfind.XML, item *provider.RecycleItem) (*propfind.ResponseXML, error) { + + baseURI := ctx.Value(net.CtxKeyBaseURI).(string) + ref := path.Join(baseURI, refBase, item.GetKey()) + if item.GetType() == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + ref += "/" + } + + response := propfind.ResponseXML{ + Href: net.EncodePath(ref), // url encode response.Href + Propstat: []propfind.PropstatXML{}, + } + + // TODO(jfd): if the path we list here is taken from the ListRecycle request we rely on the gateway to prefix it with the mount point + + t := utils.TSToTime(item.GetDeletionTime()).UTC() + dTime := t.Format(time.RFC1123Z) + size := strconv.FormatUint(item.GetSize(), 10) + + // when allprops has been requested + if pf.Allprop != nil { + // return all known properties + propstatOK := propfind.PropstatXML{ + Status: "HTTP/1.1 200 OK", + Prop: []prop.PropertyXML{}, + } + // yes this is redundant, can be derived from oc:trashbin-original-location which contains the full path, clients should not fetch it + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:trashbin-original-filename", path.Base(item.GetRef().GetPath()))) + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:trashbin-original-location", strings.TrimPrefix(item.GetRef().GetPath(), "/"))) + propstatOK.Prop = append(propstatOK.Prop, 
prop.Escaped("oc:trashbin-delete-timestamp", strconv.FormatUint(item.GetDeletionTime().GetSeconds(), 10))) + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:trashbin-delete-datetime", dTime)) + if item.GetType() == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + propstatOK.Prop = append(propstatOK.Prop, prop.Raw("d:resourcetype", "")) + propstatOK.Prop = append(propstatOK.Prop, prop.Raw("oc:size", size)) + } else { + propstatOK.Prop = append(propstatOK.Prop, + prop.Escaped("d:resourcetype", ""), + prop.Escaped("d:getcontentlength", size), + ) + } + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:spaceid", spaceID)) + response.Propstat = append(response.Propstat, propstatOK) + } else { + // otherwise return only the requested properties + propstatOK := propfind.PropstatXML{ + Status: "HTTP/1.1 200 OK", + Prop: []prop.PropertyXML{}, + } + propstatNotFound := propfind.PropstatXML{ + Status: "HTTP/1.1 404 Not Found", + Prop: []prop.PropertyXML{}, + } + for i := range pf.Prop { + switch pf.Prop[i].Space { + case net.NsOwncloud: + switch pf.Prop[i].Local { + case "oc:size": + if item.GetType() == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:size", size)) + } else { + propstatNotFound.Prop = append(propstatNotFound.Prop, prop.NotFound("oc:size")) + } + case "trashbin-original-filename": + // yes this is redundant, can be derived from oc:trashbin-original-location which contains the full path, clients should not fetch it + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:trashbin-original-filename", path.Base(item.GetRef().GetPath()))) + case "trashbin-original-location": + // TODO (jfd) double check and clarify the cs3 spec what the Key is about and if Path is only the folder that contains the file or if it includes the filename + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:trashbin-original-location", strings.TrimPrefix(item.GetRef().GetPath(), "/"))) + case 
"trashbin-delete-datetime": + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:trashbin-delete-datetime", dTime)) + case "trashbin-delete-timestamp": + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:trashbin-delete-timestamp", strconv.FormatUint(item.GetDeletionTime().GetSeconds(), 10))) + case "spaceid": + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:spaceid", spaceID)) + default: + propstatNotFound.Prop = append(propstatNotFound.Prop, prop.NotFound("oc:"+pf.Prop[i].Local)) + } + case net.NsDav: + switch pf.Prop[i].Local { + case "getcontentlength": + if item.GetType() == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + propstatNotFound.Prop = append(propstatNotFound.Prop, prop.NotFound("d:getcontentlength")) + } else { + propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("d:getcontentlength", size)) + } + case "resourcetype": + if item.GetType() == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + propstatOK.Prop = append(propstatOK.Prop, prop.Raw("d:resourcetype", "")) + } else { + propstatOK.Prop = append(propstatOK.Prop, prop.Raw("d:resourcetype", "")) + // redirectref is another option + } + case "getcontenttype": + if item.GetType() == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + propstatOK.Prop = append(propstatOK.Prop, prop.Raw("d:getcontenttype", "httpd/unix-directory")) + } else { + propstatNotFound.Prop = append(propstatNotFound.Prop, prop.NotFound("d:getcontenttype")) + } + default: + propstatNotFound.Prop = append(propstatNotFound.Prop, prop.NotFound("d:"+pf.Prop[i].Local)) + } + default: + // TODO (jfd) lookup shortname for unknown namespaces? 
+ propstatNotFound.Prop = append(propstatNotFound.Prop, prop.NotFound(pf.Prop[i].Space+":"+pf.Prop[i].Local)) + } + } + response.Propstat = append(response.Propstat, propstatOK, propstatNotFound) + } + + return &response, nil +} + +func (h *TrashbinHandler) restore(w http.ResponseWriter, r *http.Request, s *svc, ref, dst *provider.Reference, key string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "restore") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Logger() + + oh := r.Header.Get(net.HeaderOverwrite) + + overwrite, err := net.ParseOverwrite(oh) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return + } + dstStatReq := &provider.StatRequest{Ref: dst} + dstStatRes, err := client.Stat(ctx, dstStatReq) + if err != nil { + sublog.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if dstStatRes.Status.Code != rpc.Code_CODE_OK && dstStatRes.Status.Code != rpc.Code_CODE_NOT_FOUND { + errors.HandleErrorStatus(&sublog, w, dstStatRes.Status) + return + } + + // Restoring to a non-existent location is not supported by the WebDAV spec. The following block ensures the target + // restore location exists, and if it doesn't returns a conflict error code. 
+ if dstStatRes.Status.Code == rpc.Code_CODE_NOT_FOUND && isNested(dst.Path) { + parentRef := &provider.Reference{ResourceId: dst.ResourceId, Path: utils.MakeRelativePath(path.Dir(dst.Path))} + parentStatReq := &provider.StatRequest{Ref: parentRef} + + parentStatResponse, err := client.Stat(ctx, parentStatReq) + if err != nil { + sublog.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if parentStatResponse.Status.Code == rpc.Code_CODE_NOT_FOUND { + // 409 if intermediate dir is missing, see https://tools.ietf.org/html/rfc4918#section-9.8.5 + w.WriteHeader(http.StatusConflict) + return + } + } + + successCode := http.StatusCreated // 201 if new resource was created, see https://tools.ietf.org/html/rfc4918#section-9.9.4 + if dstStatRes.Status.Code == rpc.Code_CODE_OK { + successCode = http.StatusNoContent // 204 if target already existed, see https://tools.ietf.org/html/rfc4918#section-9.9.4 + + if !overwrite { + sublog.Warn().Bool("overwrite", overwrite).Msg("dst already exists") + w.WriteHeader(http.StatusPreconditionFailed) // 412, see https://tools.ietf.org/html/rfc4918#section-9.9.4 + b, err := errors.Marshal( + http.StatusPreconditionFailed, + "The destination node already exists, and the overwrite header is set to false", + net.HeaderOverwrite, + "", + ) + errors.HandleWebdavError(&sublog, w, b, err) + return + } + // delete existing tree + delReq := &provider.DeleteRequest{Ref: dst} + delRes, err := client.Delete(ctx, delReq) + if err != nil { + sublog.Error().Err(err).Msg("error sending grpc delete request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if delRes.Status.Code != rpc.Code_CODE_OK && delRes.Status.Code != rpc.Code_CODE_NOT_FOUND { + errors.HandleErrorStatus(&sublog, w, delRes.Status) + return + } + } + + req := &provider.RestoreRecycleItemRequest{ + Ref: ref, + Key: key, + RestoreRef: dst, + } + + res, err := client.RestoreRecycleItem(ctx, req) + if err 
!= nil { + sublog.Error().Err(err).Msg("error sending a grpc restore recycle item request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if res.Status.Code != rpc.Code_CODE_OK { + if res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + w.WriteHeader(http.StatusForbidden) + b, err := errors.Marshal(http.StatusForbidden, "Permission denied to restore", "", "") + errors.HandleWebdavError(&sublog, w, b, err) + } + errors.HandleErrorStatus(&sublog, w, res.Status) + return + } + + dstStatRes, err = client.Stat(ctx, dstStatReq) + if err != nil { + sublog.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if dstStatRes.Status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, dstStatRes.Status) + return + } + + info := dstStatRes.Info + w.Header().Set(net.HeaderContentType, info.MimeType) + w.Header().Set(net.HeaderETag, info.Etag) + w.Header().Set(net.HeaderOCFileID, storagespace.FormatResourceID(info.Id)) + w.Header().Set(net.HeaderOCETag, info.Etag) + + w.WriteHeader(successCode) +} + +// delete has only a key +func (h *TrashbinHandler) delete(w http.ResponseWriter, r *http.Request, s *svc, ref *provider.Reference, key string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "erase") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Interface("reference", ref).Str("key", key).Logger() + + req := &provider.PurgeRecycleRequest{ + Ref: ref, + Key: key, + } + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return + } + res, err := client.PurgeRecycle(ctx, req) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc restore recycle item request") + w.WriteHeader(http.StatusInternalServerError) + return + } + switch res.Status.Code { + case rpc.Code_CODE_OK: + 
w.WriteHeader(http.StatusNoContent) + case rpc.Code_CODE_NOT_FOUND: + sublog.Debug().Interface("status", res.Status).Msg("resource not found") + w.WriteHeader(http.StatusConflict) + m := fmt.Sprintf("key %s not found", key) + b, err := errors.Marshal(http.StatusConflict, m, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + case rpc.Code_CODE_PERMISSION_DENIED: + w.WriteHeader(http.StatusForbidden) + var m string + if key == "" { + m = "Permission denied to purge recycle" + } else { + m = "Permission denied to delete" + } + b, err := errors.Marshal(http.StatusForbidden, m, "", "") + errors.HandleWebdavError(&sublog, w, b, err) + default: + errors.HandleErrorStatus(&sublog, w, res.Status) + } +} + +func isNested(p string) bool { + dir, _ := path.Split(p) + return dir != "/" && dir != "./" +} diff --git a/services/webdav/pkg/ocdav/tus.go b/services/webdav/pkg/ocdav/tus.go new file mode 100644 index 0000000000..c73d3a85a2 --- /dev/null +++ b/services/webdav/pkg/ocdav/tus.go @@ -0,0 +1,393 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "encoding/json" + "io" + "net/http" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/spacelookup" + "github.com/opencloud-eu/reva/v2/pkg/appctx" + "github.com/opencloud-eu/reva/v2/pkg/conversions" + "github.com/opencloud-eu/reva/v2/pkg/rhttp" + "github.com/opencloud-eu/reva/v2/pkg/storagespace" + "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/rs/zerolog" + tusd "github.com/tus/tusd/v2/pkg/handler" + "go.opentelemetry.io/otel/propagation" +) + +// Propagator ensures the importer module uses the same trace propagation strategy. +var Propagator = propagation.NewCompositeTextMapPropagator( + propagation.Baggage{}, + propagation.TraceContext{}, +) + +func (s *svc) handlePathTusPost(w http.ResponseWriter, r *http.Request, ns string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "tus-post") + defer span.End() + + // read filename from metadata + meta := tusd.ParseMetadataHeader(r.Header.Get(net.HeaderUploadMetadata)) + + // append filename to current dir + ref := &provider.Reference{ + // a path based request has no resource id, so we can only provide a path. 
The gateway has to figure out which provider is responsible + Path: path.Join(ns, r.URL.Path, meta["filename"]), + } + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("filename", meta["filename"]).Logger() + + s.handleTusPost(ctx, w, r, meta, ref, sublog) +} + +func (s *svc) handleSpacesTusPost(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "spaces-tus-post") + defer span.End() + + // read filename from metadata + meta := tusd.ParseMetadataHeader(r.Header.Get(net.HeaderUploadMetadata)) + + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, path.Join(r.URL.Path, meta["filename"])) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Str("filename", meta["filename"]).Logger() + + s.handleTusPost(ctx, w, r, meta, &ref, sublog) +} + +func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http.Request, meta map[string]string, ref *provider.Reference, log zerolog.Logger) { + w.Header().Add(net.HeaderAccessControlAllowHeaders, strings.Join([]string{net.HeaderTusResumable, net.HeaderUploadLength, net.HeaderUploadMetadata, net.HeaderIfMatch}, ", ")) + w.Header().Add(net.HeaderAccessControlExposeHeaders, strings.Join([]string{net.HeaderTusResumable, net.HeaderUploadOffset, net.HeaderLocation}, ", ")) + w.Header().Set(net.HeaderTusExtension, "creation,creation-with-upload,checksum,expiration") + + w.Header().Set(net.HeaderTusResumable, "1.0.0") + + // Test if the version sent by the client is supported + // GET methods are not checked since a browser may visit this URL and does + // not include this header. This request is not part of the specification. 
+ if r.Header.Get(net.HeaderTusResumable) != "1.0.0" { + w.WriteHeader(http.StatusPreconditionFailed) + return + } + if r.Header.Get(net.HeaderUploadLength) == "" { + w.WriteHeader(http.StatusPreconditionFailed) + return + } + if err := ValidateName(filename(meta["filename"]), s.nameValidators); err != nil { + w.WriteHeader(http.StatusPreconditionFailed) + return + } + + // Test if the target is a secret filedrop + var isSecretFileDrop bool + tokenStatInfo, ok := TokenStatInfoFromContext(ctx) + // We assume that when the uploader can create containers, but is not allowed to list them, it is a secret file drop + if ok && tokenStatInfo.GetPermissionSet().CreateContainer && !tokenStatInfo.GetPermissionSet().ListContainer { + isSecretFileDrop = true + } + + // r.Header.Get(net.HeaderOCChecksum) + // TODO must be SHA1, ADLER32 or MD5 ... in capital letters???? + // curl -X PUT https://demo.example.org/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef' + + // TODO check Expect: 100-continue + + client, err := s.gatewaySelector.Next() + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + sReq := &provider.StatRequest{ + Ref: ref, + } + sRes, err := client.Stat(ctx, sReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if sRes.Status.Code != rpc.Code_CODE_OK && sRes.Status.Code != rpc.Code_CODE_NOT_FOUND { + errors.HandleErrorStatus(&log, w, sRes.Status) + return + } + + info := sRes.Info + if info != nil && info.Type != provider.ResourceType_RESOURCE_TYPE_FILE { + log.Warn().Msg("resource is not a file") + w.WriteHeader(http.StatusConflict) + return + } + + if info != nil { + clientETag := r.Header.Get(net.HeaderIfMatch) + serverETag := info.Etag + if clientETag != "" { + if clientETag != serverETag { + log.Warn().Str("client-etag", clientETag).Str("server-etag", 
serverETag).Msg("etags mismatch") + w.WriteHeader(http.StatusPreconditionFailed) + return + } + } + if isSecretFileDrop { + // find next filename + newName, status, err := FindName(ctx, client, filepath.Base(ref.Path), sRes.GetInfo().GetParentId()) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.GetCode() != rpc.Code_CODE_OK { + log.Error().Interface("status", status).Msg("error listing file") + errors.HandleErrorStatus(&log, w, status) + return + } + ref.Path = filepath.Join(filepath.Dir(ref.GetPath()), newName) + sRes.GetInfo().Name = newName + } + } + + uploadLength, err := strconv.ParseInt(r.Header.Get(net.HeaderUploadLength), 10, 64) + if err != nil { + log.Debug().Err(err).Msg("wrong request") + w.WriteHeader(http.StatusBadRequest) + return + } + if uploadLength == 0 { + tfRes, err := client.TouchFile(ctx, &provider.TouchFileRequest{ + Ref: ref, + }) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + switch tfRes.Status.Code { + case rpc.Code_CODE_OK: + w.Header().Set(net.HeaderLocation, "") + w.WriteHeader(http.StatusCreated) + return + case rpc.Code_CODE_ALREADY_EXISTS: + // Fall through to the tus case + default: + log.Error().Interface("status", tfRes.Status).Msg("error touching file") + w.WriteHeader(http.StatusInternalServerError) + return + } + } + + opaqueMap := map[string]*typespb.OpaqueEntry{ + net.HeaderUploadLength: { + Decoder: "plain", + Value: []byte(r.Header.Get(net.HeaderUploadLength)), + }, + } + + mtime := meta["mtime"] + if mtime != "" { + opaqueMap[net.HeaderOCMtime] = &typespb.OpaqueEntry{ + Decoder: "plain", + Value: []byte(mtime), + } + } + + // initiateUpload + uReq := &provider.InitiateFileUploadRequest{ + Ref: ref, + Opaque: &typespb.Opaque{ + Map: opaqueMap, + }, + } + + uRes, err := client.InitiateFileUpload(ctx, uReq) + if err != nil { 
+ log.Error().Err(err).Msg("error initiating file upload") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if uRes.Status.Code != rpc.Code_CODE_OK { + if r.ProtoMajor == 1 { + // drain body to avoid `connection closed` errors + _, _ = io.Copy(io.Discard, r.Body) + } + if uRes.Status.Code == rpc.Code_CODE_NOT_FOUND { + w.WriteHeader(http.StatusPreconditionFailed) + return + } + errors.HandleErrorStatus(&log, w, uRes.Status) + return + } + + var ep, token string + for _, p := range uRes.Protocols { + if p.Protocol == "tus" { + ep, token = p.UploadEndpoint, p.Token + } + } + + // TUS clients don't understand the reva transfer token. We need to append it to the upload endpoint. + // The DataGateway has to take care of pulling it back into the request header upon request arrival. + if token != "" { + if !strings.HasSuffix(ep, "/") { + ep += "/" + } + ep += token + } + + w.Header().Set(net.HeaderLocation, ep) + + // for creation-with-upload extension forward bytes to dataprovider + // TODO check this really streams + if r.Header.Get(net.HeaderContentType) == "application/offset+octet-stream" { + finishUpload := true + if uploadLength > 0 { + var httpRes *http.Response + + httpReq, err := rhttp.NewRequest(ctx, http.MethodPatch, ep, r.Body) + if err != nil { + log.Debug().Err(err).Msg("wrong request") + w.WriteHeader(http.StatusInternalServerError) + return + } + Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header)) + + httpReq.Header.Set(net.HeaderContentType, r.Header.Get(net.HeaderContentType)) + httpReq.Header.Set(net.HeaderContentLength, r.Header.Get(net.HeaderContentLength)) + if r.Header.Get(net.HeaderUploadOffset) != "" { + httpReq.Header.Set(net.HeaderUploadOffset, r.Header.Get(net.HeaderUploadOffset)) + } else { + httpReq.Header.Set(net.HeaderUploadOffset, "0") + } + httpReq.Header.Set(net.HeaderTusResumable, r.Header.Get(net.HeaderTusResumable)) + + httpRes, err = s.client.Do(httpReq) + if err != nil || httpRes == nil { + 
log.Error().Err(err).Msg("error doing PATCH request to data gateway") + w.WriteHeader(http.StatusInternalServerError) + return + } + defer httpRes.Body.Close() + + if httpRes.StatusCode != http.StatusNoContent { + w.WriteHeader(httpRes.StatusCode) + return + } + + w.Header().Set(net.HeaderUploadOffset, httpRes.Header.Get(net.HeaderUploadOffset)) + w.Header().Set(net.HeaderTusResumable, httpRes.Header.Get(net.HeaderTusResumable)) + w.Header().Set(net.HeaderTusUploadExpires, httpRes.Header.Get(net.HeaderTusUploadExpires)) + if httpRes.Header.Get(net.HeaderOCMtime) != "" { + w.Header().Set(net.HeaderOCMtime, httpRes.Header.Get(net.HeaderOCMtime)) + } + + if strings.HasPrefix(uReq.GetRef().GetPath(), "/public") && uReq.GetRef().GetResourceId() == nil { + // Use the path based request for the public link + sReq.Ref.Path = uReq.Ref.GetPath() + sReq.Ref.ResourceId = nil + } else { + if resid, err := storagespace.ParseID(httpRes.Header.Get(net.HeaderOCFileID)); err == nil { + sReq.Ref = &provider.Reference{ + ResourceId: &resid, + } + } + } + finishUpload = httpRes.Header.Get(net.HeaderUploadOffset) == r.Header.Get(net.HeaderUploadLength) + } + + // check if upload was fully completed + if uploadLength == 0 || finishUpload { + // get uploaded file metadata + + sRes, err := client.Stat(ctx, sReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if sRes.Status.Code != rpc.Code_CODE_OK && sRes.Status.Code != rpc.Code_CODE_NOT_FOUND { + if sRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + // the token expired during upload, so the stat failed + // and we can't do anything about it. 
+ // the clients will handle this gracefully by doing a propfind on the file + w.WriteHeader(http.StatusOK) + return + } + + errors.HandleErrorStatus(&log, w, sRes.Status) + return + } + + info := sRes.Info + if info == nil { + log.Error().Msg("No info found for uploaded file") + w.WriteHeader(http.StatusInternalServerError) + return + } + + // get WebDav permissions for file + isPublic := false + if info.Opaque != nil && info.Opaque.Map != nil { + if info.Opaque.Map["link-share"] != nil && info.Opaque.Map["link-share"].Decoder == "json" { + ls := &link.PublicShare{} + _ = json.Unmarshal(info.Opaque.Map["link-share"].Value, ls) + isPublic = ls != nil + } + } + isShared := !net.IsCurrentUserOwnerOrManager(ctx, info.Owner, info) + role := conversions.RoleFromResourcePermissions(info.PermissionSet, isPublic) + permissions := role.WebDAVPermissions( + info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER, + isShared, + false, + isPublic, + ) + + w.Header().Set(net.HeaderContentType, info.MimeType) + w.Header().Set(net.HeaderOCFileID, storagespace.FormatResourceID(info.Id)) + w.Header().Set(net.HeaderOCETag, info.Etag) + w.Header().Set(net.HeaderETag, info.Etag) + w.Header().Set(net.HeaderOCPermissions, permissions) + + t := utils.TSToTime(info.Mtime).UTC() + lastModifiedString := t.Format(time.RFC1123Z) + w.Header().Set(net.HeaderLastModified, lastModifiedString) + } + } + + w.WriteHeader(http.StatusCreated) +} diff --git a/services/webdav/pkg/ocdav/validation.go b/services/webdav/pkg/ocdav/validation.go new file mode 100644 index 0000000000..4d721ccd9d --- /dev/null +++ b/services/webdav/pkg/ocdav/validation.go @@ -0,0 +1,79 @@ +package ocdav + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" +) + +// Validator validates strings +type Validator func(string) error + +// ValidatorsFromConfig returns the configured Validators +func ValidatorsFromConfig(c *config.Config) []Validator { + // we always 
want to exclude empty names + vals := []Validator{notEmpty()} + + // forbidden characters + vals = append(vals, doesNotContain(c.NameValidation.InvalidChars)) + + // max length + vals = append(vals, isShorterThan(c.NameValidation.MaxLength)) + + return vals +} + +// ValidateName will validate a file or folder name, returning an error when it is not accepted +func ValidateName(name string, validators []Validator) error { + return ValidateDestination(name, append(validators, notReserved())) +} + +// ValidateDestination will validate a file or folder destination name (which can be . or ..), returning an error when it is not accepted +func ValidateDestination(name string, validators []Validator) error { + for _, v := range validators { + if err := v(name); err != nil { + return fmt.Errorf("name validation failed: %w", err) + } + } + return nil +} + +func notReserved() Validator { + return func(s string) error { + if s == ".." || s == "." { + return errors.New(". and .. are reserved names") + } + return nil + } +} + +func notEmpty() Validator { + return func(s string) error { + if strings.TrimSpace(s) == "" { + return errors.New("must not be empty") + } + return nil + } +} + +func doesNotContain(bad []string) Validator { + return func(s string) error { + for _, b := range bad { + if strings.Contains(s, b) { + return fmt.Errorf("must not contain %s", b) + } + } + return nil + } +} + +func isShorterThan(maxLength int) Validator { + return func(s string) error { + if len(s) > maxLength { + return fmt.Errorf("must be shorter than %d", maxLength) + } + return nil + } +} diff --git a/services/webdav/pkg/ocdav/versions.go b/services/webdav/pkg/ocdav/versions.go new file mode 100644 index 0000000000..895f043bf2 --- /dev/null +++ b/services/webdav/pkg/ocdav/versions.go @@ -0,0 +1,258 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+
+package ocdav
+
+import (
+ "context"
+ "net/http"
+ "path"
+
+ "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config"
+ "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors"
+ "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/net"
+ "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind"
+ "github.com/opencloud-eu/reva/v2/pkg/storagespace"
+ "github.com/opencloud-eu/reva/v2/pkg/utils"
+
+ rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
+ provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
+ types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
+ "github.com/opencloud-eu/reva/v2/pkg/appctx"
+ "github.com/opencloud-eu/reva/v2/pkg/rhttp/router"
+)
+
+// VersionsHandler handles version requests
+type VersionsHandler struct {
+}
+
+func (h *VersionsHandler) Init(c *config.Config) error {
+ return nil
+}
+
+// Handler handles requests
+// versions can be listed with a PROPFIND to /remote.php/dav/meta/<fileid>/v
+// a version is identified by a timestamp, eg. 
/remote.php/dav/meta/<fileid>/v/1561410426 +func (h *VersionsHandler) Handler(s *svc, rid *provider.ResourceId) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + if rid == nil { + http.Error(w, "404 Not Found", http.StatusNotFound) + return + } + + // baseURI is encoded as part of the response payload in href field + baseURI := path.Join(ctx.Value(net.CtxKeyBaseURI).(string), storagespace.FormatResourceID(rid)) + ctx = context.WithValue(ctx, net.CtxKeyBaseURI, baseURI) + r = r.WithContext(ctx) + + var key string + key, r.URL.Path = router.ShiftPath(r.URL.Path) + if r.Method == http.MethodOptions { + s.handleOptions(w, r) + return + } + if key == "" && r.Method == MethodPropfind { + h.doListVersions(w, r, s, rid) + return + } + if key != "" { + switch r.Method { + case MethodCopy: + // TODO(jfd) cs3api has no delete file version call + // TODO(jfd) restore version to given Destination, but cs3api has no destination + h.doRestore(w, r, s, rid, key) + return + case http.MethodHead: + log := appctx.GetLogger(ctx) + ref := &provider.Reference{ + ResourceId: &provider.ResourceId{ + StorageId: rid.StorageId, + SpaceId: rid.SpaceId, + OpaqueId: key, + }, + Path: utils.MakeRelativePath(r.URL.Path), + } + s.handleHead(ctx, w, r, ref, *log) + return + case http.MethodGet: + log := appctx.GetLogger(ctx) + ref := &provider.Reference{ + ResourceId: &provider.ResourceId{ + StorageId: rid.StorageId, + SpaceId: rid.SpaceId, + OpaqueId: key, + }, + Path: utils.MakeRelativePath(r.URL.Path), + } + s.handleGet(ctx, w, r, ref, "spaces", *log) + return + } + } + + http.Error(w, "501 Not Implemented", http.StatusNotImplemented) + }) +} + +func (h *VersionsHandler) doListVersions(w http.ResponseWriter, r *http.Request, s *svc, rid *provider.ResourceId) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "listVersions") + defer span.End() + + sublog := 
appctx.GetLogger(ctx).With().Interface("resourceid", rid).Logger() + + pf, status, err := propfind.ReadPropfind(r.Body) + if err != nil { + sublog.Debug().Err(err).Msg("error reading propfind request") + w.WriteHeader(status) + return + } + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return + } + ref := &provider.Reference{ResourceId: rid} + res, err := client.Stat(ctx, &provider.StatRequest{Ref: ref}) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if res.Status.Code != rpc.Code_CODE_OK { + if res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED || res.Status.Code == rpc.Code_CODE_NOT_FOUND { + w.WriteHeader(http.StatusNotFound) + b, err := errors.Marshal(http.StatusNotFound, "Resource not found", "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + errors.HandleErrorStatus(&sublog, w, res.Status) + return + } + + info := res.Info + + lvRes, err := client.ListFileVersions(ctx, &provider.ListFileVersionsRequest{Ref: ref}) + if err != nil { + sublog.Error().Err(err).Msg("error sending list container grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if lvRes.Status.Code != rpc.Code_CODE_OK { + if lvRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + w.WriteHeader(http.StatusForbidden) + b, err := errors.Marshal(http.StatusForbidden, "You have no permission to list file versions on this resource", "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + errors.HandleErrorStatus(&sublog, w, lvRes.Status) + return + } + + versions := lvRes.GetVersions() + infos := make([]*provider.ResourceInfo, 0, len(versions)+1) + // add version dir . 
entry, derived from file info + infos = append(infos, &provider.ResourceInfo{ + Type: provider.ResourceType_RESOURCE_TYPE_CONTAINER, + }) + + for i := range versions { + vi := &provider.ResourceInfo{ + // TODO(jfd) we cannot access version content, this will be a problem when trying to fetch version thumbnails + // Opaque + Type: provider.ResourceType_RESOURCE_TYPE_FILE, + Id: &provider.ResourceId{ + StorageId: "versions", + OpaqueId: info.Id.OpaqueId + "@" + versions[i].GetKey(), + }, + // Checksum + Etag: versions[i].Etag, + // MimeType + Mtime: &types.Timestamp{ + Seconds: versions[i].Mtime, + // TODO cs3apis FileVersion should use types.Timestamp instead of uint64 + }, + Path: path.Join("v", versions[i].Key), + // PermissionSet + Size: versions[i].Size, + Owner: info.Owner, + } + infos = append(infos, vi) + } + + prefer := net.ParsePrefer(r.Header.Get("prefer")) + returnMinimal := prefer[net.HeaderPreferReturn] == "minimal" + + propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, "", nil, returnMinimal, nil) + if err != nil { + sublog.Error().Err(err).Msg("error formatting propfind") + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8") + w.Header().Set(net.HeaderVary, net.HeaderPrefer) + if returnMinimal { + w.Header().Set(net.HeaderPreferenceApplied, "return=minimal") + } + w.WriteHeader(http.StatusMultiStatus) + _, err = w.Write(propRes) + if err != nil { + sublog.Error().Err(err).Msg("error writing body") + return + } + +} + +func (h *VersionsHandler) doRestore(w http.ResponseWriter, r *http.Request, s *svc, rid *provider.ResourceId, key string) { + ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "restore") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Interface("resourceid", rid).Str("key", key).Logger() + + req := 
&provider.RestoreFileVersionRequest{ + Ref: &provider.Reference{ResourceId: rid}, + Key: key, + } + + client, err := s.gatewaySelector.Next() + if err != nil { + sublog.Error().Err(err).Msg("error selecting next gateway client") + w.WriteHeader(http.StatusInternalServerError) + return + } + res, err := client.RestoreFileVersion(ctx, req) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc restore version request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if res.Status.Code != rpc.Code_CODE_OK { + if res.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + w.WriteHeader(http.StatusForbidden) + b, err := errors.Marshal(http.StatusForbidden, "You have no permission to restore versions on this resource", "", "") + errors.HandleWebdavError(&sublog, w, b, err) + return + } + errors.HandleErrorStatus(&sublog, w, res.Status) + return + } + w.WriteHeader(http.StatusNoContent) +} diff --git a/services/webdav/pkg/ocdav/webdav.go b/services/webdav/pkg/ocdav/webdav.go new file mode 100644 index 0000000000..8402d68ed1 --- /dev/null +++ b/services/webdav/pkg/ocdav/webdav.go @@ -0,0 +1,120 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "fmt" + "net/http" + "path" + + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/errors" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/propfind" + "github.com/opencloud-eu/reva/v2/pkg/appctx" +) + +// Common Webdav methods. +// +// Unless otherwise noted, these are defined in RFC 4918 section 9. +const ( + MethodPropfind = "PROPFIND" + MethodLock = "LOCK" + MethodUnlock = "UNLOCK" + MethodProppatch = "PROPPATCH" + MethodMkcol = "MKCOL" + MethodMove = "MOVE" + MethodCopy = "COPY" + MethodReport = "REPORT" +) + +// WebDavHandler implements a dav endpoint +type WebDavHandler struct { + namespace string + useLoggedInUserNS bool +} + +func (h *WebDavHandler) Init(ns string, useLoggedInUserNS bool) error { + h.namespace = path.Join("/", ns) + h.useLoggedInUserNS = useLoggedInUserNS + return nil +} + +// Handler handles requests +func (h *WebDavHandler) Handler(s *svc) http.Handler { + config := s.Config() + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ns, newPath, err := s.ApplyLayout(r.Context(), h.namespace, h.useLoggedInUserNS, r.URL.Path) + if err != nil { + w.WriteHeader(http.StatusNotFound) + b, err := errors.Marshal(http.StatusNotFound, fmt.Sprintf("could not get storage for %s", r.URL.Path), "", "") + errors.HandleWebdavError(appctx.GetLogger(r.Context()), w, b, err) + return + } + r.URL.Path = newPath + + // TODO initialize status with http.StatusBadRequest + // TODO initialize err with errors.ErrUnsupportedMethod + var status int // status 0 means the handler already sent the response + switch r.Method { + case MethodPropfind: + p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, s.urlSigner, config) + p.HandlePathPropfind(w, r, ns) + case MethodLock: + status, err = s.handleLock(w, r, ns) + case MethodUnlock: + status, err = s.handleUnlock(w, r, ns) + case MethodProppatch: + status, err = s.handlePathProppatch(w, r, ns) + case MethodMkcol: + status, err = 
s.handlePathMkcol(w, r, ns) + case MethodMove: + s.handlePathMove(w, r, ns) + case MethodCopy: + s.handlePathCopy(w, r, ns) + case MethodReport: + s.handleReport(w, r, ns) + case http.MethodGet: + s.handlePathGet(w, r, ns) + case http.MethodPut: + s.handlePathPut(w, r, ns) + case http.MethodPost: + s.handlePathTusPost(w, r, ns) + case http.MethodOptions: + s.handleOptions(w, r) + case http.MethodHead: + s.handlePathHead(w, r, ns) + case http.MethodDelete: + status, err = s.handlePathDelete(w, r, ns) + default: + w.WriteHeader(http.StatusNotFound) + } + + if status != 0 { // 0 means the handler already sent the response + w.WriteHeader(status) + if status != http.StatusNoContent { + var b []byte + if b, err = errors.Marshal(status, err.Error(), "", ""); err == nil { + _, err = w.Write(b) + } + } + } + if err != nil { + appctx.GetLogger(r.Context()).Error().Err(err).Msg(err.Error()) + } + }) +} diff --git a/services/webdav/pkg/service/v0/search.go b/services/webdav/pkg/service/v0/search.go index 7c4120997c..56438192f4 100644 --- a/services/webdav/pkg/service/v0/search.go +++ b/services/webdav/pkg/service/v0/search.go @@ -20,6 +20,7 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/tags" "github.com/opencloud-eu/reva/v2/pkg/utils" + "github.com/opencloud-eu/opencloud/pkg/log" searchmsg "github.com/opencloud-eu/opencloud/protogen/gen/opencloud/messages/search/v0" searchsvc "github.com/opencloud-eu/opencloud/protogen/gen/opencloud/services/search/v0" "github.com/opencloud-eu/opencloud/services/thumbnails/pkg/thumbnail" @@ -31,11 +32,11 @@ import ( const ( elementNameSearchFiles = "search-files" - // TODO elementNameFilterFiles = "filter-files" + elementNameFilterFiles = "filter-files" ) -// Search is the endpoint for retrieving search results for REPORT requests -func (g Webdav) Search(w http.ResponseWriter, r *http.Request) { +// Report is the endpoint for retrieving search results for REPORT requests +func (g Webdav) Report(w http.ResponseWriter, r *http.Request) { 
logger := g.log.SubloggerWithRequestID(r.Context()) rep, err := readReport(r.Body) if err != nil { @@ -44,8 +45,8 @@ func (g Webdav) Search(w http.ResponseWriter, r *http.Request) { return } - if rep.SearchFiles == nil { - renderError(w, r, errBadRequest("missing search-files tag")) + if rep.SearchFiles == nil && rep.FilterFiles == nil { + renderError(w, r, errBadRequest("missing search-files or filter-files element")) logger.Debug().Err(err).Msg("error reading report") return } @@ -54,6 +55,81 @@ func (g Webdav) Search(w http.ResponseWriter, r *http.Request) { ctx := revactx.ContextSetToken(r.Context(), t) ctx = metadata.Set(ctx, revactx.TokenHeader, t) + if rep.SearchFiles != nil { + g.handleSearchFiles(ctx, w, r, rep, logger) + return + } + if rep.FilterFiles != nil { + g.handleFilterFiles(ctx, w, r, rep, logger) + return + } +} + +func (g Webdav) handleFilterFiles(ctx context.Context, w http.ResponseWriter, r *http.Request, rep *report, log log.Logger) { + // if rep.FilterFiles.Rules.Favorite { + // // List the users favorite resources. 
+ // client, err := g.gatewaySelector.Next() + // if err != nil { + // log.Error().Err(err).Msg("error selecting next gateway client") + // w.WriteHeader(http.StatusInternalServerError) + // return + // } + // currentUser := revactx.ContextMustGetUser(ctx) + // ok, err := utils.CheckPermission(ctx, permission.ListFavorites, client) + // if err != nil { + // log.Error().Err(err).Msg("error checking permission") + // w.WriteHeader(http.StatusInternalServerError) + // return + // } + // if !ok { + // log.Info().Interface("user", currentUser).Msg("user not allowed to list favorites") + // w.WriteHeader(http.StatusForbidden) + // return + // } + // favorites, err := g.favoritesManager.ListFavorites(ctx, currentUser.Id) + // if err != nil { + // log.Error().Err(err).Msg("error getting favorites") + // w.WriteHeader(http.StatusInternalServerError) + // return + // } + + // infos := make([]*provider.ResourceInfo, 0, len(favorites)) + // for i := range favorites { + // statRes, err := client.Stat(ctx, &providerv1beta1.StatRequest{Ref: &providerv1beta1.Reference{ResourceId: favorites[i]}}) + // if err != nil { + // log.Error().Err(err).Msg("error getting resource info") + // continue + // } + // if statRes.Status.Code != rpcv1beta1.Code_CODE_OK { + // log.Error().Interface("stat_response", statRes).Msg("error getting resource info") + // continue + // } + // infos = append(infos, statRes.Info) + // } + + // prefer := net.ParsePrefer(r.Header.Get("prefer")) + // returnMinimal := prefer[net.HeaderPreferReturn] == "minimal" + + // responsesXML, err := propfind.MultistatusResponse(ctx, &propfind.XML{Prop: ff.Prop}, infos, s.c.PublicURL, namespace, nil, returnMinimal, nil) + // if err != nil { + // log.Error().Err(err).Msg("error formatting propfind") + // w.WriteHeader(http.StatusInternalServerError) + // return + // } + // w.Header().Set(net.HeaderDav, "1, 3, extended-mkcol") + // w.Header().Set(net.HeaderContentType, "application/xml; charset=utf-8") + // 
w.Header().Set(net.HeaderVary, net.HeaderPrefer) + // if returnMinimal { + // w.Header().Set(net.HeaderPreferenceApplied, "return=minimal") + // } + // w.WriteHeader(http.StatusMultiStatus) + // if _, err := w.Write(responsesXML); err != nil { + // log.Err(err).Msg("error writing response") + // } + // } +} + +func (g Webdav) handleSearchFiles(ctx context.Context, w http.ResponseWriter, r *http.Request, rep *report, logger log.Logger) { req := &searchsvc.SearchRequest{ Query: rep.SearchFiles.Search.Pattern, PageSize: int32(rep.SearchFiles.Search.Limit), @@ -252,7 +328,6 @@ func hasPreview(md *provider.ResourceInfo, appendToOK func(p ...prop.PropertyXML type report struct { SearchFiles *reportSearchFiles - // FilterFiles TODO add this for tag based search FilterFiles *reportFilterFiles `xml:"filter-files"` } type reportSearchFiles struct { @@ -307,22 +382,21 @@ func readReport(r io.Reader) (rep *report, err error) { } if v, ok := t.(xml.StartElement); ok { - if v.Name.Local == elementNameSearchFiles { + switch v.Name.Local { + case elementNameSearchFiles: var repSF reportSearchFiles err = decoder.DecodeElement(&repSF, &v) if err != nil { return nil, err } rep.SearchFiles = &repSF - /* - } else if v.Name.Local == elementNameFilterFiles { - var repFF reportFilterFiles - err = decoder.DecodeElement(&repFF, &v) - if err != nil { - return nil, http.StatusBadRequest, err - } - rep.FilterFiles = &repFF - */ + case elementNameFilterFiles: + var repFF reportFilterFiles + err = decoder.DecodeElement(&repFF, &v) + if err != nil { + return nil, err + } + rep.FilterFiles = &repFF } } } diff --git a/services/webdav/pkg/service/v0/service.go b/services/webdav/pkg/service/v0/service.go index c61f63f1e8..cb0dcbe3e4 100644 --- a/services/webdav/pkg/service/v0/service.go +++ b/services/webdav/pkg/service/v0/service.go @@ -3,6 +3,7 @@ package svc import ( "context" "encoding/xml" + "errors" "io" "math/rand/v2" "net/http" @@ -17,8 +18,17 @@ import ( rpcv1beta1 
"github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" "github.com/go-chi/chi/v5" "github.com/go-chi/render" + + "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth" + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/credential/loader" + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/token/loader" + _ "github.com/opencloud-eu/opencloud/internal/http/interceptors/auth/tokenwriter/loader" + "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav" + ocdavConfig "github.com/opencloud-eu/opencloud/services/webdav/pkg/ocdav/config" revactx "github.com/opencloud-eu/reva/v2/pkg/ctx" "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" + "github.com/opencloud-eu/reva/v2/pkg/storage/favorite" + favregistry "github.com/opencloud-eu/reva/v2/pkg/storage/favorite/registry" "github.com/opencloud-eu/reva/v2/pkg/storage/utils/templates" "github.com/riandyrn/otelchi" merrors "go-micro.dev/v4/errors" @@ -27,6 +37,7 @@ import ( "github.com/opencloud-eu/opencloud/pkg/log" "github.com/opencloud-eu/opencloud/pkg/registry" "github.com/opencloud-eu/opencloud/pkg/tracing" + "github.com/opencloud-eu/opencloud/pkg/version" thumbnailsmsg "github.com/opencloud-eu/opencloud/protogen/gen/opencloud/messages/thumbnails/v0" searchsvc "github.com/opencloud-eu/opencloud/protogen/gen/opencloud/services/search/v0" thumbnailssvc "github.com/opencloud-eu/opencloud/protogen/gen/opencloud/services/thumbnails/v0" @@ -50,6 +61,20 @@ type Service interface { Thumbnail(w http.ResponseWriter, r *http.Request) } +// Webdav implements the business logic for Service. 
+type Webdav struct { + config *config.Config + log log.Logger + mux *chi.Mux + searchClient searchsvc.SearchProviderService + thumbnailsClient thumbnailssvc.ThumbnailService + gatewaySelector pool.Selectable[gatewayv1beta1.GatewayAPIClient] + favoritesManager favorite.Manager + + webDavHandler *ocdav.WebDavHandler + davHandler *ocdav.DavHandler +} + // NewService returns a service implementation for Service. func NewService(opts ...Option) (Service, error) { options := newOptions(opts...) @@ -79,6 +104,11 @@ func NewService(opts ...Option) (Service, error) { return nil, err } + fm, err := favoriteManager(conf) + if err != nil { + return nil, err + } + svc := Webdav{ config: conf, log: options.Logger, @@ -86,54 +116,114 @@ func NewService(opts ...Option) (Service, error) { searchClient: searchsvc.NewSearchProviderService("eu.opencloud.api.search", conf.GrpcClient), thumbnailsClient: thumbnailssvc.NewThumbnailService("eu.opencloud.api.thumbnails", conf.GrpcClient), gatewaySelector: gatewaySelector, + favoritesManager: fm, + webDavHandler: new(ocdav.WebDavHandler), + davHandler: new(ocdav.DavHandler), } + // Embed the ocdav service + ocdavCfg := &ocdavConfig.Config{ + Prefix: conf.OCDav.Prefix, + WebdavNamespace: conf.WebdavNamespace, + FilesNamespace: conf.OCDav.FilesNamespace, + SharesNamespace: conf.OCDav.SharesNamespace, + OCMNamespace: conf.OCDav.OCMNamespace, + GatewaySvc: conf.RevaGateway, + Timeout: conf.OCDav.Timeout, + Insecure: conf.OCDav.Insecure, + EnableHTTPTpc: conf.OCDav.EnableHTTPTPC, + PublicURL: conf.OCDav.PublicURL, + AllowPropfindDepthInfinitiy: conf.OCDav.AllowPropfindDepthInfinity, + NameValidation: ocdavConfig.NameValidation{ + InvalidChars: conf.OCDav.NameValidation.InvalidChars, + MaxLength: conf.OCDav.NameValidation.MaxLength, + }, + MachineAuthAPIKey: conf.OCDav.MachineAuthAPIKey, + Version: version.Legacy, + VersionString: version.LegacyString, + Edition: version.Edition, + Product: "OpenCloud", + ProductName: "OpenCloud", + 
ProductVersion: version.GetString(), + URLSigningSharedSecret: conf.Commons.URLSigningSecret, + } + + ls := ocdav.NewCS3LS(gatewaySelector) + ocdav, err := ocdav.NewWith(ocdavCfg, svc.favoritesManager, ls, &options.Logger.Logger, gatewaySelector) + if svc.config.DisablePreviews { svc.thumbnailsClient = nil } - // register method with chi before any routing is set up - chi.RegisterMethod("REPORT") + authMiddleware, err := auth.New(map[string]any{}, ocdav.Unprotected(), options.TraceProvider) + if err != nil { + return nil, err + } + ocdavHandler := authMiddleware(ocdav.Handler()) + + previewOrOcdav := func(previewHandler http.HandlerFunc, fallbackHandler http.Handler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.URL.Query().Get("preview") == "1" { + previewHandler(w, r) + } else { + fallbackHandler.ServeHTTP(w, r) + } + } + } + // register methods with chi before any routing is set up + chi.RegisterMethod("PROPFIND") + chi.RegisterMethod("PROPPATCH") + chi.RegisterMethod("MKCOL") + chi.RegisterMethod("COPY") + chi.RegisterMethod("MOVE") + chi.RegisterMethod("LOCK") + chi.RegisterMethod("UNLOCK") + chi.RegisterMethod("REPORT") m.Route(options.Config.HTTP.Root, func(r chi.Router) { + r.Use(options.Middleware...) 
if !svc.config.DisablePreviews { r.Group(func(r chi.Router) { r.Use(svc.DavUserContext()) - r.Get("/remote.php/dav/spaces/{id}", svc.SpacesThumbnail) - r.Get("/remote.php/dav/spaces/{id}/*", svc.SpacesThumbnail) - r.Get("/dav/spaces/{id}", svc.SpacesThumbnail) - r.Get("/dav/spaces/{id}/*", svc.SpacesThumbnail) - r.MethodFunc("REPORT", "/remote.php/dav/spaces*", svc.Search) - r.MethodFunc("REPORT", "/dav/spaces*", svc.Search) + r.Get("/remote.php/dav/spaces/{id}", previewOrOcdav(svc.SpacesThumbnail, ocdavHandler)) + r.Get("/remote.php/dav/spaces/{id}/*", previewOrOcdav(svc.SpacesThumbnail, ocdavHandler)) + r.Get("/dav/spaces/{id}", previewOrOcdav(svc.SpacesThumbnail, ocdavHandler)) + r.Get("/dav/spaces/{id}/*", previewOrOcdav(svc.SpacesThumbnail, ocdavHandler)) + r.MethodFunc("REPORT", "/remote.php/dav/spaces*", svc.Report) + r.MethodFunc("REPORT", "/dav/spaces*", svc.Report) - r.Get("/remote.php/dav/files/{id}", svc.Thumbnail) - r.Get("/remote.php/dav/files/{id}/*", svc.Thumbnail) - r.Get("/dav/files/{id}", svc.Thumbnail) - r.Get("/dav/files/{id}/*", svc.Thumbnail) + r.Get("/remote.php/dav/files/{id}", previewOrOcdav(svc.Thumbnail, ocdavHandler)) + r.Get("/remote.php/dav/files/{id}/*", previewOrOcdav(svc.Thumbnail, ocdavHandler)) + r.Get("/dav/files/{id}", previewOrOcdav(svc.Thumbnail, ocdavHandler)) + r.Get("/dav/files/{id}/*", previewOrOcdav(svc.Thumbnail, ocdavHandler)) - r.MethodFunc("REPORT", "/remote.php/dav/files*", svc.Search) - r.MethodFunc("REPORT", "/dav/files*", svc.Search) + r.MethodFunc("REPORT", "/remote.php/dav/files*", svc.Report) + r.MethodFunc("REPORT", "/dav/files*", svc.Report) }) r.Group(func(r chi.Router) { r.Use(svc.DavPublicContext()) - r.Head("/remote.php/dav/public-files/{token}/*", svc.PublicThumbnailHead) - r.Head("/dav/public-files/{token}/*", svc.PublicThumbnailHead) + r.Head("/remote.php/dav/public-files/{token}/*", previewOrOcdav(svc.PublicThumbnailHead, ocdavHandler)) + r.Head("/dav/public-files/{token}/*", 
previewOrOcdav(svc.PublicThumbnailHead, ocdavHandler)) - r.Get("/remote.php/dav/public-files/{token}/*", svc.PublicThumbnail) - r.Get("/dav/public-files/{token}/*", svc.PublicThumbnail) + r.Get("/remote.php/dav/public-files/{token}/*", previewOrOcdav(svc.PublicThumbnail, ocdavHandler)) + r.Get("/dav/public-files/{token}/*", previewOrOcdav(svc.PublicThumbnail, ocdavHandler)) }) r.Group(func(r chi.Router) { r.Use(svc.WebDAVContext()) - r.Get("/remote.php/webdav/*", svc.Thumbnail) - r.Get("/webdav/*", svc.Thumbnail) + r.Get("/remote.php/webdav/*", previewOrOcdav(svc.Thumbnail, ocdavHandler)) + r.Get("/webdav/*", previewOrOcdav(svc.Thumbnail, ocdavHandler)) - r.MethodFunc("REPORT", "/remote.php/webdav*", svc.Search) - r.MethodFunc("REPORT", "/webdav*", svc.Search) + r.MethodFunc("REPORT", "/remote.php/webdav*", svc.Report) + r.MethodFunc("REPORT", "/webdav*", svc.Report) + }) + + r.Group(func(r chi.Router) { + r.Handle("/*", ocdavHandler) }) } @@ -147,14 +237,25 @@ func NewService(opts ...Option) (Service, error) { return svc, nil } -// Webdav implements the business logic for Service. 
-type Webdav struct { - config *config.Config - log log.Logger - mux *chi.Mux - searchClient searchsvc.SearchProviderService - thumbnailsClient thumbnailssvc.ThumbnailService - gatewaySelector pool.Selectable[gatewayv1beta1.GatewayAPIClient] +func favoriteManager(conf *config.Config) (favorite.Manager, error) { + switch conf.FavoritesStore.Store { + case "memory": + if f, ok := favregistry.NewFuncs["memory"]; ok { + return f(nil) + } + case "nats-js-kv": + if f, ok := favregistry.NewFuncs["nats-js-kv"]; ok { + return f(map[string]any{ + "nats_nodes": conf.FavoritesStore.Nodes, + "nats_database": conf.FavoritesStore.Database, + "nats_table": conf.FavoritesStore.Table, + "nats_auth_username": conf.FavoritesStore.AuthUsername, + "nats_auth_password": conf.FavoritesStore.AuthPassword, + }) + } + } + + return nil, errors.New("invalid favorites store configured") } // ServeHTTP implements the Service interface. diff --git a/services/webdav/pkg/xs/xs.go b/services/webdav/pkg/xs/xs.go new file mode 100644 index 0000000000..ad96e18573 --- /dev/null +++ b/services/webdav/pkg/xs/xs.go @@ -0,0 +1,77 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package xs + +import ( + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" +) + +// XS defines an hex-encoded string as checksum. +type XS string + +func (x XS) String() string { return string(x) } + +const ( + // XSInvalid means the checksum type is invalid. + XSInvalid XS = "invalid" + // XSUnset means the checksum is optional. + XSUnset = "unset" + // XSAdler32 means the checksum is adler32 + XSAdler32 = "adler32" + // XSMD5 means the checksum is md5 + XSMD5 = "md5" + // XSSHA1 means the checksum is SHA1 + XSSHA1 = "sha1" + // XSSHA256 means the checksum is SHA256. + XSSHA256 = "sha256" +) + +// GRPC2PKGXS converts the grpc checksum type to an internal pkg type. +func GRPC2PKGXS(t provider.ResourceChecksumType) XS { + switch t { + case provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_INVALID: + return XSInvalid + case provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_UNSET: + return XSUnset + case provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_SHA1: + return XSSHA1 + case provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_ADLER32: + return XSAdler32 + case provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_MD5: + return XSMD5 + default: + return XSInvalid + } +} + +// PKG2GRPCXS converts an internal checksum type to the grpc checksum type. 
+func PKG2GRPCXS(xsType string) provider.ResourceChecksumType { + switch xsType { + case XSUnset: + return provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_UNSET + case XSAdler32: + return provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_ADLER32 + case XSMD5: + return provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_MD5 + case XSSHA1: + return provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_SHA1 + default: + return provider.ResourceChecksumType_RESOURCE_CHECKSUM_TYPE_INVALID + } +} diff --git a/vendor/github.com/onsi/gomega/gmeasure/cache.go b/vendor/github.com/onsi/gomega/gmeasure/cache.go new file mode 100644 index 0000000000..4717e8df33 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gmeasure/cache.go @@ -0,0 +1,202 @@ +package gmeasure + +import ( + "crypto/md5" + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/onsi/gomega/internal/gutil" +) + +const CACHE_EXT = ".gmeasure-cache" + +/* +ExperimentCache provides a director-and-file based cache of experiments +*/ +type ExperimentCache struct { + Path string +} + +/* +NewExperimentCache creates and initializes a new cache. Path must point to a directory (if path does not exist, NewExperimentCache will create a directory at path). + +Cached Experiments are stored as separate files in the cache directory - the filename is a hash of the Experiment name. Each file contains two JSON-encoded objects - a CachedExperimentHeader that includes the experiment's name and cache version number, and then the Experiment itself. 
+*/ +func NewExperimentCache(path string) (ExperimentCache, error) { + stat, err := os.Stat(path) + if os.IsNotExist(err) { + err := os.MkdirAll(path, 0777) + if err != nil { + return ExperimentCache{}, err + } + } else if !stat.IsDir() { + return ExperimentCache{}, fmt.Errorf("%s is not a directory", path) + } + + return ExperimentCache{ + Path: path, + }, nil +} + +/* +CachedExperimentHeader captures the name of the Cached Experiment and its Version +*/ +type CachedExperimentHeader struct { + Name string + Version int +} + +func (cache ExperimentCache) hashOf(name string) string { + return fmt.Sprintf("%x", md5.Sum([]byte(name))) +} + +func (cache ExperimentCache) readHeader(filename string) (CachedExperimentHeader, error) { + out := CachedExperimentHeader{} + f, err := os.Open(filepath.Join(cache.Path, filename)) + if err != nil { + return out, err + } + defer f.Close() + err = json.NewDecoder(f).Decode(&out) + return out, err +} + +/* +List returns a list of all Cached Experiments found in the cache. +*/ +func (cache ExperimentCache) List() ([]CachedExperimentHeader, error) { + var out []CachedExperimentHeader + names, err := gutil.ReadDir(cache.Path) + if err != nil { + return out, err + } + for _, name := range names { + if filepath.Ext(name) != CACHE_EXT { + continue + } + header, err := cache.readHeader(name) + if err != nil { + return out, err + } + out = append(out, header) + } + return out, nil +} + +/* +Clear empties out the cache - this will delete any and all detected cache files in the cache directory. Use with caution! +*/ +func (cache ExperimentCache) Clear() error { + names, err := gutil.ReadDir(cache.Path) + if err != nil { + return err + } + for _, name := range names { + if filepath.Ext(name) != CACHE_EXT { + continue + } + err := os.Remove(filepath.Join(cache.Path, name)) + if err != nil { + return err + } + } + return nil +} + +/* +Load fetches an experiment from the cache. Lookup occurs by name. 
Load requires that the version number in the cache is equal to or greater than the passed-in version. + +If an experiment with corresponding name and version >= the passed-in version is found, it is unmarshaled and returned. + +If no experiment is found, or the cached version is smaller than the passed-in version, Load will return nil. + +When paired with Ginkgo you can cache experiments and prevent potentially expensive recomputation with this pattern: + + const EXPERIMENT_VERSION = 1 //bump this to bust the cache and recompute _all_ experiments + + Describe("some experiments", func() { + var cache gmeasure.ExperimentCache + var experiment *gmeasure.Experiment + + BeforeEach(func() { + cache = gmeasure.NewExperimentCache("./gmeasure-cache") + name := CurrentSpecReport().LeafNodeText + experiment = cache.Load(name, EXPERIMENT_VERSION) + if experiment != nil { + AddReportEntry(experiment) + Skip("cached") + } + experiment = gmeasure.NewExperiment(name) + AddReportEntry(experiment) + }) + + It("foo runtime", func() { + experiment.SampleDuration("runtime", func() { + //do stuff + }, gmeasure.SamplingConfig{N:100}) + }) + + It("bar runtime", func() { + experiment.SampleDuration("runtime", func() { + //do stuff + }, gmeasure.SamplingConfig{N:100}) + }) + + AfterEach(func() { + if !CurrentSpecReport().State.Is(types.SpecStateSkipped) { + cache.Save(experiment.Name, EXPERIMENT_VERSION, experiment) + } + }) + }) +*/ +func (cache ExperimentCache) Load(name string, version int) *Experiment { + path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT) + f, err := os.Open(path) + if err != nil { + return nil + } + defer f.Close() + dec := json.NewDecoder(f) + header := CachedExperimentHeader{} + dec.Decode(&header) + if header.Version < version { + return nil + } + out := NewExperiment("") + err = dec.Decode(out) + if err != nil { + return nil + } + return out +} + +/* +Save stores the passed-in experiment to the cache with the passed-in name and version. 
+*/ +func (cache ExperimentCache) Save(name string, version int, experiment *Experiment) error { + path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT) + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + enc := json.NewEncoder(f) + err = enc.Encode(CachedExperimentHeader{ + Name: name, + Version: version, + }) + if err != nil { + return err + } + return enc.Encode(experiment) +} + +/* +Delete removes the experiment with the passed-in name from the cache +*/ +func (cache ExperimentCache) Delete(name string) error { + path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT) + return os.Remove(path) +} diff --git a/vendor/github.com/onsi/gomega/gmeasure/enum_support.go b/vendor/github.com/onsi/gomega/gmeasure/enum_support.go new file mode 100644 index 0000000000..b5404f9620 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gmeasure/enum_support.go @@ -0,0 +1,43 @@ +package gmeasure + +import "encoding/json" + +type enumSupport struct { + toString map[uint]string + toEnum map[string]uint + maxEnum uint +} + +func newEnumSupport(toString map[uint]string) enumSupport { + toEnum, maxEnum := map[string]uint{}, uint(0) + for k, v := range toString { + toEnum[v] = k + if maxEnum < k { + maxEnum = k + } + } + return enumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} +} + +func (es enumSupport) String(e uint) string { + if e > es.maxEnum { + return es.toString[0] + } + return es.toString[e] +} + +func (es enumSupport) UnmarshJSON(b []byte) (uint, error) { + var dec string + if err := json.Unmarshal(b, &dec); err != nil { + return 0, err + } + out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway + return out, nil +} + +func (es enumSupport) MarshJSON(e uint) ([]byte, error) { + if e == 0 || e > es.maxEnum { + return json.Marshal(nil) + } + return json.Marshal(es.toString[e]) +} diff --git a/vendor/github.com/onsi/gomega/gmeasure/experiment.go 
b/vendor/github.com/onsi/gomega/gmeasure/experiment.go new file mode 100644 index 0000000000..f4368738de --- /dev/null +++ b/vendor/github.com/onsi/gomega/gmeasure/experiment.go @@ -0,0 +1,530 @@ +/* +Package gomega/gmeasure provides support for benchmarking and measuring code. It is intended as a more robust replacement for Ginkgo V1's Measure nodes. + +gmeasure is organized around the metaphor of an Experiment that can record multiple Measurements. A Measurement is a named collection of data points and gmeasure supports +measuring Values (of type float64) and Durations (of type time.Duration). + +Experiments allows the user to record Measurements directly by passing in Values (i.e. float64) or Durations (i.e. time.Duration) +or to measure measurements by passing in functions to measure. When measuring functions Experiments take care of timing the duration of functions (for Duration measurements) +and/or recording returned values (for Value measurements). Experiments also support sampling functions - when told to sample Experiments will run functions repeatedly +and measure and record results. The sampling behavior is configured by passing in a SamplingConfig that can control the maximum number of samples, the maximum duration for sampling (or both) +and the number of concurrent samples to take. + +Measurements can be decorated with additional information. This is supported by passing in special typed decorators when recording measurements. These include: + +- Units("any string") - to attach units to a Value Measurement (Duration Measurements always have units of "duration") +- Style("any Ginkgo color style string") - to attach styling to a Measurement. This styling is used when rendering console information about the measurement in reports. Color style strings are documented at TODO. +- Precision(integer or time.Duration) - to attach precision to a Measurement. 
This controls how many decimal places to show for Value Measurements and how to round Duration Measurements when rendering them to screen. + +In addition, individual data points in a Measurement can be annotated with an Annotation("any string"). The annotation is associated with the individual data point and is intended to convey additional context about the data point. + +Once measurements are complete, an Experiment can generate a comprehensive report by calling its String() or ColorableString() method. + +Users can also access and analyze the resulting Measurements directly. Use Experiment.Get(NAME) to fetch the Measurement named NAME. This returned struct will have fields containing +all the data points and annotations recorded by the experiment. You can subsequently fetch the Measurement.Stats() to get a Stats struct that contains basic statistical information about the +Measurement (min, max, median, mean, standard deviation). You can order these Stats objects using RankStats() to identify best/worst performers across multiple experiments or measurements. + +gmeasure also supports caching Experiments via an ExperimentCache. The cache supports storing and retrieving experiments by name and version. This allows you to rerun code without +repeating expensive experiments that may not have changed (which can be controlled by the cache version number). It also enables you to compare new experiment runs with older runs to detect +variations in performance/behavior. + +When used with Ginkgo, you can emit experiment reports and encode them in test reports easily using Ginkgo V2's support for Report Entries. +Simply pass your experiment to AddReportEntry to get a report every time the tests run. You can also use AddReportEntry with Measurements to emit all the captured data +and Rankings to emit measurement summaries in rank order. + +Finally, Experiments provide an additional mechanism to measure durations called a Stopwatch. 
The Stopwatch makes it easy to pepper code with statements that measure elapsed time across +different sections of code and can be useful when debugging or evaluating bottlenecks in a given codepath. +*/ +package gmeasure + +import ( + "fmt" + "math" + "reflect" + "sync" + "time" + + "github.com/onsi/gomega/gmeasure/table" +) + +/* +SamplingConfig configures the Sample family of experiment methods. +These methods invoke passed-in functions repeatedly to sample and record a given measurement. +SamplingConfig is used to control the maximum number of samples or time spent sampling (or both). When both are specified sampling ends as soon as one of the conditions is met. +SamplingConfig can also ensure a minimum interval between samples and can enable concurrent sampling. +*/ +type SamplingConfig struct { + // N - the maximum number of samples to record + N int + // Duration - the maximum amount of time to spend recording samples + Duration time.Duration + // MinSamplingInterval - the minimum time that must elapse between samplings. It is an error to specify both MinSamplingInterval and NumParallel. + MinSamplingInterval time.Duration + // NumParallel - the number of parallel workers to spin up to record samples. It is an error to specify both MinSamplingInterval and NumParallel. + NumParallel int +} + +// The Units decorator allows you to specify units (an arbitrary string) when recording values. It is ignored when recording durations. +// +// e := gmeasure.NewExperiment("My Experiment") +// e.RecordValue("length", 3.141, gmeasure.Units("inches")) +// +// Units are only set the first time a value of a given name is recorded. In the example above any subsequent calls to e.RecordValue("length", X) will maintain the "inches" units even if a new set of Units("UNIT") are passed in later. 
+type Units string + +// The Annotation decorator allows you to attach an annotation to a given recorded data-point: +// +// For example: +// +// e := gmeasure.NewExperiment("My Experiment") +// e.RecordValue("length", 3.141, gmeasure.Annotation("bob")) +// e.RecordValue("length", 2.71, gmeasure.Annotation("jane")) +// +// ...will result in a Measurement named "length" that records two values )[3.141, 2.71]) annotation with (["bob", "jane"]) +type Annotation string + +// The Style decorator allows you to associate a style with a measurement. This is used to generate colorful console reports using Ginkgo V2's +// console formatter. Styles are strings in curly brackets that correspond to a color or style. +// +// For example: +// +// e := gmeasure.NewExperiment("My Experiment") +// e.RecordValue("length", 3.141, gmeasure.Style("{{blue}}{{bold}}")) +// e.RecordValue("length", 2.71) +// e.RecordDuration("cooking time", 3 * time.Second, gmeasure.Style("{{red}}{{underline}}")) +// e.RecordDuration("cooking time", 2 * time.Second) +// +// will emit a report with blue bold entries for the length measurement and red underlined entries for the cooking time measurement. +// +// Units are only set the first time a value or duration of a given name is recorded. In the example above any subsequent calls to e.RecordValue("length", X) will maintain the "{{blue}}{{bold}}" style even if a new Style is passed in later. +type Style string + +// The PrecisionBundle decorator controls the rounding of value and duration measurements. See Precision(). +type PrecisionBundle struct { + Duration time.Duration + ValueFormat string +} + +// Precision() allows you to specify the precision of a value or duration measurement - this precision is used when rendering the measurement to screen. +// +// To control the precision of Value measurements, pass Precision an integer. 
This will denote the number of decimal places to render (equivalen to the format string "%.Nf") +// To control the precision of Duration measurements, pass Precision a time.Duration. Duration measurements will be rounded oo the nearest time.Duration when rendered. +// +// For example: +// +// e := gmeasure.NewExperiment("My Experiment") +// e.RecordValue("length", 3.141, gmeasure.Precision(2)) +// e.RecordValue("length", 2.71) +// e.RecordDuration("cooking time", 3214 * time.Millisecond, gmeasure.Precision(100*time.Millisecond)) +// e.RecordDuration("cooking time", 2623 * time.Millisecond) +func Precision(p any) PrecisionBundle { + out := DefaultPrecisionBundle + switch reflect.TypeOf(p) { + case reflect.TypeOf(time.Duration(0)): + out.Duration = p.(time.Duration) + case reflect.TypeOf(int(0)): + out.ValueFormat = fmt.Sprintf("%%.%df", p.(int)) + default: + panic("invalid precision type, must be time.Duration or int") + } + return out +} + +// DefaultPrecisionBundle captures the default precisions for Vale and Duration measurements. +var DefaultPrecisionBundle = PrecisionBundle{ + Duration: 100 * time.Microsecond, + ValueFormat: "%.3f", +} + +type extractedDecorations struct { + annotation Annotation + units Units + precisionBundle PrecisionBundle + style Style +} + +func extractDecorations(args []any) extractedDecorations { + var out extractedDecorations + out.precisionBundle = DefaultPrecisionBundle + + for _, arg := range args { + switch reflect.TypeOf(arg) { + case reflect.TypeOf(out.annotation): + out.annotation = arg.(Annotation) + case reflect.TypeOf(out.units): + out.units = arg.(Units) + case reflect.TypeOf(out.precisionBundle): + out.precisionBundle = arg.(PrecisionBundle) + case reflect.TypeOf(out.style): + out.style = arg.(Style) + default: + panic(fmt.Sprintf("unrecognized argument %#v", arg)) + } + } + + return out +} + +/* +Experiment is gmeasure's core data type. You use experiments to record Measurements and generate reports. 
+Experiments are thread-safe and all methods can be called from multiple goroutines. +*/ +type Experiment struct { + Name string + + // Measurements includes all Measurements recorded by this experiment. You should access them by name via Get() and GetStats() + Measurements Measurements + lock *sync.Mutex +} + +/* +NexExperiment creates a new experiment with the passed-in name. + +When using Ginkgo we recommend immediately registering the experiment as a ReportEntry: + + experiment = NewExperiment("My Experiment") + AddReportEntry(experiment.Name, experiment) + +this will ensure an experiment report is emitted as part of the test output and exported with any test reports. +*/ +func NewExperiment(name string) *Experiment { + experiment := &Experiment{ + Name: name, + lock: &sync.Mutex{}, + } + return experiment +} + +func (e *Experiment) report(enableStyling bool) string { + t := table.NewTable() + t.TableStyle.EnableTextStyling = enableStyling + t.AppendRow(table.R( + table.C("Name"), table.C("N"), table.C("Min"), table.C("Median"), table.C("Mean"), table.C("StdDev"), table.C("Max"), + table.Divider("="), + "{{bold}}", + )) + + for _, measurement := range e.Measurements { + r := table.R(measurement.Style) + t.AppendRow(r) + switch measurement.Type { + case MeasurementTypeNote: + r.AppendCell(table.C(measurement.Note)) + case MeasurementTypeValue, MeasurementTypeDuration: + name := measurement.Name + if measurement.Units != "" { + name += " [" + measurement.Units + "]" + } + r.AppendCell(table.C(name)) + r.AppendCell(measurement.Stats().cells()...) + } + } + + out := e.Name + "\n" + if enableStyling { + out = "{{bold}}" + out + "{{/}}" + } + out += t.Render() + return out +} + +/* +ColorableString returns a Ginkgo formatted summary of the experiment and all its Measurements. +It is called automatically by Ginkgo's reporting infrastructure when the Experiment is registered as a ReportEntry via AddReportEntry. 
+*/ +func (e *Experiment) ColorableString() string { + return e.report(true) +} + +/* +ColorableString returns an unformatted summary of the experiment and all its Measurements. +*/ +func (e *Experiment) String() string { + return e.report(false) +} + +/* +RecordNote records a Measurement of type MeasurementTypeNote - this is simply a textual note to annotate the experiment. It will be emitted in any experiment reports. + +RecordNote supports the Style() decoration. +*/ +func (e *Experiment) RecordNote(note string, args ...any) { + decorations := extractDecorations(args) + + e.lock.Lock() + defer e.lock.Unlock() + e.Measurements = append(e.Measurements, Measurement{ + ExperimentName: e.Name, + Type: MeasurementTypeNote, + Note: note, + Style: string(decorations.style), + }) +} + +/* +RecordDuration records the passed-in duration on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created. + +RecordDuration supports the Style(), Precision(), and Annotation() decorations. +*/ +func (e *Experiment) RecordDuration(name string, duration time.Duration, args ...any) { + decorations := extractDecorations(args) + e.recordDuration(name, duration, decorations) +} + +/* +MeasureDuration runs the passed-in callback and times how long it takes to complete. The resulting duration is recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created. + +MeasureDuration supports the Style(), Precision(), and Annotation() decorations. +*/ +func (e *Experiment) MeasureDuration(name string, callback func(), args ...any) time.Duration { + t := time.Now() + callback() + duration := time.Since(t) + e.RecordDuration(name, duration, args...) + return duration +} + +/* +SampleDuration samples the passed-in callback and times how long it takes to complete each sample. +The resulting durations are recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created. 
+
+The callback is given a zero-based index that increments by one between samples. The Sampling is configured via the passed-in SamplingConfig
+
+SampleDuration supports the Style(), Precision(), and Annotation() decorations. When passed an Annotation() the same annotation is applied to all sample measurements.
+*/
+func (e *Experiment) SampleDuration(name string, callback func(idx int), samplingConfig SamplingConfig, args ...any) {
+	decorations := extractDecorations(args)
+	e.Sample(func(idx int) {
+		t := time.Now()
+		callback(idx)
+		duration := time.Since(t)
+		e.recordDuration(name, duration, decorations)
+	}, samplingConfig)
+}
+
+/*
+SampleAnnotatedDuration samples the passed-in callback and times how long it takes to complete each sample.
+The resulting durations are recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+The callback is given a zero-based index that increments by one between samples. The callback must return an Annotation - this annotation is attached to the measured duration.
+
+# The Sampling is configured via the passed-in SamplingConfig
+
+SampleAnnotatedDuration supports the Style() and Precision() decorations.
+*/ +func (e *Experiment) SampleAnnotatedDuration(name string, callback func(idx int) Annotation, samplingConfig SamplingConfig, args ...any) { + decorations := extractDecorations(args) + e.Sample(func(idx int) { + t := time.Now() + decorations.annotation = callback(idx) + duration := time.Since(t) + e.recordDuration(name, duration, decorations) + }, samplingConfig) +} + +func (e *Experiment) recordDuration(name string, duration time.Duration, decorations extractedDecorations) { + e.lock.Lock() + defer e.lock.Unlock() + idx := e.Measurements.IdxWithName(name) + if idx == -1 { + measurement := Measurement{ + ExperimentName: e.Name, + Type: MeasurementTypeDuration, + Name: name, + Units: "duration", + Durations: []time.Duration{duration}, + PrecisionBundle: decorations.precisionBundle, + Style: string(decorations.style), + Annotations: []string{string(decorations.annotation)}, + } + e.Measurements = append(e.Measurements, measurement) + } else { + if e.Measurements[idx].Type != MeasurementTypeDuration { + panic(fmt.Sprintf("attempting to record duration with name '%s'. That name is already in-use for recording values.", name)) + } + e.Measurements[idx].Durations = append(e.Measurements[idx].Durations, duration) + e.Measurements[idx].Annotations = append(e.Measurements[idx].Annotations, string(decorations.annotation)) + } +} + +/* +NewStopwatch() returns a stopwatch configured to record duration measurements with this experiment. +*/ +func (e *Experiment) NewStopwatch() *Stopwatch { + return newStopwatch(e) +} + +/* +RecordValue records the passed-in value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created. + +RecordValue supports the Style(), Units(), Precision(), and Annotation() decorations. 
+*/
+func (e *Experiment) RecordValue(name string, value float64, args ...any) {
+	decorations := extractDecorations(args)
+	e.recordValue(name, value, decorations)
+}
+
+/*
+MeasureValue runs the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+MeasureValue supports the Style(), Units(), Precision(), and Annotation() decorations.
+*/
+func (e *Experiment) MeasureValue(name string, callback func() float64, args ...any) float64 {
+	value := callback()
+	e.RecordValue(name, value, args...)
+	return value
+}
+
+/*
+SampleValue samples the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+The callback is given a zero-based index that increments by one between samples. The callback must return a float64. The Sampling is configured via the passed-in SamplingConfig
+
+SampleValue supports the Style(), Units(), Precision(), and Annotation() decorations. When passed an Annotation() the same annotation is applied to all sample measurements.
+*/
+func (e *Experiment) SampleValue(name string, callback func(idx int) float64, samplingConfig SamplingConfig, args ...any) {
+	decorations := extractDecorations(args)
+	e.Sample(func(idx int) {
+		value := callback(idx)
+		e.recordValue(name, value, decorations)
+	}, samplingConfig)
+}
+
+/*
+SampleAnnotatedValue samples the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created.
+
+The callback is given a zero-based index that increments by one between samples. The callback must return a float64 and an Annotation - the annotation is attached to the recorded value.
+
+# The Sampling is configured via the passed-in SamplingConfig
+
+SampleAnnotatedValue supports the Style(), Units(), and Precision() decorations.
+*/
+func (e *Experiment) SampleAnnotatedValue(name string, callback func(idx int) (float64, Annotation), samplingConfig SamplingConfig, args ...any) {
+	decorations := extractDecorations(args)
+	e.Sample(func(idx int) {
+		var value float64
+		value, decorations.annotation = callback(idx)
+		e.recordValue(name, value, decorations)
+	}, samplingConfig)
+}
+
+func (e *Experiment) recordValue(name string, value float64, decorations extractedDecorations) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	idx := e.Measurements.IdxWithName(name)
+	if idx == -1 {
+		measurement := Measurement{
+			ExperimentName:  e.Name,
+			Type:            MeasurementTypeValue,
+			Name:            name,
+			Style:           string(decorations.style),
+			Units:           string(decorations.units),
+			PrecisionBundle: decorations.precisionBundle,
+			Values:          []float64{value},
+			Annotations:     []string{string(decorations.annotation)},
+		}
+		e.Measurements = append(e.Measurements, measurement)
+	} else {
+		if e.Measurements[idx].Type != MeasurementTypeValue {
+			panic(fmt.Sprintf("attempting to record value with name '%s'. That name is already in-use for recording durations.", name))
+		}
+		e.Measurements[idx].Values = append(e.Measurements[idx].Values, value)
+		e.Measurements[idx].Annotations = append(e.Measurements[idx].Annotations, string(decorations.annotation))
+	}
+}
+
+/*
+Sample samples the passed-in callback repeatedly. The sampling is governed by the passed in SamplingConfig.
+
+The SamplingConfig can limit the total number of samples and/or the total time spent sampling the callback.
+The SamplingConfig can also instruct Sample to run with multiple concurrent workers.
+
+The callback is called with a zero-based index that increments by one between samples.
+*/ +func (e *Experiment) Sample(callback func(idx int), samplingConfig SamplingConfig) { + if samplingConfig.N == 0 && samplingConfig.Duration == 0 { + panic("you must specify at least one of SamplingConfig.N and SamplingConfig.Duration") + } + if samplingConfig.MinSamplingInterval > 0 && samplingConfig.NumParallel > 1 { + panic("you cannot specify both SamplingConfig.MinSamplingInterval and SamplingConfig.NumParallel") + } + maxTime := time.Now().Add(100000 * time.Hour) + if samplingConfig.Duration > 0 { + maxTime = time.Now().Add(samplingConfig.Duration) + } + maxN := math.MaxInt32 + if samplingConfig.N > 0 { + maxN = samplingConfig.N + } + numParallel := max(samplingConfig.NumParallel, 1) + minSamplingInterval := samplingConfig.MinSamplingInterval + + work := make(chan int) + var wg sync.WaitGroup + defer func() { + close(work) + wg.Wait() + }() + if numParallel > 1 { + wg.Add(numParallel) + for worker := 0; worker < numParallel; worker++ { + go func() { + for idx := range work { + callback(idx) + } + wg.Done() + }() + } + } + + idx := 0 + var avgDt time.Duration + for { + t := time.Now() + if numParallel > 1 { + work <- idx + } else { + callback(idx) + } + dt := time.Since(t) + if numParallel == 1 && dt < minSamplingInterval { + time.Sleep(minSamplingInterval - dt) + dt = time.Since(t) + } + if idx >= numParallel { + avgDt = (avgDt*time.Duration(idx-numParallel) + dt) / time.Duration(idx-numParallel+1) + } + idx += 1 + if idx >= maxN { + return + } + if time.Now().Add(avgDt).After(maxTime) { + return + } + } +} + +/* +Get returns the Measurement with the associated name. If no Measurement is found a zero Measurement{} is returned. +*/ +func (e *Experiment) Get(name string) Measurement { + e.lock.Lock() + defer e.lock.Unlock() + idx := e.Measurements.IdxWithName(name) + if idx == -1 { + return Measurement{} + } + return e.Measurements[idx] +} + +/* +GetStats returns the Stats for the Measurement with the associated name. 
If no Measurement is found a zero Stats{} is returned. + +experiment.GetStats(name) is equivalent to experiment.Get(name).Stats() +*/ +func (e *Experiment) GetStats(name string) Stats { + measurement := e.Get(name) + e.lock.Lock() + defer e.lock.Unlock() + return measurement.Stats() +} diff --git a/vendor/github.com/onsi/gomega/gmeasure/measurement.go b/vendor/github.com/onsi/gomega/gmeasure/measurement.go new file mode 100644 index 0000000000..103d3ea9d0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gmeasure/measurement.go @@ -0,0 +1,235 @@ +package gmeasure + +import ( + "fmt" + "math" + "sort" + "time" + + "github.com/onsi/gomega/gmeasure/table" +) + +type MeasurementType uint + +const ( + MeasurementTypeInvalid MeasurementType = iota + MeasurementTypeNote + MeasurementTypeDuration + MeasurementTypeValue +) + +var letEnumSupport = newEnumSupport(map[uint]string{uint(MeasurementTypeInvalid): "INVALID LOG ENTRY TYPE", uint(MeasurementTypeNote): "Note", uint(MeasurementTypeDuration): "Duration", uint(MeasurementTypeValue): "Value"}) + +func (s MeasurementType) String() string { return letEnumSupport.String(uint(s)) } +func (s *MeasurementType) UnmarshalJSON(b []byte) error { + out, err := letEnumSupport.UnmarshJSON(b) + *s = MeasurementType(out) + return err +} +func (s MeasurementType) MarshalJSON() ([]byte, error) { return letEnumSupport.MarshJSON(uint(s)) } + +/* +Measurement records all captured data for a given measurement. You generally don't make Measurements directly - but you can fetch them from Experiments using Get(). + +When using Ginkgo, you can register Measurements as Report Entries via AddReportEntry. This will emit all the captured data points when Ginkgo generates the report. 
+*/ +type Measurement struct { + // Type is the MeasurementType - one of MeasurementTypeNote, MeasurementTypeDuration, or MeasurementTypeValue + Type MeasurementType + + // ExperimentName is the name of the experiment that this Measurement is associated with + ExperimentName string + + // If Type is MeasurementTypeNote, Note is populated with the note text. + Note string + + // If Type is MeasurementTypeDuration or MeasurementTypeValue, Name is the name of the recorded measurement + Name string + + // Style captures the styling information (if any) for this Measurement + Style string + + // Units capture the units (if any) for this Measurement. Units is set to "duration" if the Type is MeasurementTypeDuration + Units string + + // PrecisionBundle captures the precision to use when rendering data for this Measurement. + // If Type is MeasurementTypeDuration then PrecisionBundle.Duration is used to round any durations before presentation. + // If Type is MeasurementTypeValue then PrecisionBundle.ValueFormat is used to format any values before presentation + PrecisionBundle PrecisionBundle + + // If Type is MeasurementTypeDuration, Durations will contain all durations recorded for this measurement + Durations []time.Duration + + // If Type is MeasurementTypeValue, Values will contain all float64s recorded for this measurement + Values []float64 + + // If Type is MeasurementTypeDuration or MeasurementTypeValue then Annotations will include string annotations for all recorded Durations or Values. 
+ // If the user does not pass-in an Annotation() decoration for a particular value or duration, the corresponding entry in the Annotations slice will be the empty string "" + Annotations []string +} + +type Measurements []Measurement + +func (m Measurements) IdxWithName(name string) int { + for idx, measurement := range m { + if measurement.Name == name { + return idx + } + } + + return -1 +} + +func (m Measurement) report(enableStyling bool) string { + out := "" + style := m.Style + if !enableStyling { + style = "" + } + switch m.Type { + case MeasurementTypeNote: + out += fmt.Sprintf("%s - Note\n%s\n", m.ExperimentName, m.Note) + if style != "" { + out = style + out + "{{/}}" + } + return out + case MeasurementTypeValue, MeasurementTypeDuration: + out += fmt.Sprintf("%s - %s", m.ExperimentName, m.Name) + if m.Units != "" { + out += " [" + m.Units + "]" + } + if style != "" { + out = style + out + "{{/}}" + } + out += "\n" + out += m.Stats().String() + "\n" + } + t := table.NewTable() + t.TableStyle.EnableTextStyling = enableStyling + switch m.Type { + case MeasurementTypeValue: + t.AppendRow(table.R(table.C("Value", table.AlignTypeCenter), table.C("Annotation", table.AlignTypeCenter), table.Divider("="), style)) + for idx := range m.Values { + t.AppendRow(table.R( + table.C(fmt.Sprintf(m.PrecisionBundle.ValueFormat, m.Values[idx]), table.AlignTypeRight), + table.C(m.Annotations[idx], "{{gray}}", table.AlignTypeLeft), + )) + } + case MeasurementTypeDuration: + t.AppendRow(table.R(table.C("Duration", table.AlignTypeCenter), table.C("Annotation", table.AlignTypeCenter), table.Divider("="), style)) + for idx := range m.Durations { + t.AppendRow(table.R( + table.C(m.Durations[idx].Round(m.PrecisionBundle.Duration).String(), style, table.AlignTypeRight), + table.C(m.Annotations[idx], "{{gray}}", table.AlignTypeLeft), + )) + } + } + out += t.Render() + return out +} + +/* +ColorableString generates a styled report that includes all the data points for this Measurement. 
+It is called automatically by Ginkgo's reporting infrastructure when the Measurement is registered as a ReportEntry via AddReportEntry. +*/ +func (m Measurement) ColorableString() string { + return m.report(true) +} + +/* +String generates an unstyled report that includes all the data points for this Measurement. +*/ +func (m Measurement) String() string { + return m.report(false) +} + +/* +Stats returns a Stats struct summarizing the statistic of this measurement +*/ +func (m Measurement) Stats() Stats { + if m.Type == MeasurementTypeInvalid || m.Type == MeasurementTypeNote { + return Stats{} + } + + out := Stats{ + ExperimentName: m.ExperimentName, + MeasurementName: m.Name, + Style: m.Style, + Units: m.Units, + PrecisionBundle: m.PrecisionBundle, + } + + switch m.Type { + case MeasurementTypeValue: + out.Type = StatsTypeValue + out.N = len(m.Values) + if out.N == 0 { + return out + } + indices, sum := make([]int, len(m.Values)), 0.0 + for idx, v := range m.Values { + indices[idx] = idx + sum += v + } + sort.Slice(indices, func(i, j int) bool { + return m.Values[indices[i]] < m.Values[indices[j]] + }) + out.ValueBundle = map[Stat]float64{ + StatMin: m.Values[indices[0]], + StatMax: m.Values[indices[out.N-1]], + StatMean: sum / float64(out.N), + StatStdDev: 0.0, + } + out.AnnotationBundle = map[Stat]string{ + StatMin: m.Annotations[indices[0]], + StatMax: m.Annotations[indices[out.N-1]], + } + + if out.N%2 == 0 { + out.ValueBundle[StatMedian] = (m.Values[indices[out.N/2]] + m.Values[indices[out.N/2-1]]) / 2.0 + } else { + out.ValueBundle[StatMedian] = m.Values[indices[(out.N-1)/2]] + } + + for _, v := range m.Values { + out.ValueBundle[StatStdDev] += (v - out.ValueBundle[StatMean]) * (v - out.ValueBundle[StatMean]) + } + out.ValueBundle[StatStdDev] = math.Sqrt(out.ValueBundle[StatStdDev] / float64(out.N)) + case MeasurementTypeDuration: + out.Type = StatsTypeDuration + out.N = len(m.Durations) + if out.N == 0 { + return out + } + indices, sum := make([]int, 
len(m.Durations)), time.Duration(0) + for idx, v := range m.Durations { + indices[idx] = idx + sum += v + } + sort.Slice(indices, func(i, j int) bool { + return m.Durations[indices[i]] < m.Durations[indices[j]] + }) + out.DurationBundle = map[Stat]time.Duration{ + StatMin: m.Durations[indices[0]], + StatMax: m.Durations[indices[out.N-1]], + StatMean: sum / time.Duration(out.N), + } + out.AnnotationBundle = map[Stat]string{ + StatMin: m.Annotations[indices[0]], + StatMax: m.Annotations[indices[out.N-1]], + } + + if out.N%2 == 0 { + out.DurationBundle[StatMedian] = (m.Durations[indices[out.N/2]] + m.Durations[indices[out.N/2-1]]) / 2 + } else { + out.DurationBundle[StatMedian] = m.Durations[indices[(out.N-1)/2]] + } + stdDev := 0.0 + for _, v := range m.Durations { + stdDev += float64(v-out.DurationBundle[StatMean]) * float64(v-out.DurationBundle[StatMean]) + } + out.DurationBundle[StatStdDev] = time.Duration(math.Sqrt(stdDev / float64(out.N))) + } + + return out +} diff --git a/vendor/github.com/onsi/gomega/gmeasure/rank.go b/vendor/github.com/onsi/gomega/gmeasure/rank.go new file mode 100644 index 0000000000..6be9105e89 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gmeasure/rank.go @@ -0,0 +1,141 @@ +package gmeasure + +import ( + "fmt" + "sort" + + "github.com/onsi/gomega/gmeasure/table" +) + +/* +RankingCriteria is an enum representing the criteria by which Stats should be ranked. The enum names should be self explanatory. e.g. LowerMeanIsBetter means that Stats with lower mean values are considered more beneficial, with the lowest mean being declared the "winner" . 
+*/
+type RankingCriteria uint
+
+const (
+	LowerMeanIsBetter RankingCriteria = iota
+	HigherMeanIsBetter
+	LowerMedianIsBetter
+	HigherMedianIsBetter
+	LowerMinIsBetter
+	HigherMinIsBetter
+	LowerMaxIsBetter
+	HigherMaxIsBetter
+)
+
+var rcEnumSupport = newEnumSupport(map[uint]string{uint(LowerMeanIsBetter): "Lower Mean is Better", uint(HigherMeanIsBetter): "Higher Mean is Better", uint(LowerMedianIsBetter): "Lower Median is Better", uint(HigherMedianIsBetter): "Higher Median is Better", uint(LowerMinIsBetter): "Lower Mins is Better", uint(HigherMinIsBetter): "Higher Min is Better", uint(LowerMaxIsBetter): "Lower Max is Better", uint(HigherMaxIsBetter): "Higher Max is Better"}) // NOTE(review): "Lower Mins is Better" looks like a typo for "Lower Min is Better" — kept as-is because this vendored string is also the JSON round-trip token used by UnmarshalJSON; fix upstream first.
+
+func (s RankingCriteria) String() string { return rcEnumSupport.String(uint(s)) }
+func (s *RankingCriteria) UnmarshalJSON(b []byte) error {
+	out, err := rcEnumSupport.UnmarshJSON(b)
+	*s = RankingCriteria(out)
+	return err
+}
+func (s RankingCriteria) MarshalJSON() ([]byte, error) { return rcEnumSupport.MarshJSON(uint(s)) }
+
+/*
+Ranking ranks a set of Stats by a specified RankingCriteria. Use RankStats to create a Ranking.
+
+When using Ginkgo, you can register Rankings as Report Entries via AddReportEntry. This will emit a formatted table representing the Stats in rank-order when Ginkgo generates the report.
+*/
+type Ranking struct {
+	Criteria RankingCriteria
+	Stats    []Stats
+}
+
+/*
+RankStats creates a new ranking of the passed-in stats according to the passed-in criteria.
+*/ +func RankStats(criteria RankingCriteria, stats ...Stats) Ranking { + sort.Slice(stats, func(i int, j int) bool { + switch criteria { + case LowerMeanIsBetter: + return stats[i].FloatFor(StatMean) < stats[j].FloatFor(StatMean) + case HigherMeanIsBetter: + return stats[i].FloatFor(StatMean) > stats[j].FloatFor(StatMean) + case LowerMedianIsBetter: + return stats[i].FloatFor(StatMedian) < stats[j].FloatFor(StatMedian) + case HigherMedianIsBetter: + return stats[i].FloatFor(StatMedian) > stats[j].FloatFor(StatMedian) + case LowerMinIsBetter: + return stats[i].FloatFor(StatMin) < stats[j].FloatFor(StatMin) + case HigherMinIsBetter: + return stats[i].FloatFor(StatMin) > stats[j].FloatFor(StatMin) + case LowerMaxIsBetter: + return stats[i].FloatFor(StatMax) < stats[j].FloatFor(StatMax) + case HigherMaxIsBetter: + return stats[i].FloatFor(StatMax) > stats[j].FloatFor(StatMax) + } + return false + }) + + out := Ranking{ + Criteria: criteria, + Stats: stats, + } + + return out +} + +/* +Winner returns the Stats with the most optimal rank based on the specified ranking criteria. 
For example, if the RankingCriteria is LowerMaxIsBetter then the Stats with the lowest value or duration for StatMax will be returned as the "winner" +*/ +func (c Ranking) Winner() Stats { + if len(c.Stats) == 0 { + return Stats{} + } + return c.Stats[0] +} + +func (c Ranking) report(enableStyling bool) string { + if len(c.Stats) == 0 { + return "Empty Ranking" + } + t := table.NewTable() + t.TableStyle.EnableTextStyling = enableStyling + t.AppendRow(table.R( + table.C("Experiment"), table.C("Name"), table.C("N"), table.C("Min"), table.C("Median"), table.C("Mean"), table.C("StdDev"), table.C("Max"), + table.Divider("="), + "{{bold}}", + )) + + for idx, stats := range c.Stats { + name := stats.MeasurementName + if stats.Units != "" { + name = name + " [" + stats.Units + "]" + } + experimentName := stats.ExperimentName + style := stats.Style + if idx == 0 { + style = "{{bold}}" + style + name += "\n*Winner*" + experimentName += "\n*Winner*" + } + r := table.R(style) + t.AppendRow(r) + r.AppendCell(table.C(experimentName), table.C(name)) + r.AppendCell(stats.cells()...) + + } + out := fmt.Sprintf("Ranking Criteria: %s\n", c.Criteria) + if enableStyling { + out = "{{bold}}" + out + "{{/}}" + } + out += t.Render() + return out +} + +/* +ColorableString generates a styled report that includes a table of the rank-ordered Stats +It is called automatically by Ginkgo's reporting infrastructure when the Ranking is registered as a ReportEntry via AddReportEntry. 
+*/ +func (c Ranking) ColorableString() string { + return c.report(true) +} + +/* +String generates an unstyled report that includes a table of the rank-ordered Stats +*/ +func (c Ranking) String() string { + return c.report(false) +} diff --git a/vendor/github.com/onsi/gomega/gmeasure/stats.go b/vendor/github.com/onsi/gomega/gmeasure/stats.go new file mode 100644 index 0000000000..52b75a7d38 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gmeasure/stats.go @@ -0,0 +1,153 @@ +package gmeasure + +import ( + "fmt" + "time" + + "github.com/onsi/gomega/gmeasure/table" +) + +/* +Stat is an enum representing the statistics you can request of a Stats struct +*/ +type Stat uint + +const ( + StatInvalid Stat = iota + StatMin + StatMax + StatMean + StatMedian + StatStdDev +) + +var statEnumSupport = newEnumSupport(map[uint]string{uint(StatInvalid): "INVALID STAT", uint(StatMin): "Min", uint(StatMax): "Max", uint(StatMean): "Mean", uint(StatMedian): "Median", uint(StatStdDev): "StdDev"}) + +func (s Stat) String() string { return statEnumSupport.String(uint(s)) } +func (s *Stat) UnmarshalJSON(b []byte) error { + out, err := statEnumSupport.UnmarshJSON(b) + *s = Stat(out) + return err +} +func (s Stat) MarshalJSON() ([]byte, error) { return statEnumSupport.MarshJSON(uint(s)) } + +type StatsType uint + +const ( + StatsTypeInvalid StatsType = iota + StatsTypeValue + StatsTypeDuration +) + +var statsTypeEnumSupport = newEnumSupport(map[uint]string{uint(StatsTypeInvalid): "INVALID STATS TYPE", uint(StatsTypeValue): "StatsTypeValue", uint(StatsTypeDuration): "StatsTypeDuration"}) + +func (s StatsType) String() string { return statsTypeEnumSupport.String(uint(s)) } +func (s *StatsType) UnmarshalJSON(b []byte) error { + out, err := statsTypeEnumSupport.UnmarshJSON(b) + *s = StatsType(out) + return err +} +func (s StatsType) MarshalJSON() ([]byte, error) { return statsTypeEnumSupport.MarshJSON(uint(s)) } + +/* +Stats records the key statistics for a given measurement. 
You generally don't make Stats directly - but you can fetch them from Experiments using GetStats() and from Measurements using Stats().
+
+When using Ginkgo, you can register Measurements as Report Entries via AddReportEntry. This will emit all the captured data points when Ginkgo generates the report.
+*/
+type Stats struct {
+	// Type is the StatsType - one of StatsTypeDuration or StatsTypeValue
+	Type StatsType
+
+	// ExperimentName is the name of the Experiment that recorded the Measurement from which this Stat is derived
+	ExperimentName string
+
+	// MeasurementName is the name of the Measurement from which this Stat is derived
+	MeasurementName string
+
+	// Units captures the Units of the Measurement from which this Stat is derived
+	Units string
+
+	// Style captures the Style of the Measurement from which this Stat is derived
+	Style string
+
+	// PrecisionBundle captures the precision to use when rendering data for this Measurement.
+	// If Type is StatsTypeDuration then PrecisionBundle.Duration is used to round any durations before presentation.
+	// If Type is StatsTypeValue then PrecisionBundle.ValueFormat is used to format any values before presentation
+	PrecisionBundle PrecisionBundle
+
+	// N represents the total number of data points in the Measurement from which this Stat is derived
+	N int
+
+	// If Type is StatsTypeValue, ValueBundle will be populated with float64s representing this Stat's statistics
+	ValueBundle map[Stat]float64
+
+	// If Type is StatsTypeDuration, DurationBundle will be populated with time.Durations representing this Stat's statistics
+	DurationBundle map[Stat]time.Duration
+
+	// AnnotationBundle is populated with Annotations corresponding to the data points that can be associated with a Stat.
+	// For example AnnotationBundle[StatMin] will return the Annotation for the data point that has the minimum value/duration.
+ AnnotationBundle map[Stat]string +} + +// String returns a minimal summary of the stats of the form "MIN < [MEDIAN] | ±STDDEV < MAX" +func (s Stats) String() string { + return fmt.Sprintf("%s < [%s] | <%s> ±%s < %s", s.StringFor(StatMin), s.StringFor(StatMedian), s.StringFor(StatMean), s.StringFor(StatStdDev), s.StringFor(StatMax)) +} + +// ValueFor returns the float64 value for a particular Stat. You should only use this if the Stats has Type StatsTypeValue +// For example: +// +// median := experiment.GetStats("length").ValueFor(gmeasure.StatMedian) +// +// will return the median data point for the "length" Measurement. +func (s Stats) ValueFor(stat Stat) float64 { + return s.ValueBundle[stat] +} + +// DurationFor returns the time.Duration for a particular Stat. You should only use this if the Stats has Type StatsTypeDuration +// For example: +// +// mean := experiment.GetStats("runtime").ValueFor(gmeasure.StatMean) +// +// will return the mean duration for the "runtime" Measurement. +func (s Stats) DurationFor(stat Stat) time.Duration { + return s.DurationBundle[stat] +} + +// FloatFor returns a float64 representation of the passed-in Stat. +// When Type is StatsTypeValue this is equivalent to s.ValueFor(stat). +// When Type is StatsTypeDuration this is equivalent to float64(s.DurationFor(stat)) +func (s Stats) FloatFor(stat Stat) float64 { + switch s.Type { + case StatsTypeValue: + return s.ValueFor(stat) + case StatsTypeDuration: + return float64(s.DurationFor(stat)) + } + return 0 +} + +// StringFor returns a formatted string representation of the passed-in Stat. 
+// The formatting honors the precision directives provided in stats.PrecisionBundle +func (s Stats) StringFor(stat Stat) string { + switch s.Type { + case StatsTypeValue: + return fmt.Sprintf(s.PrecisionBundle.ValueFormat, s.ValueFor(stat)) + case StatsTypeDuration: + return s.DurationFor(stat).Round(s.PrecisionBundle.Duration).String() + } + return "" +} + +func (s Stats) cells() []table.Cell { + out := []table.Cell{} + out = append(out, table.C(fmt.Sprintf("%d", s.N))) + for _, stat := range []Stat{StatMin, StatMedian, StatMean, StatStdDev, StatMax} { + content := s.StringFor(stat) + if s.AnnotationBundle[stat] != "" { + content += "\n" + s.AnnotationBundle[stat] + } + out = append(out, table.C(content)) + } + return out +} diff --git a/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go b/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go new file mode 100644 index 0000000000..0da22f863e --- /dev/null +++ b/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go @@ -0,0 +1,116 @@ +package gmeasure + +import "time" + +/* +Stopwatch provides a convenient abstraction for recording durations. There are two ways to make a Stopwatch: + +You can make a Stopwatch from an Experiment via experiment.NewStopwatch(). This is how you first get a hold of a Stopwatch. + +You can subsequently call stopwatch.NewStopwatch() to get a fresh Stopwatch. +This is only necessary if you need to record durations on a different goroutine as a single Stopwatch is not considered thread-safe. + +The Stopwatch starts as soon as it is created. You can Pause() the stopwatch and Reset() it as needed. + +Stopwatches refer back to their parent Experiment. They use this reference to record any measured durations back with the Experiment. 
+*/
+type Stopwatch struct {
+	Experiment    *Experiment
+	t             time.Time
+	pauseT        time.Time
+	pauseDuration time.Duration
+	running       bool
+}
+
+func newStopwatch(experiment *Experiment) *Stopwatch {
+	return &Stopwatch{
+		Experiment: experiment,
+		t:          time.Now(),
+		running:    true,
+	}
+}
+
+/*
+NewStopwatch returns a new Stopwatch pointing to the same Experiment as this Stopwatch
+*/
+func (s *Stopwatch) NewStopwatch() *Stopwatch {
+	return newStopwatch(s.Experiment)
+}
+
+/*
+Record captures the amount of time that has passed since the Stopwatch was created or most recently Reset(). It records the duration on its associated Experiment in a Measurement with the passed-in name.
+
+Record takes all the decorators that experiment.RecordDuration takes (e.g. Annotation("...") can be used to annotate this duration)
+
+Note that Record does not Reset the Stopwatch. It does, however, return the Stopwatch so the following pattern is common:
+
+	stopwatch := experiment.NewStopwatch()
+	// first expensive operation
+	stopwatch.Record("first operation").Reset() //records the duration of the first operation and resets the stopwatch.
+	// second expensive operation
+	stopwatch.Record("second operation").Reset() //records the duration of the second operation and resets the stopwatch.
+
+omitting the Reset() after the first operation would cause the duration recorded for the second operation to include the time elapsed by both the first _and_ second operations.
+
+The Stopwatch must be running (i.e. not paused) when Record is called.
+*/
+func (s *Stopwatch) Record(name string, args ...any) *Stopwatch {
+	if !s.running {
+		panic("stopwatch is not running - call Resume or Reset before calling Record")
+	}
+	duration := time.Since(s.t) - s.pauseDuration
+	s.Experiment.RecordDuration(name, duration, args...)
+	return s
+}
+
+/*
+Reset resets the Stopwatch. Subsequent recorded durations will measure the time elapsed from the moment Reset was called.
+If the Stopwatch was Paused it is unpaused after calling Reset.
+*/
+func (s *Stopwatch) Reset() *Stopwatch {
+	s.running = true
+	s.t = time.Now()
+	s.pauseDuration = 0
+	return s
+}
+
+/*
+Pause pauses the Stopwatch. While paused the Stopwatch does not accumulate elapsed time. This is useful for ignoring expensive operations that are incidental to the behavior you are attempting to characterize.
+Note: You must call Resume() before you can Record() subsequent measurements.
+
+For example:
+
+	stopwatch := experiment.NewStopwatch()
+	// first expensive operation
+	stopwatch.Record("first operation").Reset()
+	// second expensive operation - part 1
+	stopwatch.Pause()
+	// something expensive that we don't care about
+	stopwatch.Resume()
+	// second expensive operation - part 2
+	stopwatch.Record("second operation").Reset() // the recorded duration captures the time elapsed during parts 1 and 2 of the second expensive operation, but not the bit in between
+
+The Stopwatch must be running when Pause is called.
+*/
+func (s *Stopwatch) Pause() *Stopwatch {
+	if !s.running {
+		panic("stopwatch is not running - call Resume or Reset before calling Pause")
+	}
+	s.running = false
+	s.pauseT = time.Now()
+	return s
+}
+
+/*
+Resume resumes a paused Stopwatch. Any time that elapses after Resume is called will be accumulated as elapsed time when a subsequent duration is Recorded.
+
+The Stopwatch must be Paused when Resume is called.
+*/
+func (s *Stopwatch) Resume() *Stopwatch {
+	if s.running {
+		panic("stopwatch is running - call Pause before calling Resume")
+	}
+	s.running = true
+	s.pauseDuration = s.pauseDuration + time.Since(s.pauseT)
+	return s
+}
diff --git a/vendor/github.com/onsi/gomega/gmeasure/table/table.go b/vendor/github.com/onsi/gomega/gmeasure/table/table.go
new file mode 100644
index 0000000000..0a0df3b7ae
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/gmeasure/table/table.go
@@ -0,0 +1,356 @@
+package table
+
+// This is a temporary package - Table will move to github.com/onsi/consolable once some more dust settles.
+
+import (
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+type AlignType uint
+
+const (
+	AlignTypeLeft AlignType = iota
+	AlignTypeCenter
+	AlignTypeRight
+)
+
+type Divider string
+
+type Row struct {
+	Cells []Cell
+	Divider string
+	Style string
+}
+
+func R(args ...any) *Row { // R builds a Row; args may be Cells, a style string, and/or a Divider
+	r := &Row{
+		Divider: "-",
+	}
+	for _, arg := range args {
+		switch reflect.TypeOf(arg) {
+		case reflect.TypeOf(Divider("")):
+			r.Divider = string(arg.(Divider))
+		case reflect.TypeOf(r.Style):
+			r.Style = arg.(string)
+		case reflect.TypeOf(Cell{}):
+			r.Cells = append(r.Cells, arg.(Cell))
+		}
+	}
+	return r
+}
+
+func (r *Row) AppendCell(cells ...Cell) *Row {
+	r.Cells = append(r.Cells, cells...)
+	return r
+}
+
+func (r *Row) Render(widths []int, totalWidth int, tableStyle TableStyle, isLastRow bool) string { // requires len(widths) == len(r.Cells) unless the row is a single spanning cell
+	out := ""
+	if len(r.Cells) == 1 {
+		out += strings.Join(r.Cells[0].render(totalWidth, r.Style, tableStyle), "\n") + "\n"
+	} else {
+		if len(r.Cells) != len(widths) {
+			panic("row vs width mismatch")
+		}
+		renderedCells := make([][]string, len(r.Cells))
+		maxHeight := 0
+		for colIdx, cell := range r.Cells {
+			renderedCells[colIdx] = cell.render(widths[colIdx], r.Style, tableStyle)
+			if len(renderedCells[colIdx]) > maxHeight {
+				maxHeight = len(renderedCells[colIdx])
+			}
+		}
+		for colIdx := range r.Cells { // pad shorter cells with blank lines so all columns have maxHeight lines
+			for len(renderedCells[colIdx]) < maxHeight {
+				renderedCells[colIdx] = append(renderedCells[colIdx], strings.Repeat(" ", widths[colIdx]))
+			}
+		}
+		border := strings.Repeat(" ", tableStyle.Padding)
+		if tableStyle.VerticalBorders {
+			border += "|" + border
+		}
+		for lineIdx := 0; lineIdx < maxHeight; lineIdx++ {
+			for colIdx := range r.Cells {
+				out += renderedCells[colIdx][lineIdx]
+				if colIdx < len(r.Cells)-1 {
+					out += border
+				}
+			}
+			out += "\n"
+		}
+	}
+	if tableStyle.HorizontalBorders && !isLastRow && r.Divider != "" {
+		out += strings.Repeat(string(r.Divider), totalWidth) + "\n"
+	}
+
+	return out
+}
+
+type Cell struct {
+	Contents []string
+	Style string
+	Align AlignType
+}
+
+func C(contents string, args ...any) Cell { // C builds a Cell; args may be a style string and/or an AlignType
+	c := Cell{
+		Contents: strings.Split(contents, "\n"),
+	}
+	for _, arg := range args {
+		switch reflect.TypeOf(arg) {
+		case reflect.TypeOf(c.Style):
+			c.Style = arg.(string)
+		case reflect.TypeOf(c.Align):
+			c.Align = arg.(AlignType)
+		}
+	}
+	return c
+}
+
+func (c Cell) Width() (int, int) { // returns (widest line, widest single word) in runes
+	w, minW := 0, 0
+	for _, line := range c.Contents {
+		lineWidth := utf8.RuneCountInString(line)
+		if lineWidth > w {
+			w = lineWidth
+		}
+		for _, word := range strings.Split(line, " ") {
+			wordWidth := utf8.RuneCountInString(word)
+			if wordWidth > minW {
+				minW = wordWidth
+			}
+		}
+	}
+	return w, minW
+}
+
+func (c Cell)
alignLine(line string, width int) string { // pads line to width per c.Align; longer lines are returned unchanged
+	lineWidth := utf8.RuneCountInString(line)
+	if lineWidth == width {
+		return line
+	}
+	if lineWidth < width {
+		gap := width - lineWidth
+		switch c.Align {
+		case AlignTypeLeft:
+			return line + strings.Repeat(" ", gap)
+		case AlignTypeRight:
+			return strings.Repeat(" ", gap) + line
+		case AlignTypeCenter:
+			leftGap := gap / 2
+			rightGap := gap - leftGap
+			return strings.Repeat(" ", leftGap) + line + strings.Repeat(" ", rightGap)
+		}
+	}
+	return line
+}
+
+func (c Cell) splitWordToWidth(word string, width int) []string { // hard-splits an over-long word into hyphenated chunks of width runes; the last element is the unfinished remainder
+	out := []string{}
+	n, subWord := 0, ""
+	for _, c := range word {
+		subWord += string(c)
+		n += 1
+		if n == width-1 {
+			out = append(out, subWord+"-")
+			n, subWord = 0, ""
+		}
+	}
+	// BUG FIX: carry the unfinished remainder as the final element. Previously it was
+	// silently dropped, losing trailing characters of the word (e.g. splitting "abcdefgh"
+	// at width 4 produced ["abc-", "def-"], discarding "gh"), and for width < 2 the
+	// function returned an empty slice, making the caller's splitWord[lastIdx] panic.
+	if subWord != "" {
+		out = append(out, subWord)
+	}
+	return out
+}
+
+func (c Cell) splitToWidth(line string, width int) []string { // greedily wraps line at spaces to fit width, hard-splitting over-long words
+	lineWidth := utf8.RuneCountInString(line)
+	if lineWidth <= width {
+		return []string{line}
+	}
+
+	outLines := []string{}
+	words := strings.Split(line, " ")
+	outWords := []string{words[0]}
+	length := utf8.RuneCountInString(words[0])
+	if length > width {
+		splitWord := c.splitWordToWidth(words[0], width)
+		lastIdx := len(splitWord) - 1
+		outLines = append(outLines, splitWord[:lastIdx]...)
+		outWords = []string{splitWord[lastIdx]}
+		length = utf8.RuneCountInString(splitWord[lastIdx])
+	}
+
+	for _, word := range words[1:] {
+		wordLength := utf8.RuneCountInString(word)
+		if length+wordLength+1 <= width {
+			length += wordLength + 1
+			outWords = append(outWords, word)
+			continue
+		}
+		outLines = append(outLines, strings.Join(outWords, " "))
+
+		outWords = []string{word}
+		length = wordLength
+		if length > width {
+			splitWord := c.splitWordToWidth(word, width)
+			lastIdx := len(splitWord) - 1
+			outLines = append(outLines, splitWord[:lastIdx]...)
+			outWords = []string{splitWord[lastIdx]}
+			length = utf8.RuneCountInString(splitWord[lastIdx])
+		}
+	}
+	if len(outWords) > 0 {
+		outLines = append(outLines, strings.Join(outWords, " "))
+	}
+
+	return outLines
+}
+
+func (c Cell) render(width int, style string, tableStyle TableStyle) []string { // wraps then aligns each content line; styled lines are closed with a "{{/}}" marker
+	out := []string{}
+	for _, line := range c.Contents {
+		out = append(out, c.splitToWidth(line, width)...)
+	}
+	for idx := range out {
+		out[idx] = c.alignLine(out[idx], width)
+	}
+
+	if tableStyle.EnableTextStyling {
+		style = style + c.Style
+		if style != "" {
+			for idx := range out {
+				out[idx] = style + out[idx] + "{{/}}"
+			}
+		}
+	}
+
+	return out
+}
+
+type TableStyle struct {
+	Padding int
+	VerticalBorders bool
+	HorizontalBorders bool
+	MaxTableWidth int
+	MaxColWidth int
+	EnableTextStyling bool
+}
+
+var DefaultTableStyle = TableStyle{
+	Padding: 1,
+	VerticalBorders: true,
+	HorizontalBorders: true,
+	MaxTableWidth: 120,
+	MaxColWidth: 40,
+	EnableTextStyling: true,
+}
+
+type Table struct {
+	Rows []*Row
+
+	TableStyle TableStyle
+}
+
+func NewTable() *Table {
+	return &Table{
+		TableStyle: DefaultTableStyle,
+	}
+}
+
+func (t *Table) AppendRow(row *Row) *Table {
+	t.Rows = append(t.Rows, row)
+	return t
+}
+
+func (t *Table) Render() string {
+	out := ""
+	totalWidth, widths := t.computeWidths()
+	for rowIdx, row := range t.Rows {
+		out += row.Render(widths, totalWidth, t.TableStyle, rowIdx == len(t.Rows)-1)
+	}
+	return out
+}
+
+func (t *Table) computeWidths() (int, []int) { // returns (total table width, per-column widths)
+	nCol := 0
+	for _, row := range t.Rows {
+		if len(row.Cells) > nCol {
+			nCol = len(row.Cells)
+		}
+	}
+
+	// let's compute the contribution to width from the borders + padding
+	borderWidth := t.TableStyle.Padding
+	if t.TableStyle.VerticalBorders {
+		borderWidth += 1 + t.TableStyle.Padding
+	}
+	totalBorderWidth := borderWidth * (nCol - 1) // NOTE(review): negative when nCol == 0 (empty table); appears harmless since Render emits nothing, but worth a guard — confirm
+
+	// let's compute the width of each column
+	widths := make([]int, nCol)
+	minWidths := make([]int, nCol)
+	for colIdx := range widths {
+		for 
_, row := range t.Rows {
+			if colIdx >= len(row.Cells) {
+				// ignore rows with fewer columns
+				continue
+			}
+			w, minWid := row.Cells[colIdx].Width()
+			if w > widths[colIdx] {
+				widths[colIdx] = w
+			}
+			if minWid > minWidths[colIdx] {
+				minWidths[colIdx] = minWid
+			}
+		}
+	}
+
+	// do we already fit?
+	if sum(widths)+totalBorderWidth <= t.TableStyle.MaxTableWidth {
+		// yes! we're done
+		return sum(widths) + totalBorderWidth, widths
+	}
+
+	// clamp the widths and minWidths to MaxColWidth
+	for colIdx := range widths {
+		widths[colIdx] = min(widths[colIdx], t.TableStyle.MaxColWidth)
+		minWidths[colIdx] = min(minWidths[colIdx], t.TableStyle.MaxColWidth)
+	}
+
+	// do we fit now?
+	if sum(widths)+totalBorderWidth <= t.TableStyle.MaxTableWidth {
+		// yes! we're done
+		return sum(widths) + totalBorderWidth, widths
+	}
+
+	// hmm... still no... can we possibly squeeze the table in without violating minWidths?
+	if sum(minWidths)+totalBorderWidth >= t.TableStyle.MaxTableWidth {
+		// nope - we're just going to have to exceed MaxTableWidth
+		return sum(minWidths) + totalBorderWidth, minWidths
+	}
+
+	// looks like we don't fit yet, but we should be able to fit without violating minWidths
+	// let's start scaling down
+	n := 0
+	for sum(widths)+totalBorderWidth > t.TableStyle.MaxTableWidth {
+		budget := t.TableStyle.MaxTableWidth - totalBorderWidth
+		baseline := sum(widths)
+
+		for colIdx := range widths { // scale each column toward its share of the budget, never below its minimum
+			widths[colIdx] = max((widths[colIdx]*budget)/baseline, minWidths[colIdx])
+		}
+		n += 1
+		if n > 100 {
+			break // in case we somehow fail to converge
+		}
+	}
+
+	return sum(widths) + totalBorderWidth, widths
+}
+
+func sum(s []int) int { // sum totals the ints in s
+	out := 0
+	for _, v := range s {
+		out += v
+	}
+	return out
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b1a4f898e4..cce89adf61 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1281,6 +1281,8 @@ github.com/onsi/ginkgo/v2/types
 ## explicit; go 1.24.0
 github.com/onsi/gomega
 github.com/onsi/gomega/format
+github.com/onsi/gomega/gmeasure +github.com/onsi/gomega/gmeasure/table github.com/onsi/gomega/internal github.com/onsi/gomega/internal/gutil github.com/onsi/gomega/matchers